DungeonCrawler/comfyui-audio/comfyui_audiocraft/nodes.py
Andre 4feff43758 ComfyUI AudioCraft: verbesserte Nodes + Workflow Template
- nodes.py: temperature, cfg_coef, top_k, extend_stride Parameter
- AudioUpsample Node: 16kHz/32kHz -> 48kHz fuer bessere Qualitaet
- AudioPreview Node: Vorschau direkt in ComfyUI UI
- workflow_game_audio.json: Template mit Musik + SFX Pipeline
- Standardmodell: musicgen-stereo-medium (besserer Sound)

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-21 14:22:45 +01:00

159 lines
5.6 KiB
Python

import torch
import torchaudio
import os
import time
class MusicGenNode:
    """ComfyUI node that generates music from a text prompt using Meta's MusicGen.

    Returns the path of the rendered ``.wav`` file (custom ``AUDIO_PATH`` type)
    so downstream nodes (upsample, preview) can consume it.
    """

    # Single-entry model cache: (checkpoint_name, loaded_model). MusicGen
    # checkpoints are large, so we keep only the most recently used one in
    # memory instead of reloading it from disk on every generation.
    _cached_model = (None, None)

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "prompt": ("STRING", {"multiline": True, "default": "dark dungeon ambience, slow, mysterious"}),
                "duration": ("FLOAT", {"default": 10.0, "min": 1.0, "max": 120.0, "step": 1.0}),
                "model": (["facebook/musicgen-stereo-medium", "facebook/musicgen-stereo-large", "facebook/musicgen-medium", "facebook/musicgen-large", "facebook/musicgen-small"],),
                "seed": ("INT", {"default": 0, "min": 0, "max": 2**32 - 1}),
                "temperature": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 2.0, "step": 0.05}),
                "cfg_coef": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10.0, "step": 0.5}),
                "top_k": ("INT", {"default": 250, "min": 1, "max": 1000}),
                "extend_stride": ("FLOAT", {"default": 18.0, "min": 1.0, "max": 30.0, "step": 1.0}),
            }
        }

    RETURN_TYPES = ("AUDIO_PATH",)
    RETURN_NAMES = ("audio_file",)
    FUNCTION = "generate"
    CATEGORY = "AudioCraft"
    OUTPUT_NODE = True

    def generate(self, prompt, duration, model, seed, temperature, cfg_coef, top_k, extend_stride):
        """Generate audio for *prompt* and write it under the output folder.

        Returns a 1-tuple with the absolute path of the written ``.wav`` file.
        The model is cached across calls; it is only (re)loaded when a
        different checkpoint is requested.
        """
        # Deferred import: audiocraft is heavy and only needed at generation time.
        from audiocraft.models import MusicGen
        from audiocraft.data.audio import audio_write

        torch.manual_seed(seed)

        cached_name, mg = type(self)._cached_model
        if cached_name != model or mg is None:
            print(f"[MusicGen] Loading model: {model}")
            mg = MusicGen.get_pretrained(model)
            type(self)._cached_model = (model, mg)

        mg.set_generation_params(
            duration=duration,
            temperature=temperature,
            cfg_coef=cfg_coef,
            top_k=top_k,
            extend_stride=extend_stride,
        )
        print(f"[MusicGen] Generating: {prompt}")
        wav = mg.generate([prompt])

        output_dir = "/app/ComfyUI/output/audio"
        os.makedirs(output_dir, exist_ok=True)
        # Nanosecond timestamp avoids the filename collision that a
        # whole-second timestamp has when two generations finish in the
        # same second.
        filename = f"musicgen_{time.time_ns()}"
        out_path = os.path.join(output_dir, filename)
        # audio_write appends the ".wav" suffix itself.
        audio_write(out_path, wav[0].cpu(), mg.sample_rate, strategy="loudness")
        return (out_path + ".wav",)
class AudioGenNode:
    """ComfyUI node that generates sound effects from a text prompt using
    Meta's AudioGen (``facebook/audiogen-medium``).

    Returns the path of the rendered ``.wav`` file (custom ``AUDIO_PATH`` type).
    """

    # Cached AudioGen model; there is only one checkpoint, so a single slot
    # suffices. Avoids reloading the weights on every invocation.
    _cached_model = None

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "prompt": ("STRING", {"multiline": True, "default": "sword clash metal sound effect"}),
                "duration": ("FLOAT", {"default": 3.0, "min": 0.5, "max": 30.0, "step": 0.5}),
                "seed": ("INT", {"default": 0, "min": 0, "max": 2**32 - 1}),
                "temperature": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 2.0, "step": 0.05}),
                "cfg_coef": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10.0, "step": 0.5}),
                "top_k": ("INT", {"default": 250, "min": 1, "max": 1000}),
            }
        }

    RETURN_TYPES = ("AUDIO_PATH",)
    RETURN_NAMES = ("audio_file",)
    FUNCTION = "generate"
    CATEGORY = "AudioCraft"
    OUTPUT_NODE = True

    def generate(self, prompt, duration, seed, temperature, cfg_coef, top_k):
        """Generate a sound effect for *prompt* and write it as a ``.wav``.

        Returns a 1-tuple with the absolute path of the written file.
        """
        # Deferred import: audiocraft is heavy and only needed at generation time.
        from audiocraft.models import AudioGen
        from audiocraft.data.audio import audio_write

        torch.manual_seed(seed)

        ag = type(self)._cached_model
        if ag is None:
            print(f"[AudioGen] Loading model...")
            ag = AudioGen.get_pretrained("facebook/audiogen-medium")
            type(self)._cached_model = ag

        ag.set_generation_params(
            duration=duration,
            temperature=temperature,
            cfg_coef=cfg_coef,
            top_k=top_k,
        )
        print(f"[AudioGen] Generating: {prompt}")
        wav = ag.generate([prompt])

        output_dir = "/app/ComfyUI/output/audio"
        os.makedirs(output_dir, exist_ok=True)
        # Nanosecond timestamp prevents same-second filename collisions.
        filename = f"audiogen_{time.time_ns()}"
        out_path = os.path.join(output_dir, filename)
        # audio_write appends the ".wav" suffix itself.
        audio_write(out_path, wav[0].cpu(), ag.sample_rate, strategy="loudness")
        return (out_path + ".wav",)
class AudioUpsampleNode:
    """Resample an audio file to a higher rate via torchaudio — improves
    playback quality of 16 kHz / 32 kHz generator output at 48 kHz."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "audio_file": ("AUDIO_PATH",),
                "target_sr": ([48000, 44100], {"default": 48000}),
            }
        }

    RETURN_TYPES = ("AUDIO_PATH",)
    RETURN_NAMES = ("audio_file",)
    FUNCTION = "upsample"
    CATEGORY = "AudioCraft"
    OUTPUT_NODE = True

    @staticmethod
    def _derive_output_path(audio_file, target_sr):
        """Build the resampled file's path: ``<stem>_<sr>hz.wav``.

        Uses os.path.splitext instead of a naive ``.replace(".wav", ...)``,
        which would corrupt paths containing ".wav" elsewhere and silently
        produce ``name.flac_48000hz`` style names for non-.wav inputs.
        """
        root, _ext = os.path.splitext(audio_file)
        return f"{root}_{target_sr}hz.wav"

    def upsample(self, audio_file, target_sr):
        """Load *audio_file*, resample to *target_sr* if needed, and save a
        new file next to the original. Returns a 1-tuple with its path."""
        wav, sr = torchaudio.load(audio_file)
        if sr != target_sr:
            resampler = torchaudio.transforms.Resample(orig_freq=sr, new_freq=target_sr)
            wav = resampler(wav)
        out_path = self._derive_output_path(audio_file, target_sr)
        # Written even when sr already matches, so downstream nodes always
        # receive a fresh file at the advertised path.
        torchaudio.save(out_path, wav, target_sr)
        print(f"[Upsample] {sr}Hz → {target_sr}Hz: {out_path}")
        return (out_path,)
class AudioPreviewNode:
    """Shows an audio file inline in the ComfyUI front-end (terminal node,
    no return values)."""

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {"audio_file": ("AUDIO_PATH",)}}

    RETURN_TYPES = ()
    FUNCTION = "preview"
    CATEGORY = "AudioCraft"
    OUTPUT_NODE = True

    def preview(self, audio_file):
        # Describe the file relative to ComfyUI's output directory so the
        # front-end can locate and render an audio player for it.
        entry = {
            "filename": os.path.basename(audio_file),
            "subfolder": "audio",
            "type": "output",
        }
        return {"ui": {"audio": [entry]}}
# Registration tables read by ComfyUI when the custom-node package loads:
# internal node id -> implementing class, and node id -> label shown in the UI.
NODE_CLASS_MAPPINGS = {
    "MusicGen": MusicGenNode,
    "AudioGen": AudioGenNode,
    "AudioUpsample": AudioUpsampleNode,
    "AudioPreview": AudioPreviewNode,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "MusicGen": "MusicGen (Musik)",
    "AudioGen": "AudioGen (Sound Effects)",
    "AudioUpsample": "Audio Upsample (Qualität)",
    "AudioPreview": "Audio Preview",
}