DungeonCrawler/comfyui-audio/comfyui_audiocraft/nodes.py
Andre f3c895ac43 Add ComfyUI AudioCraft custom nodes und Game Audio Workflow
- comfyui_audiocraft: eigene MusicGen + AudioGen Nodes
- workflow_game_audio.json: vorgefertigter Workflow fuer Dungeon Musik und Sound Effects

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-20 17:55:54 +01:00

92 lines
2.9 KiB
Python

import torch
import torchaudio
import os
import time
class MusicGenNode:
    """ComfyUI node that renders a text prompt into music via Meta's MusicGen.

    Returns the path of the generated ``.wav`` file so downstream nodes can
    load or post-process it.
    """

    @classmethod
    def INPUT_TYPES(cls):
        """Declare the ComfyUI input sockets for this node."""
        model_choices = [
            "facebook/musicgen-small",
            "facebook/musicgen-medium",
            "facebook/musicgen-large",
            "facebook/musicgen-stereo-medium",
        ]
        return {
            "required": {
                "prompt": ("STRING", {"multiline": True, "default": "dark dungeon ambience, slow, mysterious"}),
                "duration": ("FLOAT", {"default": 10.0, "min": 1.0, "max": 60.0, "step": 1.0}),
                "model": (model_choices,),
                "seed": ("INT", {"default": 0, "min": 0, "max": 2**32 - 1}),
            }
        }

    RETURN_TYPES = ("AUDIO_PATH",)
    RETURN_NAMES = ("audio_file",)
    FUNCTION = "generate"
    CATEGORY = "AudioCraft"

    def generate(self, prompt, duration, model, seed):
        """Generate audio for *prompt*; return a 1-tuple holding the wav path."""
        # audiocraft is imported lazily so ComfyUI can register this node even
        # when the dependency is absent until the node actually executes.
        from audiocraft.models import MusicGen
        from audiocraft.data.audio import audio_write

        torch.manual_seed(seed)  # make generation reproducible per seed
        print(f"[MusicGen] Loading model: {model}")
        generator = MusicGen.get_pretrained(model)
        generator.set_generation_params(duration=duration)

        print(f"[MusicGen] Generating: {prompt}")
        samples = generator.generate([prompt])

        target_dir = "/app/ComfyUI/output/audio"
        os.makedirs(target_dir, exist_ok=True)
        stem = os.path.join(target_dir, f"musicgen_{int(time.time())}")
        # audio_write appends the ".wav" suffix itself, hence the bare stem here
        # and the explicit suffix on the returned path.
        audio_write(stem, samples[0].cpu(), generator.sample_rate, strategy="loudness")
        return (stem + ".wav",)
class AudioGenNode:
    """ComfyUI node that renders a text prompt into a sound effect via AudioGen.

    Always uses the "facebook/audiogen-medium" checkpoint and returns the path
    of the generated ``.wav`` file for downstream nodes.
    """

    @classmethod
    def INPUT_TYPES(cls):
        """Declare the ComfyUI input sockets for this node."""
        return {
            "required": {
                "prompt": ("STRING", {"multiline": True, "default": "sword clash metal sound effect"}),
                "duration": ("FLOAT", {"default": 3.0, "min": 0.5, "max": 30.0, "step": 0.5}),
                "seed": ("INT", {"default": 0, "min": 0, "max": 2**32 - 1}),
            }
        }

    RETURN_TYPES = ("AUDIO_PATH",)
    RETURN_NAMES = ("audio_file",)
    FUNCTION = "generate"
    CATEGORY = "AudioCraft"

    def generate(self, prompt, duration, seed):
        """Generate a sound effect for *prompt*; return a 1-tuple with the wav path."""
        # Lazy import keeps node registration working even when audiocraft is
        # not installed until generation time.
        from audiocraft.models import AudioGen
        from audiocraft.data.audio import audio_write

        torch.manual_seed(seed)  # deterministic output for a given seed
        # Fixed: was an f-string with no placeholders (lint F541); output is identical.
        print("[AudioGen] Loading model...")
        ag = AudioGen.get_pretrained("facebook/audiogen-medium")
        ag.set_generation_params(duration=duration)

        print(f"[AudioGen] Generating: {prompt}")
        wav = ag.generate([prompt])

        output_dir = "/app/ComfyUI/output/audio"
        os.makedirs(output_dir, exist_ok=True)
        out_path = os.path.join(output_dir, f"audiogen_{int(time.time())}")
        # audio_write appends the ".wav" suffix itself, hence the bare path here
        # and the explicit suffix on the returned value.
        audio_write(out_path, wav[0].cpu(), ag.sample_rate, strategy="loudness")
        return (out_path + ".wav",)
# Registration tables consumed by ComfyUI when it imports this module:
# internal node id -> implementing class.
NODE_CLASS_MAPPINGS = {
    "MusicGen": MusicGenNode,
    "AudioGen": AudioGenNode,
}

# Internal node id -> human-readable label shown in the ComfyUI node picker.
NODE_DISPLAY_NAME_MAPPINGS = {
    "MusicGen": "MusicGen (Musik)",
    "AudioGen": "AudioGen (Sound Effects)",
}