import importlib
import pathlib
import librosa
import yaml
import folder_paths
import os
import sys
import tempfile
import torchaudio
from typing import Optional
import torch
# Make this node's own directory importable so the bundled `somesing2midi`
# package resolves no matter what ComfyUI's working directory is.
current_dir = os.path.dirname(os.path.abspath(__file__))
if current_dir not in sys.path:
    sys.path.append(current_dir)

from somesing2midi import inference
from somesing2midi.utils.config_utils import print_config
from somesing2midi.utils.infer_utils import build_midi_file
from somesing2midi.utils.slicer2 import Slicer


# Model/asset locations, resolved under ComfyUI's configured directories.
models_dir = folder_paths.models_dir
# Root directory holding the SOME checkpoint + config (under models/TTS).
model_path = os.path.join(models_dir, "TTS")
config_path = os.path.join(model_path, "0119_continuous256_5spk", "config.yaml")
checkpoint_path = os.path.join(model_path, "0119_continuous256_5spk", "model_ckpt_steps_100000_simplified.ckpt")
# Temp dir for caching incoming audio tensors as wav files.
cache_dir = folder_paths.get_temp_directory()
# NOTE: "outpot" is a typo but the name is referenced below — kept for compatibility.
outpot_dir = folder_paths.get_output_directory() + "/midi"


def cache_audio_tensor(
    cache_dir,
    audio_tensor: torch.Tensor,
    sample_rate: int,
    filename_prefix: str = "cached_audio_",
    audio_format: Optional[str] = ".wav"
) -> str:
    """Write ``audio_tensor`` to a uniquely named audio file under ``cache_dir``.

    Args:
        cache_dir: Directory in which the temp file is created.
        audio_tensor: Waveform in the (channels, samples) layout that
            ``torchaudio.save`` expects.
        sample_rate: Sample rate in Hz.
        filename_prefix: Prefix for the generated file name.
        audio_format: File suffix; determines the container torchaudio writes.

    Returns:
        Path of the written file. ``delete=False`` keeps the file after the
        handle closes, so the caller owns cleanup.

    Raises:
        Exception: Wraps any failure, chained to the original cause.
    """
    try:
        # Reserve a unique filename, then close the handle immediately so
        # torchaudio can (re)open the path — an open NamedTemporaryFile
        # cannot be reopened on Windows.
        with tempfile.NamedTemporaryFile(
            prefix=filename_prefix,
            suffix=audio_format,
            dir=cache_dir,
            delete=False
        ) as tmp_file:
            temp_filepath = tmp_file.name

        torchaudio.save(temp_filepath, audio_tensor, sample_rate)

        return temp_filepath
    except Exception as e:
        # Chain the original exception instead of discarding its traceback.
        raise Exception(f"Error caching audio tensor: {e}") from e


# Lazily constructed global inference model; set back to None to release it.
SOME = None


class SomeSing2Midi:
    """ComfyUI node: transcribe a sung AUDIO input to a MIDI file, then
    render the MIDI back to audio via MuseScore 4 for preview."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "audio": ("AUDIO",), 
                "tempo": ("INT", {"default": 120, "min": 10, "max": 500, "step": 10}), 
                "midi_name": ("STRING", {"default": "midi"}),
                "unload_model": ("BOOLEAN", {"default": True}),
                },
        }

    CATEGORY = "🎤MW/MW-SOME"
    RETURN_TYPES = ("AUDIO",)
    RETURN_NAMES = ("audio",)
    FUNCTION = "run"

    def run(self, audio, tempo, midi_name, unload_model):
        """Slice the input audio, infer MIDI per chunk, save the .mid file,
        and return a rendered preview (or silent audio on render failure)."""
        # BUGFIX: the original bound the checkpoint to a local named
        # `model_path`, shadowing the module-level models/TTS directory, so
        # the rmvpe.pt lookup below was joined onto the .ckpt FILE path.
        ckpt_path = pathlib.Path(checkpoint_path)
        with open(config_path, 'r', encoding='utf8') as f:
            config = yaml.safe_load(f)
        # print_config(config)
        infer_cls = inference.task_inference_mapping[config['task_cls']]

        # Resolve the dotted class path to the actual inference class.
        pkg = ".".join(infer_cls.split(".")[:-1])
        cls_name = infer_cls.split(".")[-1]
        infer_cls = getattr(importlib.import_module(pkg), cls_name)
        assert issubclass(infer_cls, inference.BaseInference), \
            f'Inference class {infer_cls} is not a subclass of {inference.BaseInference}.'

        config["model_cls"] = "somesing2midi." + config["model_cls"]
        # `model_path` is the module-level models/TTS directory.
        # NOTE(review): confirm rmvpe.pt actually lives under TTS/Seed-VC
        # rather than directly under the models dir.
        config["pe_ckpt"] = os.path.join(model_path, "Seed-VC", "rmvpe.pt")

        global SOME
        if SOME is None:
            SOME = infer_cls(config=config, model_path=ckpt_path)

        # Cache the incoming tensor to a wav file so librosa can read it.
        audio = cache_audio_tensor(cache_dir, audio["waveform"].squeeze(0), audio["sample_rate"])

        wav_path = pathlib.Path(audio)
        waveform, _ = librosa.load(wav_path, sr=config['audio_sample_rate'], mono=True)
        # Split on silence; each chunk is transcribed independently and the
        # chunk offsets re-align the resulting MIDI segments.
        slicer = Slicer(sr=config['audio_sample_rate'], max_sil_kept=1000)
        chunks = slicer.slice(waveform)
        midis = SOME.infer([c['waveform'] for c in chunks])

        midi_file = build_midi_file([c['offset'] for c in chunks], midis, tempo=tempo)

        midi_path = pathlib.Path(outpot_dir + "/" + midi_name + ".mid")
        os.makedirs(midi_path.parent, exist_ok=True)
        midi_file.save(midi_path)
        print(f'MIDI file saved at: \'{midi_path}\'')

        if unload_model:
            SOME = None
            torch.cuda.empty_cache()

        mp3_path = self.midi2mp3(midi_path)

        if mp3_path and os.path.exists(mp3_path):
            # torchaudio is imported at module level; no local import needed.
            waveform, sample_rate = torchaudio.load(mp3_path)
            audio = {"waveform": waveform.unsqueeze(0), "sample_rate": sample_rate}
        else:
            audio = self.get_empty_audio()

        return (audio,)

    def get_empty_audio(self):
        """Return empty audio"""
        return {"waveform": torch.zeros(1, 2, 1), "sample_rate": 44100}

    def midi2mp3(self, midi_path):
        """Render `midi_path` to an .mp3 next to it with MuseScore 4.

        Returns the mp3 path on success, None on failure (MuseScore missing,
        non-zero exit, or output file never stabilized).
        """
        import subprocess
        import shutil

        mp3_path = str(midi_path).rsplit(".", 1)[0] + ".mp3"
        musescore_executable_path = shutil.which('MuseScore4')
        # BUGFIX: shutil.which returns None when MuseScore4 is not on PATH;
        # without this guard subprocess.run([None, ...]) raises a TypeError
        # that the CalledProcessError handler below does not catch.
        if musescore_executable_path is None:
            print("MuseScore4 executable not found in PATH")
            return None
        print(musescore_executable_path)
        try:
            subprocess.run(
                [musescore_executable_path, '-o', mp3_path, midi_path],
                check=True,
                capture_output=True,
            )
        except subprocess.CalledProcessError as e:
            print(f"Conversion failed: {e.stderr}" if e.stderr else "Unknown error")
            return None

        if self.wait_for_file(mp3_path):
            print(f"Conversion to {mp3_path} completed")
            return mp3_path
        print("MP3 conversion timeout")
        return None

    def wait_for_file(self, file_path, timeout=15, check_interval=0.3):
        """Wait for file generation to complete.

        Polls until `file_path` exists; for .mp3 outputs additionally waits
        until the size stops changing (MuseScore writes incrementally).
        Returns True on success, False after `timeout` seconds.
        """
        import time
        start_time = time.time()

        while time.time() - start_time < timeout:
            if os.path.exists(file_path):

                if file_path.endswith('.mp3'):
                    # Two equal size samples `check_interval` apart ≈ done.
                    initial_size = os.path.getsize(file_path)
                    time.sleep(check_interval)
                    if os.path.getsize(file_path) == initial_size:
                        return True
                else:
                    return True
            time.sleep(check_interval)
        return False


# ComfyUI registration: node id -> implementation class.
NODE_CLASS_MAPPINGS = {
    "SomeSing2Midi": SomeSing2Midi
}

# ComfyUI registration: node id -> human-readable display name.
NODE_DISPLAY_NAME_MAPPINGS = {
    "SomeSing2Midi": "SOME Sing to Midi"
}
