import os
from pathlib import Path
import numpy as np
import soundfile as sf
import re
from cached_path import cached_path
from importlib.resources import files
import tomli
from loguru import logger
import torch

from f5_tts.infer.utils_infer import (
    infer_process,
    load_model,
    load_vocoder,
    preprocess_ref_audio_text,
    remove_silence_for_generated_wav,
)
from f5_tts.model import DiT, UNetT


class F5TTSInference:
    """Inference wrapper for F5-TTS / E2-TTS text-to-speech generation.

    Loads a vocoder and a diffusion/flow TTS model (downloading checkpoints
    from HuggingFace when no local paths are given) and exposes a single
    ``generate()`` entry point that synthesizes speech conditioned on a
    reference audio/text pair.
    """

    def __init__(
        self,
        model_type="F5-TTS",
        vocoder_name="vocos",
        load_vocoder_from_local=False,
        config_path=None,
        ckpt_file=None,
        vocab_file=None,
        speed=1.0,
        device="cuda" if torch.cuda.is_available() else "cpu",
    ):
        """
        Initialize F5TTS inference engine.

        Args:
            model_type: "F5-TTS" or "E2-TTS"
            vocoder_name: "vocos" or "bigvgan"
            load_vocoder_from_local: Whether to load vocoder from local path
            config_path: Path to TOML config file (defaults to the bundled
                infer/examples/basic/basic.toml)
            ckpt_file: Path to checkpoint file (downloaded if None)
            vocab_file: Path to vocabulary file (downloaded if None)
            speed: Speed factor for audio generation
            device: Torch device string; defaults to CUDA when available

        Raises:
            ValueError: If model_type or vocoder_name is not recognized.
        """
        self.device = device
        if torch.backends.mps.is_built():
            # NOTE(review): MPS is only logged, never selected as the device —
            # presumably intentional; confirm before routing inference to mps.
            logger.info("running on mac!")
        self.model_type = model_type
        self.vocoder_name = vocoder_name
        self.speed = speed

        # Load generation config (e.g. multi-voice definitions) from TOML.
        if config_path is None:
            config_path = os.path.join(
                os.path.dirname(os.path.dirname(__file__)),
                "infer/examples/basic",
                "basic.toml",
            )
        # Context manager ensures the config file handle is closed
        # (the previous code left the file object open).
        with open(config_path, "rb") as f:
            self.config = tomli.load(f)

        # Initialize vocoder, then the TTS model.
        self._init_vocoder(load_vocoder_from_local)
        self._init_model(ckpt_file, vocab_file)
        logger.info("model initialized.")

    def _init_vocoder(self, load_vocoder_from_local):
        """Initialize the vocoder and move it to the target device.

        Raises:
            ValueError: If ``self.vocoder_name`` is not "vocos" or "bigvgan".
        """
        if self.vocoder_name == "vocos":
            vocoder_local_path = "checkpoints/vocos-mel-24khz"
        elif self.vocoder_name == "bigvgan":
            vocoder_local_path = "checkpoints/bigvgan_v2_24khz_100band_256x"
        else:
            # Fail fast with a clear message instead of an UnboundLocalError
            # on vocoder_local_path below.
            raise ValueError(
                f"Unknown vocoder_name {self.vocoder_name!r}; "
                "expected 'vocos' or 'bigvgan'."
            )

        self.vocoder = load_vocoder(
            vocoder_name=self.vocoder_name,
            is_local=load_vocoder_from_local,
            local_path=vocoder_local_path,
        )
        self.vocoder.to(self.device)

    def _init_model(self, ckpt_file, vocab_file):
        """Initialize the TTS model, downloading checkpoint/vocab if needed.

        Args:
            ckpt_file: Checkpoint path, or falsy to download from HuggingFace.
            vocab_file: Vocabulary path, or None to use/download a local copy.

        Raises:
            ValueError: If ``self.model_type`` is not "F5-TTS" or "E2-TTS".
        """
        if self.model_type == "F5-TTS":
            model_cls = DiT
            model_cfg = dict(
                dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4
            )
            if not ckpt_file:
                # Checkpoint choice depends on the vocoder the model was
                # trained against (mel-spectrogram front end differs).
                if self.vocoder_name == "vocos":
                    repo_name, exp_name = "F5-TTS", "F5TTS_Base"
                    ckpt_step = 1200000
                    ckpt_file = str(
                        cached_path(
                            f"hf://SWivid/{repo_name}/{exp_name}/model_{ckpt_step}.safetensors",
                            cache_dir="checkpoints",
                        )
                    )
                elif self.vocoder_name == "bigvgan":
                    repo_name, exp_name = "F5-TTS", "F5TTS_Base_bigvgan"
                    ckpt_step = 1250000
                    ckpt_file = str(
                        cached_path(
                            f"hf://SWivid/{repo_name}/{exp_name}/model_{ckpt_step}.pt",
                            cache_dir="checkpoints",
                        )
                    )

        elif self.model_type == "E2-TTS":
            assert self.vocoder_name == "vocos", "E2-TTS only supports vocoder vocos"
            model_cls = UNetT
            model_cfg = dict(dim=1024, depth=24, heads=16, ff_mult=4)
            if not ckpt_file:
                repo_name, exp_name = "E2-TTS", "E2TTS_Base"
                ckpt_step = 1200000
                ckpt_file = str(
                    cached_path(
                        f"hf://SWivid/{repo_name}/{exp_name}/model_{ckpt_step}.safetensors"
                    )
                )

        else:
            # Fail fast with a clear message instead of an UnboundLocalError
            # on model_cls below.
            raise ValueError(
                f"Unknown model_type {self.model_type!r}; "
                "expected 'F5-TTS' or 'E2-TTS'."
            )

        if vocab_file is None:
            vocab_file = "checkpoints/f5tts/vocab.txt"
            if not os.path.exists(vocab_file):
                os.makedirs(os.path.dirname(vocab_file), exist_ok=True)
                torch.hub.download_url_to_file(
                    "https://hf-mirror.com/SWivid/F5-TTS/resolve/main/F5TTS_Base/vocab.txt",
                    vocab_file,
                    progress=True,
                )
                logger.info(f"vocab file downloaded into: {vocab_file}")
        self.model = load_model(
            model_cls,
            model_cfg,
            ckpt_file,
            mel_spec_type=self.vocoder_name,
            vocab_file=vocab_file,
        )
        self.model.to(self.device)

    def _add_silence(self, audio, duration):
        """Prepend ``duration`` frames of silence to ``audio``.

        Args:
            audio: Tensor shaped (frames, channels) — TODO confirm layout
                against callers; cat happens along dim 0.
            duration: Number of leading silence frames; <= 0 is a no-op.

        Returns:
            The padded tensor, or ``audio`` unchanged when duration <= 0.
        """
        if duration > 0:
            silence = torch.zeros(duration, audio.shape[1], device=audio.device)
            audio = torch.cat([silence, audio], dim=0)
        return audio

    def generate(
        self,
        ref_audio,
        ref_text,
        gen_text,
        fix_duration=None,
        output_dir="outputs",
        output_file="output.wav",
        remove_silence=False,
    ):
        """
        Generate speech from text.

        Args:
            ref_audio: Path to reference audio file
            ref_text: Reference text
            gen_text: Text to generate; may contain ``[voice]`` tags that
                switch between voices defined in the config
            fix_duration: Fixed total duration passed to infer_process, or
                None to let the model decide. (BUG FIX: previously a required
                positional argument, which broke keyword-only callers such as
                the usage example in this module.)
            output_dir: Output directory
            output_file: Output filename
            remove_silence: Whether to remove silence from generated audio

        Returns:
            Path to the written WAV file, or None when nothing was generated.
        """
        # Merge the caller-provided voice into the configured voice table;
        # "main" always refers to the arguments of this call.
        main_voice = {"ref_audio": ref_audio, "ref_text": ref_text}
        voices = self.config.get("voices", {"main": main_voice})
        voices["main"] = main_voice

        for voice in voices:
            voices[voice]["ref_audio"], voices[voice]["ref_text"] = (
                preprocess_ref_audio_text(
                    voices[voice]["ref_audio"], voices[voice]["ref_text"]
                )
            )

        # Split on [voice] markers so each chunk is synthesized with the
        # requested voice.
        generated_segments = []
        chunks = re.split(r"(?=\[\w+\])", gen_text)

        for text in chunks:
            if not text.strip():
                continue

            # Determine which voice this chunk requests; fall back to main.
            match = re.match(r"\[(\w+)\]", text)
            voice = match[1] if match else "main"
            if voice not in voices:
                # Consistent with the module's loguru-based logging
                # (previously a bare print).
                logger.warning(f"Voice {voice} not found, using main.")
                voice = "main"

            # Strip the voice tag and synthesize the remaining text.
            text = re.sub(r"\[\w+\]", "", text).strip()
            audio, sample_rate, spec = infer_process(
                voices[voice]["ref_audio"],
                voices[voice]["ref_text"],
                text,
                self.model,
                self.vocoder,
                mel_spec_type=self.vocoder_name,
                speed=self.speed,
                device=self.device,
                fix_duration=fix_duration,
            )
            generated_segments.append(audio)

        # Concatenate all segments and write the final waveform.
        if generated_segments:
            final_wave = np.concatenate(generated_segments)

            os.makedirs(output_dir, exist_ok=True)
            wave_path = Path(output_dir) / output_file

            # Write directly by path: the previous open()+f.name dance was
            # redundant and fails on Windows (file held open while soundfile
            # reopens it by name).
            sf.write(str(wave_path), final_wave, sample_rate)
            if remove_silence:
                remove_silence_for_generated_wav(str(wave_path))

            return str(wave_path)

        return None


# Usage example
if __name__ == "__main__":
    # Initialize TTS engine with default model/vocoder.
    tts = F5TTSInference(model_type="F5-TTS", vocoder_name="vocos", speed=1.0)

    # Generate speech from a reference recording.
    output_path = tts.generate(
        ref_audio="path/to/ref.wav",
        ref_text="Reference text",
        gen_text="Text to generate",
        # BUG FIX: generate() declares fix_duration before the other keyword
        # arguments; omitting it raised TypeError. None lets the model pick
        # the duration.
        fix_duration=None,
        output_dir="outputs",
        output_file="generated.wav",
        remove_silence=True,
    )

    print(f"Generated audio saved to: {output_path}")
