import torch
import time
import soundfile as sf
import numpy as np
import re, os
from kokoro import KPipeline, KModel
from typing import List, Optional, Tuple

# Sample rate (Hz) used for duration math and for writing output wav files.
SAMPLE_RATE = 24000
# Number of zero samples prepended as leading silence to generated audio.
N_ZEROS = 5000

# Model loading configuration: local Kokoro v1.1 zh checkpoint + config.
repo_id = 'Kokoro-82M-v1.1-zh'
# Prefer GPU when available.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model_path = './tools/kk_tts/models/kokoro-v1_1-zh.pth'
config_path = './tools/kk_tts/models/config.json'
               
class KkTTS:
    """Kokoro-based text-to-speech helper for Chinese / mixed Chinese-English text.

    Loads a voice embedding and the Kokoro v1.1 zh model, then exposes
    sentence-, word- and char-granularity synthesis with LRC (lyric)
    timestamp export into ./audios/.
    """

    def __init__(self, voice_zf="af_heart.pt", lang_code='ze'):
        """Load the voice embedding and build the synthesis pipeline.

        Args:
            voice_zf: file name of the voice tensor under ./tools/kk_tts/pts/.
            lang_code: 'ze' (default) for mixed Chinese/English; any other
                Kokoro lang code is passed through to KPipeline unchanged.
        """
        # Load the voice embedding used for all synthesis calls.
        self.voice_zf = voice_zf
        self.voice_zf_tensor = torch.load(f"./tools/kk_tts/pts/{voice_zf}", weights_only=True)
        self.model = KModel(model=model_path, config=config_path, repo_id=repo_id).to(device).eval()
        # English pipeline is built lazily (and cached) by en_callable.
        self._en_pipeline = None
        if lang_code == "ze":
            # Mixed text: Chinese pipeline with an English phonemization callback.
            self.pipeline = KPipeline(lang_code='z', repo_id=repo_id, model=self.model, en_callable=self.en_callable)
        else:
            self.pipeline = KPipeline(lang_code=lang_code, repo_id=repo_id, model=self.model)

    def en_callable(self, text):
        """Phonemize an English fragment embedded in Chinese text.

        Passed to KPipeline as the en_callable hook for mixed-language input;
        a couple of names get hard-coded phoneme overrides.
        """
        # Hard-coded overrides checked first so no pipeline work is needed.
        if text == 'Kokoro':
            return 'kˈOkəɹO'
        elif text == 'Sol':
            return 'sˈOl'
        # Fix: the original built a brand-new KPipeline on every callback
        # invocation; build it once and cache it on the instance.
        if getattr(self, '_en_pipeline', None) is None:
            self._en_pipeline = KPipeline(lang_code='a', repo_id=repo_id, model=self.model)
        return next(self._en_pipeline(text, voice=self.voice_zf_tensor)).phonemes

    def speed_callable(self, len_ps):
        """Default speed schedule: slow down gradually for longer phoneme strings.

        Args:
            len_ps: phoneme-string length of the segment.
        Returns:
            float speed multiplier (base schedule scaled by 1.1 overall).
        """
        speed = 0.8
        if len_ps <= 83:
            speed = 1
        elif len_ps < 183:
            # Linear ramp from 1.0 down towards 0.8 between 83 and 183 phonemes.
            speed = 1 - (len_ps - 83) / 500
        return speed * 1.1

    def run_tts(self, text: str, output: Optional[str] = None):
        """Synthesize a single (usually one-sentence) text and return the waveform.

        Args:
            text: text to synthesize (Chinese, or mixed when lang_code='ze').
            output: optional basename; when given, audio is also written to
                ./audios/{output}.wav.
        Returns:
            The waveform of the first generated segment, at SAMPLE_RATE.
        """
        print(f"\n{text}")
        start_time = time.time()
        generator = self.pipeline(text, voice=self.voice_zf_tensor, speed=self.speed_callable)
        # NOTE(review): only the first yielded segment is used; callers are
        # expected to pre-split long text into single sentences (split_text).
        result = next(generator)
        wav = result.audio
        speech_len = len(wav) / SAMPLE_RATE
        print('分句时长 {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))

        if output is not None:  # fix: identity comparison with None
            os.makedirs("./audios", exist_ok=True)  # fix: ensure output dir exists
            sf.write(f"./audios/{output}.wav", wav, SAMPLE_RATE)
        return wav

    def split_text(self, text: str):
        """Split text into sentences on Chinese/ASCII sentence punctuation.

        Returns:
            List of stripped sentences, each keeping its trailing delimiter.
        """
        # Drop newlines before splitting.
        p = text.replace('\n', '')

        # Split while CAPTURING the delimiters so they can be re-attached:
        # segments alternate [text, delim, text, delim, ...].
        segments = re.split(r'([。；！？.!?;])', p)

        sentences = []
        for i in range(0, len(segments), 2):
            if segments[i]:  # skip empty fragments between adjacent delimiters
                sentence = segments[i]
                if i + 1 < len(segments):  # re-attach the delimiter if present
                    sentence += segments[i + 1]

                # Drop whitespace-only sentences.
                s = sentence.strip()
                if len(s) > 0:
                    sentences.append(s)

        return sentences

    def start_text_to_audio(self, sentences: List[str], output: str):
        """Synthesize sentences, concatenate the audio, and emit an LRC file.

        Args:
            sentences: pre-split sentence list (see split_text).
            output: basename for ./audios/{output}_{voice}.wav / .lrc.
        Returns:
            (audio_path, lrc_path)
        Raises:
            ValueError: if no audio was produced (empty sentence list).
        """
        wavs = []
        timestamps = []
        current_time = 0.0

        for i, line in enumerate(sentences):
            # Timestamp of this sentence within the concatenated audio.
            start_time = current_time

            wav = self.run_tts(text=line)
            # Fix: the original guarded on `wavs` being non-empty, which is
            # impossible when i == 0, so the leading silence was never added.
            # Now consistent with process_text_units_for_lrc.
            if i == 0 and N_ZEROS > 0:
                wav = np.concatenate([np.zeros(N_ZEROS), wav])
                start_time += N_ZEROS / SAMPLE_RATE

            # Record (start_time, text) for the LRC file.
            timestamps.append((start_time, line))

            current_time += len(wav) / SAMPLE_RATE
            wavs.append(wav)

        if not wavs:
            # Fix: fail clearly instead of np.concatenate([]) raising later.
            raise ValueError("no sentences to synthesize")

        # Write the concatenated audio.
        os.makedirs("./audios", exist_ok=True)  # fix: ensure output dir exists
        audio_path = f'./audios/{output}_{self.voice_zf}.wav'
        sf.write(audio_path, np.concatenate(wavs), SAMPLE_RATE)

        # Write the matching LRC lyric file.
        lrc_path = f'./audios/{output}_{self.voice_zf}.lrc'
        self.generate_lrc_file(timestamps, lrc_path)

        return audio_path, lrc_path

    def generate_lrc_file(self, timestamps: List[Tuple[float, str]], output_path: str):
        """Write (start_seconds, text) pairs as an LRC file ([mm:ss.xxx]text).

        Args:
            timestamps: list of (start time in seconds, line text).
            output_path: destination .lrc path; parent dirs are created.
        """
        # Fix: os.makedirs('') raises when output_path has no directory part.
        out_dir = os.path.dirname(output_path)
        if out_dir:
            os.makedirs(out_dir, exist_ok=True)

        with open(output_path, 'w', encoding='utf-8') as f:
            # LRC header metadata.
            f.write("[ti:Generated TTS]\n")
            f.write(f"[ar:{self.voice_zf}]\n")
            f.write(f"[al:Kokoro TTS]\n")
            f.write(f"[by:KkTTS]\n\n")

            # One timestamped line per unit.
            for time_sec, text in timestamps:
                minutes = int(time_sec // 60)
                seconds = time_sec % 60
                timestamp = f"[{minutes:02d}:{seconds:06.3f}]"
                f.write(f"{timestamp}{text}\n")

        print(f"LRC歌词文件已生成: {output_path}")

    def generate_detailed_lrc(self, text: str, output: str, granularity: str = 'sentence'):
        """Generate audio + LRC at sentence, word, or character granularity.

        Args:
            text: raw input text.
            output: basename for the ./audios output pair.
            granularity: 'sentence' | 'word' | 'char'.
        Returns:
            (audio_path, lrc_path)
        Raises:
            ValueError: for an unsupported granularity value.
        """
        if granularity == 'sentence':
            sentences = self.split_text(text)
            return self.start_text_to_audio(sentences, output)

        elif granularity == 'word':
            # Word granularity: whitespace-split for English, per-character
            # for Chinese runs.
            text = text.replace('\n', ' ')
            words = []

            if self.has_english_char(text) in ['z', 'ze']:
                # Split on sentence punctuation and whitespace first.
                segments = re.split(r'([。；！？.!?;\s])', text)

                for segment in segments:
                    if segment.strip():
                        if re.search(r'[a-zA-Z]', segment):
                            # English run: split on whitespace.
                            words.extend(segment.split())
                        else:
                            # Chinese run: one unit per character.
                            for char in segment:
                                if char.strip():
                                    words.append(char)
            else:
                # Pure English: whitespace split.
                words = text.split()

            return self.process_text_units_for_lrc(words, output)

        elif granularity == 'char':
            # Character granularity: every non-whitespace character is a unit.
            chars = [char for char in text if char.strip()]
            return self.process_text_units_for_lrc(chars, output)

        else:
            raise ValueError(f"不支持的粒度: {granularity}，请使用 'sentence', 'word' 或 'char'")

    def process_text_units_for_lrc(self, units: List[str], output: str):
        """Synthesize arbitrary text units (words/chars), concatenate, emit LRC.

        Mirrors start_text_to_audio but for pre-tokenized units.

        Args:
            units: text units to synthesize in order.
            output: basename for ./audios/{output}_{voice}.wav / .lrc.
        Returns:
            (audio_path, lrc_path)
        Raises:
            ValueError: if no audio was produced (no non-empty units).
        """
        wavs = []
        timestamps = []
        current_time = 0.0

        for i, unit in enumerate(units):
            unit_text = unit.strip()
            if not unit_text:
                continue

            start_time = current_time
            wav = self.run_tts(text=unit_text)

            # Leading silence for the very first unit only.
            if i == 0 and N_ZEROS > 0:
                wav = np.concatenate([np.zeros(N_ZEROS), wav])
                start_time += N_ZEROS / SAMPLE_RATE

            # Record (start_time, text) for the LRC file.
            timestamps.append((start_time, unit_text))

            current_time += len(wav) / SAMPLE_RATE
            wavs.append(wav)

        if not wavs:
            # Fix: fail clearly instead of np.concatenate([]) raising later.
            raise ValueError("no text units to synthesize")

        # Write the concatenated audio.
        os.makedirs("./audios", exist_ok=True)  # fix: ensure output dir exists
        audio_path = f'./audios/{output}_{self.voice_zf}.wav'
        sf.write(audio_path, np.concatenate(wavs), SAMPLE_RATE)

        # Write the matching LRC lyric file.
        lrc_path = f'./audios/{output}_{self.voice_zf}.lrc'
        self.generate_lrc_file(timestamps, lrc_path)

        return audio_path, lrc_path

    @staticmethod
    def has_english_char(text: str):
        """Classify the script of text.

        Returns:
            'z'  - Chinese characters only,
            'a'  - English letters only,
            'ze' - both, or neither (neither is treated as mixed).
        """
        # CJK Unified Ideographs range \u4e00-\u9fff.
        has_zh = bool(re.search(r'[\u4e00-\u9fff]', text))
        has_en = bool(re.search(r'[a-zA-Z]', text))

        if has_zh and has_en:
            return 'ze'
        elif has_zh and not has_en:
            return "z"
        elif not has_zh and has_en:
            return 'a'
        else:
            return 'ze'