from AR.models.t2s_lightning_module import Text2SemanticLightningModule
from transformers import AutoModelForMaskedLM, AutoTokenizer # BERT 目的 word2token 
from transformers import Wav2Vec2FeatureExtractor, HubertModel # HUBERT 目的 audio2prompt
from module.models import SynthesizerTrn
from text import cleaned_text_to_sequence, chinese
from text.cleaner import clean_text_inf, clean_text
from funasr import AutoModel
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from utils import get_hparams_from_file, DictToAttrRecursive
from utils.text_normalize import Normalizer as ZhNormalizer
from utils.cutter import split, splits, cutter
from utils.slice import Slicer
import torch, os, re
import LangSegment
import traceback
from torch import nn
from copy import deepcopy as copy
import json, ffmpeg, yaml
import numpy as np
import librosa, soundfile as sf
from scipy.io import wavfile
import glob
## functions
def load_audio(file, sr):
    """Decode an audio file to a mono float32 waveform at the given sample rate.

    Delegates decoding/resampling to the ffmpeg CLI (via ffmpeg-python), so any
    container/codec ffmpeg understands is accepted.

    Args:
        file: Path to the audio file.
        sr: Target sample rate in Hz.

    Returns:
        1-D ``np.float32`` array of samples.

    Raises:
        RuntimeError: If the path does not exist or ffmpeg fails to decode.
    """
    try:
        if not os.path.exists(file):
            raise RuntimeError(
                "You input a wrong audio path that does not exists, please fix it!"
            )
        # Decode to raw 32-bit-float PCM on stdout; "-nostdin" keeps ffmpeg
        # from trying to read the terminal when run inside a pipeline.
        out, _ = (
            ffmpeg.input(file, threads=0)
            .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr)
            .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
        )
    except Exception as e:
        traceback.print_exc()
        # Chain the original exception so the root cause is not lost.
        raise RuntimeError("音频加载失败") from e
    return np.frombuffer(out, np.float32).flatten()
  

## class

class NullTNProcessor:
    """No-op stand-in for a text normalizer.

    Accepts (and ignores) any constructor arguments so it can be swapped in
    for ZhNormalizer without touching call sites; ``normalize`` is identity.
    """

    def __init__(self, *args, **kwargs):
        # Intentionally stateless: arguments are accepted only for
        # signature compatibility with the real normalizer.
        pass

    def normalize(self, text):
        """Return *text* unchanged."""
        return text

class CNHubert(nn.Module):
    """Wrapper around a locally stored Chinese HuBERT checkpoint.

    Loads the HuBERT model and its Wav2Vec2 feature extractor strictly from a
    local directory (``local_files_only=True`` — no hub downloads).
    """

    def __init__(self, base_path):
        """
        Args:
            base_path: Directory containing the pretrained HuBERT files.

        Raises:
            FileNotFoundError: If *base_path* does not exist.
        """
        super().__init__()
        # Guard clause: fail fast with a clear error instead of letting
        # transformers attempt a (forbidden) network download.
        if not os.path.exists(base_path):
            raise FileNotFoundError(base_path)
        self.model = HubertModel.from_pretrained(base_path, local_files_only=True)
        self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
            base_path, local_files_only=True
        )

# %% Pipeline Abstract
class PipelineAbstract():
    """Base class for pipelines holding torch tensors / modules as attributes.

    Provides generic device movement and a custom pickle protocol: tensors
    are detached to CPU and module weights are serialized separately so the
    pipeline object survives ``pickle``/``torch.save`` round-trips.
    """

    def to_device(self, device):
      """Move every tensor / nn.Module attribute to *device*; return self."""
      self.device = device
      for k, v in self.__dict__.items():
        if isinstance(self.__dict__[k], torch.Tensor):
          self.__dict__[k] = self.__dict__[k].to(device)
        if isinstance(self.__dict__[k], (nn.Module, nn.ModuleDict, nn.ModuleList)):
          self.__dict__[k] = self.__dict__[k].to(device)
      return self

    def __getstate__(self):
      # Custom serialization: detach tensors to CPU; replace each nn.Module
      # attribute with its class object and stash its state_dict separately
      # under "model_params" so the weights are picklable.
      state = self.__dict__.copy()
      state["model_params"] = {}
      for k, v in self.__dict__.items():
        if isinstance(self.__dict__[k], torch.Tensor):
          state[k] = v.detach().cpu()
        if isinstance(self.__dict__[k], (nn.Module, nn.ModuleDict, nn.ModuleList)):
          state[k] = type(v)
          state['model_params'][k] = v.state_dict() if hasattr(v, 'state_dict') else None
      return state

    def __setstate__(self, state): # on deserialization, rebuild models via init_model automatically
      if "model_params" in state:
        model_params = state.pop("model_params")
      else:
        model_params = {}
      self.__dict__.update(state)
      if hasattr(self, "init_model"):
        self.init_model()
        print("INIT MODEL!")
      else:
        # Fallback: re-create each serialized module and restore its weights.
        # NOTE(review): `v` here is a state_dict (or None), never a class, so
        # `isinstance(v, type)` is always False and `c` — the class object
        # stored by __getstate__ — is never instantiated before
        # load_state_dict is called on it. This branch looks broken; confirm
        # it is unreachable (all subclasses define init_model).
        for k, v in model_params.items():
          c = self.__dict__[k]
          self.__dict__[k] = c() if isinstance(v, type) else c
          self.__dict__[k].load_state_dict(v)
        print("LOAD MODEL PARAMS!")

# %% Inference General
class GeneralPipeline(PipelineAbstract):
  """Text/audio front-end shared by the synthesis pipelines.

  Wraps a BERT model (phone-level text features) and a CNHubert model (SSL
  features from the reference audio). ``__call__`` converts raw target text
  plus an optional reference utterance into the tensors consumed by
  DedicatedPipeline: per-segment BERT features, phoneme lists, reference SSL
  content and reference phonemes.
  """

  def __init__(self, hubert_path, bert_path, sr=16000, device="cuda:0", is_half=False, init_model=True):
    """
    Args:
      hubert_path: local CNHubert checkpoint directory.
      bert_path: local BERT (tokenizer + masked-LM) checkpoint directory.
      sr: sample rate the 0.5 s silence padding is generated at.
      device: torch device string for both feature models.
      is_half: when True, run models and features in float16.
      init_model: load models immediately; otherwise call init_model() later.
    """
    super().__init__()
    self.hubert_path = hubert_path
    self.bert_path = bert_path
    # self.zh_tn_model = ZhNormalizer(remove_erhua=True, overwrite_cache=True) # Chinese written-form -> spoken-form normalization
    self.zh_tn_model = NullTNProcessor() # ZhNormalizer is slow to start, so the no-op NullTNProcessor is used for testing
    self.dtype = torch.float16 if is_half else torch.float32
    # init
    self.is_init = False
    self.is_half = is_half
    self.device = device
    self.zero_wav = np.zeros( # silent padding appended after the reference wav
          int(sr * 0.5),
          dtype=np.float16 if is_half else np.float32,
      )
    if init_model:
      self.init_model()

  def init_model(self):
    """Load tokenizer, BERT and HuBERT models onto self.device."""
    self.tokenizer = AutoTokenizer.from_pretrained(self.bert_path)
    self.bert_model = AutoModelForMaskedLM.from_pretrained(self.bert_path)
    self.ssl_model = CNHubert(self.hubert_path).eval()
    if self.is_half:
      self.ssl_model = self.ssl_model.half()
      self.bert_model = self.bert_model.half()
    self.ssl_model = self.ssl_model.to(self.device)
    self.bert_model = self.bert_model.to(self.device)
    self.is_init = True

  def to_device(self, device):
    """Move only the two heavy models (overrides the generic base version)."""
    self.device = device
    self.ssl_model = self.ssl_model.to(device)
    self.bert_model = self.bert_model.to(device)
    return self

  def _call_preprocess(self, raw:dict, ref:dict={}, **kwargs):
    """Normalize/cut the target text and extract reference-side features.

    Args:
      raw: {"text": str, "lang": str, "cutter": str} target-side inputs.
      ref: optional {"text": str, "wav": np.ndarray, "lang": str}; the wav is
        assumed to be 16 kHz mono — TODO confirm against callers.
      **kwargs: "version" (default "v2"), forwarded to the text cleaners.

    Returns:
      (texts, lang, ssl_content, phones_ref, bert_ref); the last three are
      None when no usable reference was provided.

    NOTE(review): ``ref={}`` is a mutable default; it is only read here,
    never mutated, so it is currently harmless.
    """
    version = kwargs.get("version", "v2")
    is_ref = True
    text = raw.get("text", None)
    if text is None:
      raise ValueError("there is no text in raw data.")
    lang = raw.get("lang", "en")
    cutway = raw.get("cutter", "line")
    ref_text = ref.get("text", None)
    ref_wav = ref.get("wav", None) # wav16k: reference waveform, presumably 16 kHz
    ref_lang = ref.get("lang", "en")
    ssl_content = None
    phones_ref = None
    bert_ref = None
    if ref_text is None or ref_wav is None or len(ref_text.strip()) == 0:
      is_ref = False
      print("WARNING: there is no ref text or ref wav in ref dict.")
    if is_ref:
      assert isinstance(ref_text, str)
      ref_text = self.zh_tn_model.normalize(ref_text)
      assert isinstance(ref_wav, np.ndarray)
      if self.is_half:
        ref_wav = ref_wav.astype(np.float16)
      prompt_text = ref_text.strip("\n")
      # Ensure the prompt ends with sentence punctuation.
      if (prompt_text[-1] not in splits): 
        prompt_text += "。" if ref_lang != "en" else "."
      print("实际输入的参考文本:", prompt_text)
      with torch.no_grad():
        # wav16k, sr = librosa.load(ref_wav_path, sr=16000)
        wav16k = ref_wav
        # 48000–160000 samples = 3–10 s at 16 kHz; outside that range quality suffers.
        if (wav16k.shape[0] > 160000 or wav16k.shape[0] < 48000):
            print("WARNING: the ref wav is too short or too long. this will affect the inference time and result.")
        wav16k = torch.from_numpy(wav16k)
        zero_wav_torch = torch.from_numpy(self.zero_wav)
        print("ref wav shape:", wav16k.shape)
        wav16k = wav16k.to(self.device, dtype=self.dtype)
        zero_wav_torch = zero_wav_torch.to(self.device, dtype=self.dtype)
        wav16k = torch.cat([wav16k, zero_wav_torch])
        ssl_content = self.ssl_model.model(wav16k.unsqueeze(0))["last_hidden_state"].transpose(1, 2).data.cpu()
        phones_ref, bert_ref, norm_text_ref = self.get_phones_and_bert(prompt_text, ref_lang, version)

    # Text normalization only applies to Chinese/Cantonese targets.
    if 'zh' in lang or 'yue' in lang:
      text = self.zh_tn_model.normalize(text)
    print("cutmethod:", cutway)
    text = cutter[cutway](text)
    while "\n\n" in text:
        text = text.replace("\n\n", "\n")
    texts = text.split("\n")
    texts = self.process_text(texts)
    texts = self.merge_short_text_in_array(texts, 5)
    return texts, lang, ssl_content, phones_ref, bert_ref

  def _call_postprocess_step(self, text, lang, version, bert_ref=None):
      """Extract (bert, phones) for one text segment, prepending bert_ref when given."""
      # Ensure the segment ends with sentence punctuation.
      if (text[-1] not in splits): text += "。" if lang != "en" else "."
      phones, bert, norm_text = self.get_phones_and_bert(text, lang, version)
      if not bert_ref is None:
          bert = torch.cat([bert_ref, bert], 1)
      return bert.data, phones

  def _call_postprocess(self, texts, lang, ssl_content, phones_ref, bert_ref, **kwargs):
    """Run _call_postprocess_step over all segments; skip blank ones."""
    version = kwargs.get("version", "v2")
    phones_list = []
    berts = []
    for idx, text in enumerate(texts):
        # Skip blank lines in the target text so they do not raise errors.
        if (len(text.strip()) == 0):
            continue
        bert, phones = self._call_postprocess_step(text, lang, version, bert_ref)
        berts.append(bert.data.cpu())
        phones_list.append(phones)
    return berts, phones_list, ssl_content, phones_ref

  def __call__(self, raw:dict, ref:dict={}, **kwargs):
    """Full front-end: returns (berts, phones_list, ssl_content, phones_ref)."""
    texts, lang, ssl_content, phones_ref, bert_ref = self._call_preprocess(raw, ref, **kwargs)
    berts, phones_list, ssl_content, phones_ref = self._call_postprocess(texts, lang, ssl_content, phones_ref, bert_ref, **kwargs)
    return berts, phones_list, ssl_content, phones_ref

  @staticmethod
  def process_text(texts):
    """Drop empty/whitespace-only entries; raise if nothing valid remains."""
    _text=[]
    if all(text in [None, " ", "\n",""] for text in texts):
        raise ValueError("请输入有效文本")
    for text in texts:
        if text in  [None, " ", ""]:
            pass
        else:
            _text.append(text)
    return _text

  @staticmethod
  def merge_short_text_in_array(texts, threshold):
    """Greedily concatenate consecutive segments until each is >= threshold chars.

    A trailing remainder shorter than threshold is appended to the last
    merged segment (or kept alone if nothing was merged yet).
    """
    if (len(texts)) < 2:
        return texts
    result = []
    text = ""
    for ele in texts:
        text += ele
        if len(text) >= threshold:
            result.append(text)
            text = ""
    if (len(text) > 0):
        if len(result) == 0:
            result.append(text)
        else:
            result[len(result) - 1] += text
    return result

  def extract_text_feature(self, text, language="zh", version="v2"):
    """Clean one line of text and return (bert, phones_str, word2ph, norm_text).

    Used by the fine-tuning preprocessing path; phones are space-joined.
    """
    language = language.lower()
    phones, word2ph, norm_text = clean_text(text, language, version)
    phones = " ".join(phones)
    bert = self.get_bert_feature(norm_text, word2ph).data.cpu()
    return bert, phones, word2ph, norm_text

  def extract_hubert_ssl(self, wavpath, sr=None):
    """Load a wav and return (hubert_ssl_content, wav32k).

    SSL features are computed from a 16 kHz copy; a 32 kHz copy is returned
    for the vocoder-side training data.

    NOTE(review): librosa.resample is called with positional (orig_sr,
    target_sr) arguments, which only works on librosa < 0.10 — confirm the
    pinned librosa version.
    """
    wavnp, sr = librosa.load(wavpath, sr)
    tmp_max = np.abs(wavnp).max()
    # maxx = 0.95
    # alpha = 0.5
    # wavnp = (wavnp / tmp_max * (maxx * alpha*1145.14)) + ((1 - alpha)*1145.14) * wavnp
    if tmp_max > 1: wavnp = wavnp / tmp_max
    if sr != 16000:
      wav16k = librosa.resample(wavnp, sr, 16000)
    else:
      wav16k = wavnp
    wav16k = torch.from_numpy(wav16k).to(self.device, dtype=self.dtype)
    if sr != 32000:
      wav32k = librosa.resample(wavnp, sr, 32000)
    else:
      wav32k = wavnp
    ssl_content = self.ssl_model.model(wav16k.unsqueeze(0))["last_hidden_state"].transpose(1, 2).data.cpu()
    return ssl_content, wav32k

  def get_bert_feature(self, text, word2ph):
    """Return phone-level BERT features of shape (hidden, n_phones).

    Each character's hidden vector is repeated word2ph[i] times so the
    feature sequence aligns with the phoneme sequence.
    """
    with torch.no_grad():
      inputs = self.tokenizer(text, return_tensors="pt")
      for i in inputs:
        inputs[i] = inputs[i].to(self.device)
      res = self.bert_model(**inputs, output_hidden_states=True)
      # [-3:-2] selects exactly one hidden layer (third from last); [1:-1]
      # strips the [CLS]/[SEP] positions so length matches the characters.
      res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1]
    assert len(word2ph) == len(text)
    phone_level_feature = []
    for i in range(len(word2ph)):
      repeat_feature = res[i].repeat(word2ph[i], 1)
      phone_level_feature.append(repeat_feature)
    phone_level_feature = torch.cat(phone_level_feature, dim=0)
    return phone_level_feature.T

  def get_bert_inf(self, phones, word2ph, norm_text, language):
    """BERT features for one language run; zeros for non-Chinese (1024-dim)."""
    language = language.replace("all_", "")
    if language == "zh":
        bert = self.get_bert_feature(norm_text, word2ph)
    else:
        bert = torch.zeros((1024, len(phones)))
    bert = bert.to(self.device, dtype=self.dtype)
    return bert

  def get_phones_and_bert(self, text, language, version="v2", final=False):
    """Return (phones, bert, norm_text) for *text*, handling mixed languages.

    Single-language modes ("en", "all_*") go straight through the cleaner;
    "zh"/"ja"/"ko"/"yue"/"auto"* modes are first split into per-language runs
    by LangSegment and the per-run features are concatenated. Very short
    results (< 6 phones) are retried once with a leading "." to stabilize
    the model (the ``final`` flag stops the recursion).
    """
    if language in {"en", "all_zh", "all_ja", "all_ko", "all_yue"}:
      language = language.replace("all_","")
      if language == "en":
        LangSegment.setfilters(["en"])
        formattext = " ".join(tmp["text"] for tmp in LangSegment.getTexts(text))
      else:
        # CJK Han characters cannot be told apart automatically; trust the user's language choice
        formattext = text
      while "  " in formattext:
        formattext = formattext.replace("  ", " ")
      if language == "zh":
        if re.search(r'[A-Za-z]', formattext):
          # Latin letters inside Chinese: uppercase them and re-normalize as mixed text.
          formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
          formattext = chinese.mix_text_normalize(formattext)
          return self.get_phones_and_bert(formattext, "zh", version)
        else:
          phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
          bert = self.get_bert_feature(norm_text, word2ph)
      elif language == "yue" and re.search(r'[A-Za-z]', formattext):
        formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
        formattext = chinese.mix_text_normalize(formattext)
        return self.get_phones_and_bert(formattext, "yue", version)
      else:
        phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
        bert = torch.zeros((1024, len(phones)))
    elif language in {"zh", "ja", "ko", "yue", "auto", "auto_yue"}:
      textlist=[]
      langlist=[]
      LangSegment.setfilters(["zh","ja","en","ko"])
      if language == "auto":
        for tmp in LangSegment.getTexts(text):
          langlist.append(tmp["lang"])
          textlist.append(tmp["text"])
      elif language == "auto_yue":
        for tmp in LangSegment.getTexts(text):
          if tmp["lang"] == "zh":
              tmp["lang"] = "yue"
          langlist.append(tmp["lang"])
          textlist.append(tmp["text"])
      else:
        for tmp in LangSegment.getTexts(text):
          if tmp["lang"] == "en":
            langlist.append(tmp["lang"])
          else:
            # CJK Han characters cannot be told apart automatically; trust the user's language choice
            langlist.append(language)
          textlist.append(tmp["text"])
      print(textlist)
      print(langlist)
      phones_list = []
      bert_list = []
      norm_text_list = []
      for i in range(len(textlist)):
        lang = langlist[i]
        phones, word2ph, norm_text = clean_text_inf(textlist[i], lang, version)
        bert = self.get_bert_inf(phones, word2ph, norm_text, lang)
        phones_list.append(phones)
        norm_text_list.append(norm_text)
        bert_list.append(bert)
      bert = torch.cat(bert_list, dim=1)
      # phones = sum(phones_list, [])
      phones = [item for sublist in phones_list for item in sublist] # flatten
      norm_text = ''.join(norm_text_list)
    if not final and len(phones) < 6:
      return self.get_phones_and_bert("." + text, language, version, final=True)
    return phones, bert.to(self.device, dtype=self.dtype), norm_text
    
# %% Inference Dedicated
class DedicatedPipeline(PipelineAbstract):
  """Speaker-specific back-end: GPT (text-to-semantic) + SoVITS (vocoder).

  Consumes the outputs of GeneralPipeline.__call__ and produces int16 audio.
  Weights can be hot-swapped via change_gpt_weights / change_sovits_weights.
  """

  def __init__(self, gpt_path=None, sovits_path=None, device="cuda:0", is_half=False, is_eval=True, sr=32000, init_model=True):
    """
    Args:
      gpt_path: GPT checkpoint file, or a directory of *.ckpt (latest is used).
      sovits_path: SoVITS checkpoint file, or a directory of *.pth.
      device: torch device string.
      is_half: run models in float16 when True.
      is_eval: put models in eval mode (inference) when True.
      sr: output sample rate used to size the 0.5 s silence between segments.
      init_model: load weights immediately; failures only clear is_init.
    """
    super().__init__()
    # init path
    self.gpt_path = gpt_path
    self.sovits_path = sovits_path
    # init model
    self.is_init = False
    self.t2s_model = None
    self.vq_model = None
    # init config
    self.device = device
    self.is_half = is_half
    self.dtype = torch.float32
    if self.is_half:
      self.dtype = torch.float16
    self.is_eval = is_eval
    self.max_sec = 60
    self.hz = 50
    self.gpt_cfg = {}
    self.sovits_cfg = {}
    self.version = "v2"
    self.dict_language = [
            "all_zh",
            "en",
            "all_ja",
            "all_yue",
            "all_ko",
            "zh",
            "ja",
            "yue",
            "ko",
            "auto",
            "auto_yue",
          ]
    self.zero_wav = np.zeros( # silent padding appended after each segment
          int(sr * 0.5),
          dtype=np.float16 if is_half else np.float32,
      )
    self.hann_window = None
    # init
    if init_model:
      self.init_model()

  def init_model(self):
    """Load both checkpoints; on failure just print and leave is_init False."""
    try:
      self.change_gpt_weights(self.gpt_path)
      self.change_sovits_weights(self.sovits_path)
      self.is_init = True
    except Exception as e:
      print(e)
      self.is_init = False

  def to_device(self, device):
    """Move only the two model attributes (overrides the generic base version)."""
    self.device = device
    self.t2s_model = self.t2s_model.to(device)
    self.vq_model = self.vq_model.to(device)
    return self

  def change_gpt_weights(self, gpt_path):
    """Load a GPT (text-to-semantic) checkpoint and update self.t2s_model.

    Accepts a .ckpt file or a directory (lexicographically last *.ckpt wins).
    Also refreshes gpt_cfg / max_sec from the checkpoint config.
    """
    if not os.path.exists(gpt_path):
      raise ValueError(f"gpt_path not exist: {gpt_path}")
    if os.path.isdir(gpt_path):
      ckpts = sorted(glob.glob(os.path.join(gpt_path, "*.ckpt")))
      if len(ckpts) == 0:
        raise ValueError(f"gpt_path not contains any ckpt: {gpt_path}")
      gpt_path = ckpts[-1]
    dict_s1 = torch.load(gpt_path, map_location="cpu")
    self.gpt_cfg = config = dict_s1["config"]
    self.max_sec = config["data"]["max_sec"] 
    t2s_model = Text2SemanticLightningModule(config, "****", is_train=not self.is_eval)
    print(t2s_model.load_state_dict(dict_s1["weight"], strict=False))    
    if self.is_half:
        t2s_model = t2s_model.half()
    self.t2s_model = t2s_model.to(self.device)
    if self.is_eval:
      self.t2s_model.eval()
    total = sum([param.nelement() for param in t2s_model.parameters()])
    self.gpt_path = gpt_path
    print("Number of parameter: %.2fM" % (total / 1e6))

  def change_sovits_weights(self, sovits_path):
    """Load a SoVITS checkpoint and update self.vq_model / sovits_cfg.

    Accepts a .pth file or a directory (lexicographically last *.pth wins).
    The model version (v1/v2) is inferred from the text-embedding size.
    """
    if not os.path.exists(sovits_path):
      raise ValueError(f"sovits_path not exist: {sovits_path}")
    if os.path.isdir(sovits_path):
      ckpts = sorted(glob.glob(os.path.join(sovits_path, "*.pth")))
      if len(ckpts) == 0:
        raise ValueError(f"sovits_path not contains any ckpt: {sovits_path}")
      sovits_path = ckpts[-1]
    dict_s2 = torch.load(sovits_path, map_location="cpu")
    self.sovits_cfg = hps = DictToAttrRecursive(dict_s2["config"])
    hps.model.semantic_frame_rate = "25hz"
    # 322 text-embedding rows is the v1 vocabulary size.
    if dict_s2['weight']['enc_p.text_embedding.weight'].shape[0] == 322:
        hps.model.version = "v1"
    else:
        hps.model.version = "v2"
    self.version = hps.model.version
    vq_model = SynthesizerTrn(
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model
    )
    # The posterior encoder is only needed for training; drop it for
    # fine-tuned (non-pretrained) inference checkpoints.
    if ("pretrained" not in sovits_path):
        del vq_model.enc_q
    if self.is_half:
        vq_model = vq_model.half()
    self.vq_model = vq_model.to(self.device)
    if self.is_eval:
      self.vq_model.eval()
    self.hann_window = torch.hann_window(hps.data.win_length)
    self.sovits_path = sovits_path
    print(vq_model.load_state_dict(dict_s2["weight"], strict=False))    

  def _call_preprocess(self, ssl_content, refers, refers_sr=16000, need_spepc=True):
    """Turn reference SSL content into a semantic prompt and wavs into spectrograms.

    Returns (prompt, refers); prompt is None when no ssl_content was given.
    """
    print("start dedicated pipeline!")
    if isinstance(ssl_content, np.ndarray):
      ssl_content = torch.from_numpy(ssl_content)
    if isinstance(ssl_content, torch.Tensor):
      codes = self.vq_model.extract_latent(ssl_content.to(self.device, dtype=self.dtype))
      prompt_semantic = codes[0, 0]
      prompt = prompt_semantic.unsqueeze(0).to(self.device)
    else:
      prompt = None
    if isinstance(refers, list) and need_spepc:
      refers = [self.get_spepc(wav, refers_sr).to(self.device, dtype=self.dtype) for wav in refers] # convert raw wavs to spectrograms
    print("extract latent done!")
    return prompt, refers

  def _call_step(self, bert, phonemes, prompt=None, refers=None, phones_ref=None, top_k=20, top_p=0.6, temperature=0.6, speed=1):
    """Synthesize one text segment.

    Runs GPT inference (with optional reference prompt/phonemes prepended)
    then decodes with the vocoder. Returns int16 audio with 0.5 s of
    trailing silence.
    """
    if isinstance(bert, np.ndarray):
      bert = torch.from_numpy(bert)
    if isinstance(bert, torch.Tensor):
      bert = bert.to(self.device, dtype=self.dtype).unsqueeze(0)
    else:
      raise ValueError(f"bert type error!:{type(bert)}")
    phoneme_len: torch.Tensor
    if phones_ref is None:
      phoneme_ids = torch.LongTensor(phonemes).to(self.device).unsqueeze(0)
      phoneme_len = torch.tensor(0).to(self.device)
    else:
      # Reference phonemes are prepended; phoneme_len marks where they end.
      phoneme_ids = torch.LongTensor(phones_ref + phonemes).to(self.device).unsqueeze(0)
      phoneme_len = torch.tensor(len(phones_ref)).to(self.device)
    # phoneme_len = torch.tensor(phoneme_ids.shape[-1]).to(self.device)
    with torch.no_grad():
      pred_semantic, idx = self.t2s_model.model.infer_panel(
          phoneme_ids,
          phoneme_len,
          prompt,
          bert,
          # prompt_phone_len=ph_offset,
          top_k=top_k,
          top_p=top_p,
          temperature=temperature,
          early_stop_num=self.hz * self.max_sec,
      )
      # Keep only the newly generated tail (last idx tokens) for decoding.
      pred_semantic = pred_semantic[:, -idx:].unsqueeze(0)
      audio = self.vq_model.decode(pred_semantic, 
                                  torch.LongTensor(phonemes).to(self.device).unsqueeze(0), 
                                  refers,
                                  speed=speed).detach().cpu().numpy()[0, 0]
    # Normalize only if clipping would occur.
    max_audio = np.abs(audio).max()
    if max_audio>1:
      audio /= max_audio
    return (np.concatenate([audio, self.zero_wav]) * 32768).astype(np.int16)

  def __call__(self, berts, phones_list, ssl_content=None, phones_ref=None, top_k=20, top_p=0.6, temperature=0.6, speed=1, refers=None, refers_sr=16000):
    """Synthesize all segments and concatenate them into one int16 waveform."""
    prompt, refers = self._call_preprocess(ssl_content, refers, refers_sr)
    audio_opt = []
    for bert, phonemes in zip(berts, phones_list):
      audio = self._call_step(bert, phonemes, prompt, refers, phones_ref, top_k, top_p, temperature, speed)
      audio_opt.append(audio)
    return np.concatenate(audio_opt, 0)

  def get_spepc(self, audio, org_sr=None):
    """
    Compute the linear spectrogram of *audio* at the model's sampling rate.

    NOTE(review): librosa.resample is called with positional (orig_sr,
    target_sr) arguments, which only works on librosa < 0.10 — confirm the
    pinned librosa version.
    """
    hps:DictToAttrRecursive = self.sovits_cfg
    org_sr = org_sr if org_sr is not None else hps.data.sampling_rate
    audio = librosa.resample(audio, org_sr, hps.data.sampling_rate)
    maxx = np.abs(audio).max()
    if maxx>1:
      audio/=min(2,maxx)
    t_audio = torch.from_numpy(audio).unsqueeze(0)
    spec = self.spectrogram_torch(
        t_audio,
        hps.data.filter_length,
        hps.data.sampling_rate,
        hps.data.hop_length,
        hps.data.win_length,
        center=False,
    )
    return spec

  def spectrogram_torch(self, y, n_fft, sampling_rate, hop_size, win_size, center=False):
      """STFT magnitude spectrogram using the cached Hann window.

      Requires change_sovits_weights to have been called first (it creates
      self.hann_window sized to the checkpoint's win_length).
      """
      if self.hann_window is None:
        raise ValueError("hann_window is None, please call load sovits weights first")
      y = y[None, ...]
      # Reflect-pad so frames are centered the same way as during training.
      y = torch.nn.functional.pad(
          y,
          (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
          mode="reflect",
      )
      y = y.squeeze(1)
      spec = torch.stft(
          y,
          n_fft,
          hop_length=hop_size,
          win_length=win_size,
          window=self.hann_window,
          center=center,
          pad_mode="reflect",
          normalized=False,
          onesided=True,
          return_complex=False,
      )
      # Magnitude from (real, imag) pairs; epsilon guards the sqrt gradient.
      spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
      return spec
  
# %% ASR pipeline (funasr)
class ASRPipeline(PipelineAbstract):
  """Speech recognition front-end built on funasr (SenseVoice + VAD + punc).

  Optionally slices and denoises long recordings, then transcribes each
  piece and parses the SenseVoice tag format into structured fields.
  """

  def __init__(self, 
               model_path = "iic/SenseVoiceSmall",
               vad_path = "iic/speech_fsmn_vad_zh-cn-16k-common-pytorch", # Voice Activity Detection model — required by the punctuation model!
               punc_path = "iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
               device="cpu",
               init_model=False,
               ):
    """
    Args:
      model_path: ASR model id/path for funasr.
      vad_path: VAD model id/path.
      punc_path: punctuation restoration model id/path.
      device: torch device or device string.
      init_model: load the funasr model immediately when True.
    """
    self.model_path = model_path
    self.vad_path = vad_path
    self.punc_path = punc_path
    self.device = torch.device(device) if isinstance(device, str) else device
    self.is_init = False
    # Parameters for Slicer (silence-based splitting of long recordings).
    self.slice_cfg = {
      "threshold" : -40,
      "min_length" : 5000,
      "min_interval" : 300,
      "hop_size" : 10,
      "max_sil_kept" : 500,
    }
    if init_model:
      self.init_model()

  def init_model(self):
    """Instantiate the funasr AutoModel on the configured device."""
    # funasr expects a plain device string such as "cpu" / "cuda:0".
    if self.device.type == "cuda":
      if self.device.index is None:
        device_tag = "cuda:0"
      else:
        device_tag = f"cuda:{self.device.index}"
    else:
      device_tag = "cpu"
    self.model = AutoModel(
          model = self.model_path,
          vad_model = self.vad_path,
          punc_model= self.punc_path,
          device=device_tag,
          disable_update=True,
      )
    self.is_init = True

  def denoise_wav(self, wavlist):
    """Denoise each wav in-place with the modelscope FRCRN pipeline (best effort)."""
    path_denoise  = "damo/speech_frcrn_ans_cirm_16k"
    ans = pipeline(Tasks.acoustic_noise_suppression,model=path_denoise)
    # print(input_folder)
    # print(list(os.listdir(input_folder).sort()))
    for file in wavlist:
        try:
            ans(file, output_path=file)
        except:
            # NOTE(review): bare except deliberately keeps going on failure,
            # but it also swallows KeyboardInterrupt — consider `except Exception`.
            traceback.print_exc()

  def slice_wav(self, wavpath, is_denoise=True):
    """Split a long recording on silence into per-chunk wav files.

    Chunks are written next to the source file in a directory named after it.
    Returns (list_of_paths, list_of_durations_in_seconds).
    """
    stempath = os.path.dirname(wavpath)
    wavname = os.path.basename(wavpath).split(".")[0]
    todir = os.path.join(stempath, wavname)
    os.makedirs(todir, exist_ok=True)
    waveform, sr = librosa.load(wavpath, sr=None)
    slicer = Slicer(
            sr=sr,
            **self.slice_cfg
        )
    chunks = slicer.slice(waveform)
    wavlist = []
    durations = []
    for chunk, start, end in chunks:
        # Peak-normalize only when the chunk would clip.
        tmp_max = np.abs(chunk).max()
        if(tmp_max>1):
          chunk /= tmp_max
        topath = "%s/%s_%010d_%010d.wav" % (todir, wavname, start, end)
        durations.append((end - start) / sr)
        wavfile.write(
            topath,
            sr,
            (chunk * 32767).astype(np.int16),
        )
        wavlist.append(topath)
    if is_denoise:
      self.denoise_wav(wavlist)
    return wavlist, durations

  def __call__(self, wavpaths, is_slice=True):
    """Transcribe a file, a directory of files, or a list of paths.

    Returns a list of dicts from extract_result_asr, each augmented with
    "filename" and "duration". NOTE(review): durations[idx] assumes the
    funasr results come back in the same order as wavpaths — confirm.
    """
    if isinstance(wavpaths, str):
      if os.path.isdir(wavpaths):
        wavpaths = [os.path.join(wavpaths, x) for x in os.listdir(wavpaths)]
        durations = []
        for file_path in wavpaths:
          with sf.SoundFile(file_path) as f:
            durations.append(len(f) / f.samplerate)
      elif os.path.isfile(wavpaths):
        if is_slice:
          wavpaths, durations = self.slice_wav(wavpaths)
        else:
          with sf.SoundFile(wavpaths) as f:
            durations = [len(f) / f.samplerate]
          wavpaths = [wavpaths]
      else:
        raise ValueError(f"invalid wavpath: {wavpaths}")
    assert isinstance(wavpaths, list)
    allres = self.model.generate(
              input=wavpaths,
              cache={},
              language="auto",  # "zh", "en", "yue", "ja", "ko", "nospeech"
              use_itn=False,
              batch_size_s=60,
              merge_vad=True,  #
              merge_length_s=15,
          )
    outputs = []
    for idx, res in enumerate(allres):
      name = res.get("key")
      if not name.endswith(".wav"):
        name += ".wav"
      text = res.get("text", None)
      if text is None:
        continue
      output = self.extract_result_asr(text)
      output.update({"filename": name, "duration": durations[idx]})
      outputs.append(output)
    return outputs      

  @staticmethod
  def extract_result_asr(text):
    """Parse a SenseVoice "<|lang|><|emo|><|event|><|itn|>content" string.

    Returns a dict for the FIRST tagged segment only.
    NOTE(review): returns None implicitly when the pattern does not match —
    the caller then crashes on output.update(); confirm inputs always match.
    """
    pattern = r'<\|(?P<lang>.*?)\|><\|(?P<emo>.*?)\|><\|(?P<event>.*?)\|><\|(?P<itn>.*?)\|>(?P<content>.*?)((?=<\|)|$)'
    # Find all matches
    matches = re.finditer(pattern, text)
    # Extract and print the components
    for match in matches: # only the first match is returned
      lang = match.group('lang')
      emo = match.group('emo')
      event = match.group('event')
      itn = match.group('itn')
      content = match.group('content')
      return {
        'lang': lang,
        'emo': emo,
        'event': event,
        'itn': itn=="withitn",
        'content': content
      }
  
# %% Finetune pipeline
class FineTunePipeline(PipelineAbstract):
  """Data/config preparation for fine-tuning GPT (s1) and SoVITS (s2).

  Call change_work_dir(name) first; preprocess() then extracts text, BERT,
  HuBERT and semantic features per utterance and dumps the s1/s2 training
  configs into the speaker directory.
  """

  def __init__(self, basepath="", 
               s1_pretrained="GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt",
               s2_pretrained_G="GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth",
               s2_pretrained_D="GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2D2333k.pth",
               device="cpu",
               init_model=False,
               is_half=False,
               ):
    """
    Args:
      basepath: root directory under which each speaker gets a work dir.
      s1_pretrained: pretrained GPT checkpoint path.
      s2_pretrained_G / s2_pretrained_D: pretrained SoVITS generator/discriminator.
      device: torch device string.
      init_model: load the pretrained SoVITS generator now (needed for
        semantic extraction during preprocess).
      is_half: run the generator in float16 when True.
    """
    self.userpath = basepath # set to oss?
    self.name = None
    s2_cfg = "GPT_SoVITS/configs/s2.json"
    self.s2_cfg = get_hparams_from_file(s2_cfg)
    self.s1_pretrained = s1_pretrained
    self.s2_pretrained_G = s2_pretrained_G
    self.s2_pretrained_D = s2_pretrained_D
    self.device = torch.device(device)
    self.is_half = is_half
    self.dtype = torch.float16 if is_half else torch.float32
    self.vq_model = None
    self.is_init = False
    if init_model:
      self.init_model()

  def init_model(self):
    """Load the pretrained SoVITS generator (eval mode) for latent extraction."""
    hps = self.s2_cfg
    vq_model = SynthesizerTrn(
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model
    )
    if self.is_half:
        vq_model = vq_model.half()
    vq_model.eval()
    print(
        vq_model.load_state_dict(
            torch.load(self.s2_pretrained_G, map_location="cpu")["weight"], strict=False
        )
    )
    self.vq_model = vq_model.to(self.device)
    # vq_model.quantizer.vq.layers[0]._codebook.inited[:] = 1.
    self.is_init = True

  def change_work_dir(self, name=None):
    """Select speaker *name* and create its feature/log directory layout.

    Must be called before preprocess/dump_cfg_* (they raise otherwise).
    """
    self.name = name
    self.speaker_path = os.path.join(self.userpath, name)
    self.bert_path = os.path.join(self.speaker_path, "3-bert")
    self.hubert_path = os.path.join(self.speaker_path, "4-cnhubert")
    self.wav32k_path = os.path.join(self.speaker_path, "5-wav32k")
    self.logs_s2 = os.path.join(self.speaker_path, "logs_s2")
    self.logs_s1 = os.path.join(self.speaker_path, "logs_s1")
    os.makedirs(self.bert_path, exist_ok=True)
    os.makedirs(self.hubert_path, exist_ok=True)
    os.makedirs(self.wav32k_path, exist_ok=True)
    os.makedirs(self.speaker_path, exist_ok=True)
    os.makedirs(self.logs_s2, exist_ok=True)
    os.makedirs(self.logs_s1, exist_ok=True)

  def dump_cfg_s2(self, epochs=8, batch_size=16, version="v2"):
    """Write the SoVITS (s2) training config to <speaker>/config_s2.json."""
    if self.name is None:
      raise ValueError("select work_dir first")
    # Deep-copy so the template config is never mutated.
    hps = copy(self.s2_cfg)
    hps.train.batch_size = batch_size
    hps.train.epochs = epochs
    hps.train.pretrained_s2G = self.s2_pretrained_G
    hps.train.pretrained_s2D = self.s2_pretrained_D
    hps.train.if_save_latest = True
    hps.train.if_save_every_weights = True
    hps.train.save_every_epoch = 4
    hps.train.gpu_numbers = "0" # single gpu
    hps.data.exp_dir = self.speaker_path
    hps.model.version = version
    hps.s2_ckpt_dir = self.speaker_path
    hps.version = version
    hps.save_weight_dir = os.path.join(self.speaker_path, f"logs_s2/ckpt_{hps.version}")
    os.makedirs(hps.save_weight_dir, exist_ok=True)
    hps.name = self.name
    jdata = hps.to_dict()
    with open(os.path.join(self.speaker_path, "config_s2.json"), "w") as f:
      json.dump(jdata, f)

  def dump_cfg_s1(self, template="s1longer-v2.yaml"):
    """Write the GPT (s1) training config to <speaker>/config_s1.yaml."""
    if self.name is None:
      raise ValueError("select work_dir first")
    with open(os.path.join("GPT_SoVITS/configs", template), "r") as f:
      jdata = yaml.load(f, Loader=yaml.FullLoader)
    hps = DictToAttrRecursive(jdata)
    hps.train.exp_name = self.name
    hps.train.half_weights_save_dir = os.path.join(self.speaker_path, "logs_s1/ckpt_half")
    hps.train.if_dpo = False
    hps.train.if_save_every_weights = True
    hps.train.if_save_latest = True
    hps.train.save_every_n_epoch = 10
    hps.output_dir = f"{self.name}/logs_s1"
    model_path = os.environ.get("MODEL_PATH", "/models")
    # NOTE(review): this hard-codes the pretrained s1 checkpoint name instead
    # of using self.s1_pretrained — confirm whether that is intentional.
    hps.pretrained_s1 = os.path.join(model_path, "gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt")
    hps.train_phoneme_path = os.path.join(self.speaker_path, "2-name2text.txt")
    hps.train_semantic_path = os.path.join(self.speaker_path, "6-name2semantic.tsv")
    jdata = hps.to_dict()
    with open(os.path.join(self.speaker_path, "config_s1.yaml"), "w") as f:
      yaml.dump(jdata, f)

  def preprocess(self, gpl:GeneralPipeline, asr_res_path=None):
    """Build the per-utterance training features from an ASR transcript file.

    The transcript has one "wavpath|emotion|language|text" line per
    utterance. For each line it appends the phoneme/text record, saves BERT
    and HuBERT features, the 32 kHz wav, and (when the vq model is loaded)
    the semantic token sequence; finally it dumps the s1/s2 configs.
    """
    if self.name is None:
      raise ValueError("select work_dir first")
    if asr_res_path is None:
      asr_res_path = os.path.join(self.speaker_path, "wavfiles/asr.txt")
    with open(asr_res_path,"r",encoding="utf8")as f:
      lines = f.read().strip("\n").split("\n")
    texts_opt = []
    # semantic_opt = ["item_name\tsemantic_audio"] # tsv header
    semantic_opt = []
    for idx, line in enumerate(lines):
      try:
        wavpath, emotion, language, text = line.split("|")
        wavname = os.path.basename(wavpath)
        if not wavpath.endswith(".wav"):
          wavpath = wavpath + ".wav"
        bert, phones, word2ph, norm_text = gpl.extract_text_feature(text, language)
        texts_opt.append("%s\t%s\t%s\t%s" % (wavname, phones, word2ph, norm_text))
        # wav, sr = librosa.load(wavpath, sr=32000) # default sr=32000
        sr = 32000
        ssl_content, wav32k = gpl.extract_hubert_ssl(wavpath, sr)
        if self.is_init:
          codes = self.vq_model.extract_latent(ssl_content.to(self.device, dtype=self.dtype))
          semantic = " ".join([str(i) for i in codes[0, 0, :].tolist()])
          semantic_opt.append("%s\t%s" % (wavname, semantic))
        torch.save(bert, os.path.join(self.bert_path, wavname + ".pt"))
        torch.save(ssl_content, os.path.join(self.hubert_path, wavname + ".pt"))
        wavname = wavname if wavname.endswith(".wav") else wavname + ".wav"
        sf.write(os.path.join(self.wav32k_path, wavname), wav32k, 32000)
      except:
          # NOTE(review): bare except logs and skips the bad line, but also
          # swallows KeyboardInterrupt — consider `except Exception`.
          print(idx, line, traceback.format_exc())
    with open(os.path.join(self.speaker_path, "2-name2text.txt"), "a", encoding="utf8") as f:
      f.write("\n".join(texts_opt) + "\n")
    name2semantic = os.path.join(self.speaker_path, "6-name2semantic.tsv")
    # Only write the TSV header the first time the file is created.
    if not os.path.exists(name2semantic):
      semantic_opt = ["item_name\tsemantic_audio"] + semantic_opt
    with open(name2semantic, "a", encoding="utf8") as f:
      f.write("\n".join(semantic_opt) + "\n")
    self.dump_cfg_s1()
    self.dump_cfg_s2()
    