import sys, os
sys.path.append(os.path.join(os.getcwd()))
sys.path.append(os.path.join(os.getcwd(), "GPT_SoVITS"))
os.environ["PYTHONPATH"] = os.environ.get("PYTHONPATH", "") + ":" + os.getcwd() + ":" + os.path.join(os.getcwd(), "GPT_SoVITS")

from fastapi import FastAPI, Request
import ray, random, glob, io, base64, asyncio
from ray import serve
from ray.util.queue import Queue
import torch, time, json
import soundfile as sf
from gpustats import Monitor
from GPT_SoVITS.pipelines import GeneralPipeline, DedicatedPipeline, ASRPipeline, FineTunePipeline
import librosa, subprocess
from env import user_path, hubert_path, bert_path, gpt_weight_path, sovits_weight_path, asr_model_path, asr_vad_path, asr_punc_path, s1_pretrained, s2_pretrained_G, s2_pretrained_D
from fastapi import FastAPI, UploadFile, File, HTTPException, Form
from fastapi.responses import StreamingResponse
from threading import Timer
import dto
# GPU usage monitor shared by the auto_set_device* helpers below.
monitor = Monitor()

# init ray (idempotent thanks to ignore_reinit_error)
ray.init(ignore_reinit_error=True)

# fastapi app, wrapped by Ray Serve via @serve.ingress below
app = FastAPI()

# init pipeline in main process
# Pipelines are constructed on CPU with init_model=False and placed in the Ray
# object store; remote GPU tasks receive the handles and call to_device there.
# (Presumably init_model=False defers heavy model loading — confirm in pipelines.)
gpl = ray.put(GeneralPipeline(hubert_path, bert_path, is_half=True, init_model=False, device=torch.device("cpu")))
dpl = ray.put(DedicatedPipeline(gpt_weight_path, sovits_weight_path, init_model=False, is_half=True, device=torch.device("cpu")))
apl = ray.put(ASRPipeline(asr_model_path, asr_vad_path, asr_punc_path, init_model=False, device=torch.device("cpu")))
fpl = ray.put(FineTunePipeline(user_path, s1_pretrained, s2_pretrained_G, s2_pretrained_D, is_half=True, init_model=False, device=torch.device("cpu")))

# function
def auto_set_device(th_load=0.6, th_mem=10480):
  """Pin the current process to one idle GPU and return a CUDA device.

  Asks the global GPU monitor for devices under the given load / memory
  thresholds, exposes a randomly chosen one through CUDA_VISIBLE_DEVICES,
  and returns torch.device("cuda").

  Raises:
    RuntimeError: when no GPU passes the filter.
  """
  candidates = monitor.filter(th_load, th_mem)
  if not candidates:
    raise RuntimeError("No GPU device is available!")
  chosen = random.choice(candidates)
  os.environ["CUDA_VISIBLE_DEVICES"] = str(chosen.index)
  return torch.device("cuda")

def auto_set_device_env(th_load=0.6, th_mem=10480, num_gpus=1):
  """Return a copy of os.environ with CUDA_VISIBLE_DEVICES set to idle GPUs.

  Used to hand subprocesses (the s1/s2 trainers) their own GPU assignment
  without mutating this process's environment.

  Args:
    th_load: maximum GPU load accepted by the monitor filter.
    th_mem: maximum memory usage accepted by the monitor filter.
    num_gpus: number of distinct GPUs to assign.

  Raises:
    RuntimeError: when no GPU, or fewer than num_gpus GPUs, are available.
  """
  env = os.environ.copy()
  valid_gpus = monitor.filter(th_load, th_mem)
  if len(valid_gpus) == 0:
    raise RuntimeError("No GPU device is available!")
  if len(valid_gpus) < num_gpus:
    raise RuntimeError(f"Only {len(valid_gpus)} idle GPU(s) available, {num_gpus} requested!")
  # random.sample picks *distinct* GPUs; the previous random.choices sampled
  # with replacement and could put the same GPU index twice into the list.
  env["CUDA_VISIBLE_DEVICES"] = ",".join(str(gpu.index) for gpu in random.sample(valid_gpus, k=num_gpus))
  return env

def get_default_reference(asr_result):
  """Pick a random reference clip from ASR results, preferring 3-6s clips.

  Each entry is expected as [path, emo, lang, text, duration, ...] (the
  pipe-split lines of ref.txt). Entries outside the 3-6 second window are
  filtered out first; when none qualify, the full list is used as fallback.

  Args:
    asr_result: non-empty list of reference rows as described above.

  Returns:
    dict with keys "emo", "text", "lang", "path".

  Raises:
    ValueError: when asr_result is empty.
  """
  contents_short = [x for x in asr_result if 3 <= float(x[4]) <= 6]
  if not contents_short:
    print("no short content found")
    contents_short = asr_result
  if not contents_short:
    raise ValueError("asr_result is empty; no reference available")
  # randrange is the idiomatic form of randint(0, len - 1)
  randidx = random.randrange(len(contents_short))
  print(f"random idx: {randidx}/{len(contents_short)}")
  content = contents_short[randidx]
  # NOTE(review): "emo" is hardcoded to neutral even though content[1] carries
  # the clip's emotion label — confirm this is intentional.
  return {
        "emo": "neutral",
        "text": content[3],
        "lang": content[2],
        "path": content[0],
      }

# inference with new process
@ray.remote(num_gpus=1, max_retries=3)
def gpl_infer(gpl:GeneralPipeline, raw_data, ref_data):
  device = auto_set_device()
  gpl.to_device(device)
  if ref_path:=ref_data.get("path"):
    ref_data["wav"], ref_data["sr"] = librosa.load(ref_path, sr=16000)
  berts, phones_list, ssl_content, phones_ref = gpl(raw_data, ref_data)
  out = (berts, phones_list, ssl_content, phones_ref, ref_data.get("wav", None), 16000)
  return out

@ray.remote(num_gpus=1, max_retries=3)
def dpl_infer(modelstem, dpl:DedicatedPipeline, berts, phones_list, ssl_content, phones_ref, wav16k, sr, refers=None):
  """Synthesize speech with the user's fine-tuned weights; return the wav path.

  Loads GPT (logs_s1/ckpt_half) and SoVITS (logs_s2/ckpt_v2) weights from
  modelstem, runs inference, and writes the result under
  modelstem/outputfiles/<timestamp>.wav.

  Args:
    refers: optional list of reference waveforms; falls back to [wav16k] when
      empty or None. (Default changed from the mutable `[]` to None — the
      observable behavior is identical.)
  """
  device = auto_set_device()
  print(modelstem)
  gpt_weight_path = os.path.join(modelstem, "logs_s1/ckpt_half")
  sovits_weight_path = os.path.join(modelstem, "logs_s2/ckpt_v2")
  dpl.change_gpt_weights(gpt_weight_path)
  dpl.change_sovits_weights(sovits_weight_path)
  dpl.to_device(device)
  refers = refers if refers else [wav16k]
  with torch.no_grad():
    if ssl_content is not None:
      wav = dpl(berts, phones_list, ssl_content, phones_ref, refers=refers, refers_sr=sr)
    else:
      wav = dpl(berts, phones_list)
  output_file = os.path.join(modelstem, f"outputfiles/{time.time_ns()}.wav")
  os.makedirs(os.path.dirname(output_file), exist_ok=True)
  sf.write(output_file, wav, dpl.sovits_cfg['data']['sampling_rate'])
  return output_file

@ray.remote(num_gpus=1, max_retries=3)
def asr_infer(apl:ASRPipeline, wavpath, userid=None, auto_save=True, auto_slice=True):
  """Transcribe wavpath on a GPU worker; optionally persist the transcript.

  When auto_save is True, appends pipe-delimited lines to asr.txt
  (path|emo|lang|content) and ref.txt (same plus duration) next to the
  sliced clips, and returns (results, asr.txt path); otherwise returns
  (results, None).
  """
  device = auto_set_device()
  apl.to_device(device)
  print(device)
  wavname = os.path.basename(wavpath).split(".")[0]
  wavfolder = os.path.dirname(wavpath) if userid is None else os.path.join(user_path, f"{userid}/wavfiles")
  resall = apl(wavpath, auto_slice)
  if not auto_save:
    return resall, None
  sliced_folder = os.path.join(wavfolder, wavname)
  os.makedirs(sliced_folder, exist_ok=True)
  rows = [(os.path.join(sliced_folder, r["filename"]), r) for r in resall]
  wlines = [f'{p}|{r["emo"]}|{r["lang"]}|{r["content"]}\n' for p, r in rows]
  wlines_ref = [f'{p}|{r["emo"]}|{r["lang"]}|{r["content"]}|{r["duration"]}\n' for p, r in rows]
  asr_path = os.path.join(wavfolder, "asr.txt")
  with open(asr_path, "a", encoding="utf8") as f:
    f.writelines(wlines)
  # asr content with duration, consumed later as reference metadata (ref.txt)
  ref_path = os.path.join(wavfolder, "ref.txt")
  with open(ref_path, "a", encoding="utf8") as f:
    f.writelines(wlines_ref)
  return resall, asr_path

@ray.remote(num_gpus=1, max_retries=3)
def finetune(fpl:FineTunePipeline, gpl:GeneralPipeline, name):
  """Fine-tune the s1 (GPT) and s2 (SoVITS) models for one user.

  Preprocesses the user's data with the general pipeline, removes stale
  checkpoints, then runs both training stages concurrently as subprocesses,
  each handed its own idle GPU via CUDA_VISIBLE_DEVICES.

  Returns:
    (True, "success") on success, or (False, <stderr of failed stage>).
  """
  device = auto_set_device()
  fpl.to_device(device)
  gpl.to_device(device)
  fpl.change_work_dir(name)
  fpl.preprocess(gpl)

  # Clear old checkpoints so training restarts from the pretrained weights.
  s1_path = glob.glob(f"{fpl.speaker_path}/logs_s1/ckpt/*")
  s2_path = glob.glob(f"{fpl.speaker_path}/logs_s2/ckpt_v2/*")
  for path in s1_path+s2_path:
    os.remove(path)
    print(f"remove {path}")

  env = auto_set_device_env()
  p2 = subprocess.Popen(["python", os.path.realpath("GPT_SoVITS/s2_train.py"),
                  "--config", f"{fpl.speaker_path}/config_s2.json",], cwd=user_path, env=env, stderr=subprocess.PIPE)

  env = auto_set_device_env()
  p1 = subprocess.Popen(["python", os.path.realpath("GPT_SoVITS/s1_train.py"),
                  "--config", f"{fpl.speaker_path}/config_s1.yaml",], cwd=user_path, env=env, stderr=subprocess.PIPE)

  # communicate() drains stderr while waiting. The previous wait()-then-
  # communicate() sequence could deadlock once a child filled the OS pipe
  # buffer with stderr output (documented subprocess pitfall).
  _, p1_err = p1.communicate()
  _, p2_err = p2.communicate()

  if p2.returncode != 0:
    return False, f"s2_train.py failed with {p2_err.decode()}!"

  if p1.returncode != 0:
    return False, f"s1_train.py failed with {p1_err.decode()}!"
  return True, "success"

# %% Persistent Connection Actor
@ray.remote(num_gpus=1, max_restarts=3)
class PersistentService:
  """Detached per-client Ray actor that keeps TTS models resident on one GPU.

  Holds a fully initialized GeneralPipeline plus the user's fine-tuned
  DedicatedPipeline so repeated /stream requests avoid model reloads.
  Self-destructs (ray.kill by actor name) after `timeout` seconds of
  inactivity, driven by a threading.Timer.
  """
  def __init__(self, client_id, user_id, timeout: int = 300, sr=16000):
    """Load both pipelines onto an idle GPU and arm the inactivity timer."""
    device = auto_set_device()
    self.user_id = user_id
    self.client_id = client_id
    self.sr = sr
    # Unlike the module-level CPU handles, models are built here (init_model=True).
    self.gpl = GeneralPipeline(hubert_path, bert_path, is_half=True, init_model=True, device=device)
    gpt_fpath = f"{user_path}/{user_id}/logs_s1/ckpt_half"
    sovits_fpath = f"{user_path}/{user_id}/logs_s2/ckpt_v2"
    self.dpl = DedicatedPipeline(gpt_fpath, sovits_fpath, init_model=True, is_half=True, device=device)
    self.timeout = timeout
    self.timer = None
    self.refers_spec = []
    self.ref_contents = []
    self.reset_timer()
    
  def is_initialized(self):
    """Load the user's reference transcripts and precompute spectrograms.

    Reads wavfiles/ref.txt (path|emo|lang|text|duration rows) when present
    and caches one spectrogram per reference clip. Always returns True.
    """
    asrtxt = os.path.join(user_path, self.user_id, "wavfiles/ref.txt")
    if os.path.exists(asrtxt):
      with open(asrtxt, "r", encoding="utf8") as f:
        self.ref_contents = [x.strip().split("|") for x in f.readlines()]
    # NOTE(review): audio is loaded at self.sr but get_spepc is told 16000 —
    # these agree only when sr == 16000; confirm for other sample rates.
    refers_orig = [librosa.load(content[0], sr=self.sr)[0] for content in self.ref_contents]
    self.refers_spec = [self.dpl.get_spepc(wav, 16000).to(self.dpl.device, dtype=self.dpl.dtype) for wav in refers_orig]
    return True
    
  def reset_timer(self):
    """(Re)arm the inactivity timer; a timeout <= 0 disables auto-destroy."""
    if self.timer is not None:
        self.timer.cancel()  # cancel the previous timer
    if self.timeout > 0:
      self.timer = Timer(self.timeout, self.destroy)  # arm a fresh timer
      self.timer.start()

  def pause_timer(self):
    """Suspend auto-destroy while a request is being served."""
    if self.timer is not None:
        self.timer.cancel()
        self.timer = None
  
  async def tts_stream(self, raw_data ,ref_data):
    """Yield newline-delimited JSON chunks of base64-encoded WAV audio.

    Falls back to a random stored reference when ref_data is None. Pauses the
    idle timer for the duration of generation and re-arms it at the end.
    Each chunk: {"data": <base64 wav>, "sr": ..., "isFinished": bool}.
    """
    self.pause_timer()
    if ref_data is None:
      ref_data = get_default_reference(self.ref_contents)
    texts, lang, ssl_content, phones_ref, bert_ref = self.gpl._call_preprocess(raw_data, ref_data)
    # NOTE(review): ref_data["path"] is derived from "fileid" only AFTER
    # _call_preprocess has already consumed ref_data — confirm the path is
    # not needed by the preprocess step above.
    if fileid := ref_data.get("fileid"):
      ref_data["path"] = os.path.join(user_path, self.user_id, "wavfiles", fileid + ".wav")
    prompt, refers = self.dpl._call_preprocess(ssl_content, self.refers_spec, self.sr, need_spepc=False)
    if len(texts) == 0:
      yield json.dumps({"data": "", "isFinished": True}) + "\n"
    for idx, text in enumerate(texts):
      # Skip blank lines in the target text — they previously caused errors.
      if (len(text.strip()) == 0):
          continue
      bert, phones = self.gpl._call_postprocess_step(text, lang, "v2", bert_ref)
      aud = self.dpl._call_step(bert, phones, prompt, refers, phones_ref, top_k=20, top_p=0.6, temperature=0.6, speed=1)
      with io.BytesIO() as f:
        sf.write(f, aud, self.sr, format='WAV')
        res = {"data": base64.b64encode(f.getvalue()).decode("utf-8"), 
               "sr": self.dpl.sovits_cfg.data.sampling_rate,
               "isFinished": idx==(len(texts)-1)
               }
        yield json.dumps(res) + "\n"
    self.reset_timer()
        
  def destroy(self):
    """Cancel the timer and kill this detached actor by its registered name."""
    if self.timer is not None:
      self.timer.cancel()
      self.timer = None
    print(f"destroying {self.client_id} for {self.timeout} seconds timeout.")
    ray.kill(ray.get_actor(self.client_id))
    
    
# %% Entrypoint Class
@serve.deployment(
        graceful_shutdown_timeout_s = 1200,
        graceful_shutdown_wait_loop_s = 60,
        ray_actor_options={"num_cpus": 0.5},  # entrypoint is CPU-bound
        autoscaling_config={
            "target_num_ongoing_requests_per_replica": 1, 
            "min_replicas": 1, 
            "initial_replicas": 2,
            "max_replicas": 4,
    })
@serve.ingress(app)
class Entrypoint:
    """Ray Serve HTTP entrypoint exposing TTS, streaming, ASR, fine-tune and
    upload endpoints. Holds object-store handles to the shared CPU pipelines;
    all GPU work is delegated to remote tasks or PersistentService actors.

    NOTE(review): several async handlers call blocking ray.get(), which stalls
    the event loop of this replica while a remote task runs — consider
    `await future` / async actor calls.
    """
    def __init__(self):
      # Object-store handles created at module import time (ray.put above).
      self.gpl = gpl
      self.dpl = dpl
      self.apl = apl
      self.fpl = fpl
            
    @app.post("/attach")
    async def attach(self, request:dto.StreamAttachReq):
      """Create (or reuse) a detached PersistentService actor for a client."""
      data = request.model_dump()
      if not (client_id:=data.get("client_id")): # generate a random client_id when absent
        client_id = "".join(random.sample("0123456789abcdef", 8))
      if (user_id := data.get("user_id")) is None:
        return {"isSuccess": False, "reason": "user_id is required", "client_id":client_id}
      try:
        actor = ray.get_actor(client_id)   
        reason = "already exist"
      # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
      # ray.get_actor raises ValueError when the actor does not exist.
      except:
        actor = PersistentService.options(name=client_id, lifetime="detached").remote(client_id, user_id, 60)
        reason = "success"
      return {"isSuccess": ray.get(actor.is_initialized.remote()), "reason": reason, "client_id":client_id}
    
    @app.post("/stream")
    async def stream(self, request:dto.StreamTextToSpeechReq):
      """Stream newline-delimited JSON audio chunks from the client's actor."""
      data = request.model_dump()
      reference = data.get("reference")
      client_id = data.get("client_id")
      target = data.get("target")
      actor = ray.get_actor(client_id)
      async def stream_chunks():
        # Wrap the remote generator locally
        async for res in actor.tts_stream.remote(target, reference):
            yield await res
      return StreamingResponse(stream_chunks(), media_type="application/json")
    
    @app.post("/detach")
    async def detach(self, request:dto.StreamDetachReq):
      """Kill the client's PersistentService actor, releasing its GPU."""
      data = request.model_dump()
      client_id = data.get("client_id")
      try:
        actor = ray.get_actor(client_id)
        ray.kill(actor)
        return {"isSuccess": True, "reason": "success"}
      except Exception as e:
        print(e)
        return {"isSuccess": False, "reason": str(e)}
            
    @app.post("/tts")
    async def text2speech(self, request:dto.TextToSpeechReq):
      """One-shot TTS: resolve a reference clip, run both pipelines remotely,
      and return the output wav path plus timing info."""
      data = request.model_dump()
      if (target := data.get("target")) is None:
        return {"isSuccess": False, "reason": "target is required"}
      if (userid := data.get("userid")) is None:
        return {"isSuccess": False, "reason": "userid is required"}
      reference = data.get("reference")      
      # Stored reference transcripts: path|emo|lang|text|duration per line.
      asrtxt = os.path.join(user_path, userid, "wavfiles/ref.txt")
      if os.path.exists(asrtxt):
        with open(asrtxt, "r", encoding="utf8") as f:
          contents = [x.strip().split("|") for x in f.readlines()]
      else:
        print(f"no path {asrtxt} content found")
        contents = []
      
      # Fill in missing reference fields: path from fileid, text via ASR.
      if reference is None:
        if len(contents) == 0:
          return {"isSuccess": False, "reason": "no reference content found"}
        reference = get_default_reference(contents)
      else:
        if reference.get("path") is None:
          if fileid:=reference.get("fileid"):
            reference["path"] = os.path.join(user_path, userid, "wavfiles", fileid + ".wav")
          else:
            reference.update(get_default_reference(contents))
        if reference.get("text") is None:
          future = asr_infer.remote(self.apl, reference.get("path"), userid, auto_save=False, auto_slice=False)
          asrres, _ = ray.get(future)
          reference["text"] = asrres[0]['content']
          
      # Collect timbre-reference waveforms: an explicit "voice" clip/folder if
      # given, otherwise all stored clips matching the requested emotion.
      refers = []
      if voice:=reference.get("voice"):
        voice_wav_path = os.path.join(user_path, voice.get("userid", "unknown"), "wavfiles", voice.get("fileid", "unknown"))
        wavs = glob.glob(os.path.join(voice_wav_path, "*.wav"))
        refers = [librosa.load(wav, sr=None)[0] for wav in wavs]
        if len(wavs)==0 and os.path.isfile(voice_wav_path+".wav"):
          refers = [librosa.load(voice_wav_path+".wav", sr=None)[0]]
      if len(refers) == 0:
        refers = [librosa.load(content[0], sr=16000)[0] for content in contents if content[1] == reference.get("emo", "neutral")]
      print("contains reference count:", len(refers))
      
      t0 = time.time()
      start_time = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(t0))
      future = gpl_infer.remote(self.gpl, target, reference) 
      berts, phones_list, ssl_content, phones_ref, wav16k, sr = ray.get(future)
      modelstem = os.path.join(user_path, userid)
      future = dpl_infer.remote(modelstem, self.dpl, berts, phones_list, ssl_content, phones_ref, wav16k, sr, refers)
      output_file = ray.get(future) 
      t1 = time.time()
      end_time = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(t1))
      duration = t1 - t0
      return {"isSuccess": True, "start": start_time, "end": end_time, "duration": duration, "filepath": output_file}
        
    @app.post("/preprocess")
    async def enter(self, data:dto.AsrReq):
      """Run ASR on an uploaded file, optionally slicing and saving transcripts."""
      fileid = data.fileid
      userid = data.userid
      wavpath = os.path.join(user_path, userid, "wavfiles", fileid + ".wav")
      print(f"ASR path: {wavpath}")
      future = asr_infer.remote(self.apl, wavpath, userid, data.enter, data.slice)
      asr_res, asr_res_path = ray.get(future)
      return {"result":asr_res}
        
    @app.post("/finetune")
    async def finetune_asr(self, request:dto.FinetuneReq):
      """Optionally transcribe a new recording, then fine-tune the user's models."""
      input_data = request.model_dump()
      userid = input_data.get("userid")
      if fileid := input_data.get("fileid"):
        wavpath = os.path.join(user_path, userid, "wavfiles", fileid + ".wav")
        print(f"Finetune ASR path: {wavpath}")
        future = asr_infer.remote(self.apl, wavpath, userid)
        asr_res, asr_res_path = ray.get(future)
      future = finetune.remote(self.fpl, self.gpl, userid)
      ret, msg = ray.get(future)
      return {"isSuccess": ret, "reason": msg}
      
    @app.post("/upload")
    async def upload_audio(self, file: UploadFile = File(...), userid: str = Form()):
        """Persist an uploaded audio file under the user's wavfiles folder
        and return the generated fileid."""
        audio_bytes = await file.read()  # Read the file bytes
        # NOTE(review): random.sample of 16 hex chars gives only 16!/8! ids —
        # collision-prone at scale; secrets.token_hex would be stronger.
        fileid = "".join(random.sample("0123456789abcdef", 8))
        filepath = f"{user_path}/{userid}/wavfiles/{fileid}.wav"
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        with open(filepath, "wb") as f:
            f.write(audio_bytes)
        print(f"Write in {filepath}")
        return {"userid": userid, "fileid": fileid, "size": len(audio_bytes)}

if __name__ == "__main__":
  # Bring up Serve's HTTP proxy on all interfaces, then deploy the app graph
  # at the root route and block the main thread to keep the server alive.
  http_options = {"host": "0.0.0.0", "port": 8000}
  serve.start(http_options=http_options)
  serve.run(Entrypoint.bind(), route_prefix="/", blocking=True)