from pocketsphinx import LiveSpeech
import numpy as np
from typing import List, Literal, Optional, Union
import time
import asyncio
from threading import Thread
from tool.tool_register import dispatch_tool, get_op_tools,get_lm4_tools,nested_object_to_dict,wcf
import json
import openai
from zhipuai import ZhipuAI
import whisper  
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
import zhconv
import vits.commons as commons
import vits.utils as vitsUtil
from vits.models import SynthesizerTrn
from vits.text.symbols import symbols
from vits.text import text_to_sequence
import os,sys
import torch
from torch import no_grad, LongTensor
import sounddevice as sd
import pyaudio
import queue
import re
from wcferry import Wcf, WxMsg
import logging
from queue import Empty
from configuration import Config
from utils import split_language
import asyncio
import concurrent.futures
import time




def traditional_to_simplified(text):
    """Convert Traditional Chinese text to Simplified Chinese."""
    # zhconv does the character-set mapping; 'zh-hans' selects Simplified.
    return zhconv.convert(text, 'zh-hans')

class ChatRobot:
    """Voice + WeChat desktop assistant.

    Wires together: pocketsphinx wake-word detection, speech recognition
    (whisper or a modelscope paraformer pipeline), an LLM backend
    (OpenAI-compatible API or ZhipuAI GLM4) with function/tool dispatch,
    VITS speech synthesis, and wcferry WeChat message handling.
    """

    def __init__(self):
        self.speech = None
        if Config().LIVE_SPEECH:
            # Microphone wake-word listener.
            self.speech = LiveSpeech(
                    verbose=False,
                    sampling_rate=16000,
                    buffer_size=2048,
                    no_search=False,
                    full_utt=False,
                    hmm=Config().LIVE_SPEECH['hmm'],
                    lm=Config().LIVE_SPEECH['lm'],
                    dic=Config().LIVE_SPEECH['dic'],
                )
            self.wake_words = Config().LIVE_SPEECH['wakeWords']  # wake words
            self.speechType = Config().CHAT_ROBOT['speechType']
            if self.speechType == 'whisper' and Config().WHISPER:
                # modelType: small / medium / large
                self.model = whisper.load_model(Config().WHISPER['modelType'])
            else:
                self.model = pipeline(
                    task=Tasks.auto_speech_recognition,
                    model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch')
        else:
            # No wake word configured: user input is read from stdin via a
            # worker thread (see awaken_speech).
            self.executor = concurrent.futures.ThreadPoolExecutor()
        self.LOG = logging.getLogger("ChatRobot")
        self.awaken = False    # True while a recording session is active
        self.is_chart = False  # True while a chat session loop is running
        self.record_time_out = Config().CHAT_ROBOT.get('recordTimeOut', 5)  # seconds per recording slice
        self.chat_size_out = Config().CHAT_ROBOT.get('chatSizeOut', 3)      # idle slices before the session ends
        self.refresh_time = 0  # timestamp of the last detected activity
        self.loop = asyncio.get_event_loop()
        self.history = [{"role":"system","content":"你是windows电脑助手，你的任务是为用户提供专业、准确、有见地且简洁建议。回复内容里所有文件名称和链接地址都用()包起来。文件默认存储路径是："+Config().CHAT_ROBOT['basePath']}]
        self.clear_history = False
        self.audio_size = 0    # speech phrases heard during the current recording
        # Inbound WeChat messages.
        self.msg_list = []
        self.is_new_msg = False
        # Audio capture parameters.
        self.format = pyaudio.paInt16
        self.channels = 1
        self.rate = 16000
        self.chunk = 1024
        # LLM backend configuration.
        if Config().OPENAI:
            openai.api_base = Config().OPENAI['api_base']
            openai.api_key = Config().OPENAI['api_key']
        if Config().CHAT_ROBOT['apiType'] == 'glm4':
            # BUG FIX: the key was previously read from GLM4['model'] (the
            # model name), which cannot authenticate; use the real API key.
            self.client = ZhipuAI(api_key=Config().GLM4['api_key'])
        # Device used for VITS synthesis.
        self.device = ("cuda:0" if torch.cuda.is_available() else ("mps"
                if sys.platform == "darwin" and torch.backends.mps.is_available() else "cpu"))
        if Config().VITS:
            # Locate the checkpoint (.pth) and config (.json) under modelPath;
            # the last match wins if several are present.
            pthPath = ''
            confPath = ''
            for root, dirs, files in os.walk(Config().VITS['modelPath']):
                for file in files:
                    if file.endswith('.pth'):
                        pthPath = os.path.join(root, file)
                    elif file.endswith('.json'):
                        confPath = os.path.join(root, file)
            self.vitsHps = vitsUtil.get_hparams_from_file(confPath)
            self.vitsNet = SynthesizerTrn(
                    len(symbols),
                    self.vitsHps.data.filter_length // 2 + 1,
                    self.vitsHps.train.segment_size // self.vitsHps.data.hop_length,
                    n_speakers=self.vitsHps.data.n_speakers,
                    **self.vitsHps.model,
                )
            self.vitsNet.eval().to(self.device)
            self.speakers = list(self.vitsHps.speakers)
            vitsUtil.load_checkpoint(pthPath, self.vitsNet)
            self.speaker = Config().VITS['speaker']
            self.language = Config().VITS['language']

    def enableReceivingMsg(self) -> None:
        """Start a daemon thread that pumps WeChat messages into processMsg."""
        def innerProcessMsg(wcf: Wcf):
            while wcf.is_receiving_msg():
                try:
                    msg = wcf.get_msg()
                    self.processMsg(msg)
                except Empty:
                    continue  # no message available yet
                except Exception as e:
                    self.LOG.error(f"Receiving message error: {e}")

        wcf.enable_receiving_msg()
        Thread(target=innerProcessMsg, name="GetMessage", args=(wcf,), daemon=True).start()

    def processMsg(self, msg: WxMsg) -> None:
        """Handle one incoming WeChat message.

        Only private (non-group) messages of supported types are queued:
        text, image, voice, friend request, emoji image, quoted message,
        file. Queuing a message also starts a chat session if none is
        currently running.
        """
        if msg.type not in ([0x01, 0x03, 0x34, 0x37, 0x47, 822083633, 1090519089]):
            return
        if msg.from_group():
            # Group-chat messages are ignored.
            return
        self.msg_list.append(msg)
        self.is_new_msg = True
        if self.is_chart:
            # A session is already running; its loop will pick the message up.
            return
        else:
            self.awaken_speech()
            return

    def vits(self, text, speaker_id=0, noise_scale=0.6, noise_scale_w=0.8, length_scale=1):
        """Synthesize `text` with the VITS model and play it synchronously."""
        if not len(text):
            self.LOG.error("输入文本不能为空！")
            return
        # Huggingface Spaces deployments limit input length.
        limitation = os.getenv("SYSTEM") == "spaces"
        text = text.replace('\n', ' ').replace('\r', '').strip()
        if len(text) > 100 and limitation:
            self.LOG.error(f"输入文字过长！{len(text)}>100")
            return
        text = split_language(text)  # tag languages, e.g. f"[ZH]{text}[ZH]"
        stn_tst, clean_text = self.get_text(text)
        with no_grad():
            x_tst = stn_tst.unsqueeze(0).to(self.device)
            x_tst_lengths = LongTensor([stn_tst.size(0)]).to(self.device)
            speaker_id = LongTensor([speaker_id]).to(self.device)
            audio = self.vitsNet.infer(x_tst, x_tst_lengths, sid=speaker_id,
                                       noise_scale=noise_scale, noise_scale_w=noise_scale_w,
                                       length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()
        # Append one second of silence so playback does not clip the tail.
        # BUG FIX: pad with the audio's own (float) dtype; the original int16
        # zeros silently upcast the whole buffer to float64 on concatenation.
        padding_frames = int(1 * self.vitsHps.data.sampling_rate)
        padding_data = np.zeros(padding_frames, dtype=audio.dtype)
        audio = np.concatenate((audio, padding_data), axis=0)
        sd.play(audio, self.vitsHps.data.sampling_rate)
        sd.wait()  # block until playback finishes

    def get_text(self, text):
        """Convert text into the id tensor VITS expects.

        Returns (LongTensor of symbol ids, cleaned text).
        """
        text_norm, clean_text = text_to_sequence(text, self.vitsHps.symbols, self.vitsHps.data.text_cleaners)
        if self.vitsHps.data.add_blank:
            # Interleave blank tokens between symbols, matching training.
            text_norm = commons.intersperse(text_norm, 0)
        text_norm = LongTensor(text_norm)
        return text_norm, clean_text

    def awaken_speech(self):
        """Run one chat session: gather input (WeChat, mic, or stdin) and
        feed each utterance to ai_chart until the session times out."""
        self.is_chart = True
        future = None
        while True:
            text = ''
            if self.is_new_msg:
                # Unread WeChat messages take priority over voice input.
                # NOTE(review): is_new_msg is never reset in this class;
                # presumably a dispatched tool clears it — verify.
                text = '微信未读消息数'
            elif self.speech:
                record_audio = self.record_audio()
                if self.audio_size == 0:
                    # Nothing was spoken during this slice; end the session
                    # unless a WeChat message arrived meanwhile.
                    if self.is_new_msg:
                        continue
                    break
                if self.speechType == 'whisper':
                    text = self.recognize_speech(record_audio)
                    text = traditional_to_simplified(text)
                else:
                    text = self.recognize_speech_mt(record_audio)
            else:
                # Console mode: poll a background stdin reader.
                if future is None:
                    future = self.executor.submit(input, '用户：')
                    continue
                else:
                    if future.done():
                        text = future.result()
                        future = None
                    else:
                        time.sleep(2)
                        continue
            if text:
                if self.ai_chart(text) == False:
                    break
            self.LOG.info('完成一次对话')
        self.LOG.info('对话结束')
        self.is_chart = False

    def chat_completion(self):
        """Call the configured chat backend with the current history and the
        registered tool schemas; return the raw API response."""
        if Config().CHAT_ROBOT['apiType'] == 'glm4':
            response = self.client.chat.completions.create(
                model=Config().GLM4['model'],
                messages=self.history,
                tools=get_lm4_tools(),
                tool_choice="auto",
            )
            return response
        else:
            params = dict(model=Config().OPENAI['model'], messages=self.history, stream=False)
            params["functions"] = get_op_tools()
            return openai.ChatCompletion.create(**params)

    def message_handle(self, message, fuctionf=None):
        """Execute tool calls requested by the model and append the results
        to self.history.

        With `fuctionf` set, runs that single call and returns extra text to
        attach to the final answer (e.g. an image path, may be ''). Without
        it, fans out over the message's tool/function calls; returns None
        when the message carries no calls.
        """
        if fuctionf:
            if Config().CHAT_ROBOT['apiType'] == 'glm4':
                fuction = fuctionf.function
            else:
                fuction = fuctionf
            function_args = json.loads(fuction.arguments)
            self.LOG.info(f"Tool Name {fuction.name} Arguments: {fuction.arguments}")
            try:
                if fuction.name == 'resetting_chat_record':
                    # Handled locally: flag the history for reset in ai_chart.
                    self.clear_history = True
                    observation = {"success": True, "res": "重置成功", "res_type": "text"}
                elif fuction.name in Config().CHAT_ROBOT['tools']:
                    # Robot-aware tools receive this instance.
                    function_args['sel'] = self
                    observation = dispatch_tool(fuction.name, function_args)
                else:
                    observation = dispatch_tool(fuction.name, function_args)
            except Exception as e:
                # BUG FIX: the original stored the error in an unused local
                # (`rsp`) and left `observation` undefined, so the code below
                # raised NameError; report the failure as a tool result.
                observation = {"success": False, "res": f'api调用错误: {e}', "res_type": "text"}
            addStr = ''
            if isinstance(observation, dict):
                res_type = observation['res_type'] if 'res_type' in observation else 'text'
                # BUG FIX: pick 'res' based on the 'res' key (the original
                # tested 'res_type' here, stringifying the whole dict when a
                # result had 'res' but no 'res_type').
                res = str(observation['res']) if 'res' in observation else str(observation)
                if res_type == 'image':
                    addStr = res
                tool_response = '[Image]' if Config().CHAT_ROBOT['apiType'] == 'glm3' and res_type == 'image' else res
            else:
                tool_response = observation if isinstance(observation, str) else str(observation)
            self.LOG.info(f"Tool Call Response: {tool_response}")
            self.history.append(nested_object_to_dict(message))
            if Config().CHAT_ROBOT['apiType'] == 'glm4':
                self.history.append({
                    "role": "tool",
                    "tool_call_id": fuctionf.id,
                    "content": tool_response
                })
            else:
                self.history.append({
                    "role": "function",
                    "name": fuction.name,
                    "content": tool_response
                })
            return addStr

        if Config().CHAT_ROBOT['apiType'] == 'glm4':
            if len(message.tool_calls) > 0:
                addStr = ''
                for call in message.tool_calls:
                    addStr += self.message_handle(message, call)
                return addStr
        else:
            if message.get("function_call"):
                return self.message_handle(message, message.function_call)

    def ai_chart(self, text):
        """Send one user utterance through the chat model (resolving any tool
        calls), log and optionally speak the answer; return True on success."""
        self.LOG.info('问题：' + text)
        self.history.append({"role": "user", "content": text})
        response = self.chat_completion()
        # Keep resolving tool calls until the model produces a plain answer.
        while (Config().CHAT_ROBOT['apiType'] == 'glm4' and response.choices[0].finish_reason == 'tool_calls' and len(response.choices[0].message.tool_calls) > 0) or (Config().CHAT_ROBOT['apiType'] != 'glm4' and response.choices[0].message.get("function_call")):
            addStr = self.message_handle(response.choices[0].message)
            response = self.chat_completion()
            if Config().CHAT_ROBOT['apiType'] == 'glm3' and addStr:
                # glm3 saw only '[Image]' as the tool result; re-attach the
                # real image path if the model's answer dropped it.
                if response.choices[0].message.content.find(addStr) == -1:
                    response.choices[0].message.content += '[' + addStr + ']'
        if self.clear_history:
            # A resetting_chat_record call ran: keep only the system prompt.
            self.clear_history = False
            self.history = [self.history[0]]
        else:
            if Config().CHAT_ROBOT['apiType'] == 'glm4':
                self.history.append(nested_object_to_dict(response.choices[0].message))
            else:
                self.history.append(response.choices[0].message)
        # BUG FIX: content may be None (e.g. pure tool-call replies); guard
        # the string concatenation and the regex substitutions below.
        content = response.choices[0].message.content or ''
        self.LOG.info('回答：' + content)
        if Config().VITS:
            # Replace bracketed paths/links/code with a placeholder so the
            # TTS does not read them aloud verbatim.
            content = re.sub(r'\((.*?)\)', '如下', content)
            content = re.sub(r'\[(.*?)\]', '如下', content)
            content = re.sub(r'```(.*?)```', '如下', content)
            self.vits(content, self.speakers.index(self.speaker))
        return True

    def record_audio(self):
        """Record microphone audio until the silence timeout; return a 1-D
        numpy array of samples (possibly empty when nothing was captured).

        self.audio_size is incremented externally by start_listen whenever
        the wake-word engine hears speech during recording.
        """
        q = queue.Queue()

        def callback(indata, frames, time, status):
            # Runs on sounddevice's capture thread for every audio block.
            if status:
                # BUG FIX: Logger.info() has no `file` kwarg; the original
                # self.LOG.info(status, file=sys.stderr) raised TypeError.
                self.LOG.info(status)
            q.put(indata.copy())

        self.LOG.info("开始录音，请说话...")
        audio_data = []
        try:
            self.awaken = True
            self.audio_size = 0
            self.refresh_time = time.time()
            wait_count = 0
            with sd.InputStream(samplerate=self.rate, device=None, channels=self.channels, callback=callback):
                while wait_count < self.chat_size_out:
                    audio_data.append(q.get())
                    if time.time() - self.refresh_time > self.record_time_out:
                        if self.audio_size > 0:
                            # Speech was heard and has now gone quiet: done.
                            break
                        # Still silent: count another idle slice.
                        self.refresh_time = time.time()
                        wait_count += 1
        except KeyboardInterrupt:
            pass
        finally:
            self.LOG.info("停止录音...")
            self.awaken = False
        # BUG FIX: concatenate outside the try so an interrupt still yields
        # an ndarray, and guard the empty case (np.concatenate([]) raises).
        if audio_data:
            return np.concatenate(audio_data).flatten()
        return np.zeros(0, dtype=np.float32)

    def recognize_speech(self, audio_data):
        """Transcribe audio with whisper; return text or an error string."""
        try:
            audio = whisper.pad_or_trim(audio_data)
            # BUG FIX: move the log-Mel spectrogram to the model's own device
            # instead of hard-coded "cuda", which crashed on CPU/MPS hosts.
            mel = whisper.log_mel_spectrogram(audio).to(self.model.device)
            # Detect the spoken language before decoding.
            _, probs = self.model.detect_language(mel)
            self.LOG.info(f"Detected language: {max(probs, key=probs.get)}")
            options = whisper.DecodingOptions(fp16=False)
            result = whisper.decode(self.model, mel, options)
            return result.text
        except Exception as e:
            return "无法识别语音" + str(e)

    def recognize_speech_mt(self, audio_data):
        """Transcribe audio with the modelscope paraformer pipeline."""
        try:
            rec_result = self.model(audio_in=audio_data)
            return rec_result['text']
        except Exception:
            return "无法识别语音"

    def start_listen(self):
        """Main loop: watch for a wake word, or fall back to console chat
        when no wake-word engine is configured."""
        if self.speech:
            self.awaken = False
            self.is_chart = False
            self.refresh_time = time.time()
            self.LOG.info("开始监听")
            for phrase in self.speech:
                word = str(phrase).strip()
                self.LOG.info("-----------")
                self.refresh_time = time.time()
                if self.awaken == False and self.is_chart == False and word in self.wake_words:
                    # Wake word heard: run the chat session on its own thread
                    # so this loop keeps counting speech activity below.
                    thread = Thread(target=self.awaken_speech, name="AwakenSpeech", args=(), daemon=True)
                    thread.start()
                    self.LOG.info('---------唤醒对话-----------')
                if self.awaken:
                    # Any phrase during recording counts as speech activity.
                    self.audio_size += 1
        else:
            self.awaken_speech()
                

if __name__ == '__main__':
    # Start the assistant: pump WeChat messages in the background and run
    # the wake-word / chat loop in the foreground.
    bot = ChatRobot()
    bot.enableReceivingMsg()
    bot.start_listen()
