# Standard library
import os
import threading
import time
import wave
from datetime import datetime, timedelta

# Third-party
import noisereduce as nr
import numpy as np
import pocketsphinx
import pyaudio
import pyttsx3
import sounddevice as sd
import speech_recognition as sr
import vosk
from aip import AipSpeech
from zhipuai import ZhipuAI


class ConferenceHost:
    """Voice-driven meeting host.

    Records audio from the microphone, transcribes it with Baidu ASR,
    asks the ZhipuAI glm-4 model whether the discussion has drifted
    off topic, and produces a meeting summary at the end.
    """

    # -- recording parameters --
    CHUNK = 1024                # frames read per buffer
    FORMAT = pyaudio.paInt16    # 16-bit PCM
    CHANNELS = 1                # mono
    RATE = 16000                # sample rate expected by Baidu ASR
    RECORD_SECONDS = 60         # length of each recorded segment, adjustable

    # Baidu APPID / AK / SK
    # SECURITY NOTE(review): credentials are hard-coded in source;
    # they should be moved to environment variables or a config file.
    APP_ID = '18397084'
    API_KEY = 'q5VjqAAK4Tqh5ljqQSZ9WOLx'
    SECRET_KEY = 'HlERX1FcxoDW4SjKhzjyK1lymeAwmzQQ'

    # ZhipuAI API key (same security concern as above)
    ZHIPU_API_KEY = 'ab4891787863816b83378080d2d4c9ae.m8aFMCOqWWPPOJhz'

    def __init__(self):
        # meeting topics
        self.topic = []

        # text-to-speech engine
        self.engine = pyttsx3.init()
        self.flag = 0

        # off-topic flag: set to 1 by the recognition thread when drifting
        self.slant_flag = 0
        self.topic_content = ""

        # speech recognizers
        self.recognizer = pocketsphinx.Decoder()  # NOTE(review): never used below
        self.sr_recognizer = sr.Recognizer()

        # listening window, in seconds
        self.duration = 5
        # meeting start time
        self.start_time = datetime.now()
        # meeting end time
        self.end_time = self.start_time + timedelta(minutes=30)
        # meeting minutes
        self.record = []
        # wake-word keyword list
        self.keywords = ["小星同学", "小新同学", "小心同学", "同学"]
        # transcribed meeting content segments
        self.metting_content_list = []

    ######## main-line method ########
    def run(self):
        """Main loop: introduce the meeting, record and transcribe a few
        segments, then summarize the collected content."""
        try:
            tmp_num = 0
            flag = True

            for item in self.topic:
                self.topic_content += item + "，"

            # announce the meeting
            self.introduce_metting()

            while flag:
                # record one segment
                record_filename = self.record_audio_main()

                # transcribe it
                remote_res = self.recognizer_by_baidu(record_filename)

                # err_no == 0 means the ASR call succeeded; error responses
                # carry no 'result' key, so guard both lookups
                if remote_res.get('err_no') == 0 and remote_res.get('result'):
                    self.metting_content_list.append(remote_res['result'][0])

                tmp_num += 1
                if tmp_num > 2:  # stop after three segments
                    flag = False
                    print("完结")

            if len(self.metting_content_list) > 0:
                self.metting_summary()
        except Exception as e:
            print(f"系统：run 异常：{e}")

    # meeting summary
    def metting_summary(self):
        """Join the transcribed segments and ask the AI for a summary."""
        try:
            metting_content = "。".join(self.metting_content_list)

            send_to_ai_content = f"""
                帮我总结下今天会议的内容；
                会议内容为：{metting_content}
            """

            print(send_to_ai_content)

            # BUG FIX: was self.send_to_ai(self, ...) which passed `self`
            # twice and raised a TypeError that the bare except swallowed
            res = self.send_to_ai(send_to_ai_content)

            print(res)

        except Exception as e:
            # was a silent `pass`; at least surface the failure
            print(f"系统：metting_summary 异常：{e}")

    # send content to the AI
    def send_to_ai(self, text):
        """Send `text` to the glm-4 model and return the streamed reply
        as a single string."""
        print('系统：正在调用AI模型...')
        client = ZhipuAI(api_key=self.ZHIPU_API_KEY)

        response = client.chat.completions.create(
            model="glm-4",
            messages=[
                {"role": "user", "content": text},
            ],
            stream=True,
        )

        final_str = ""
        for chunk in response:
            # the final streamed chunk may carry a None delta content
            delta = chunk.choices[0].delta.content
            if delta:
                final_str += delta

        return final_str

    def recognizer_by_baidu(self, record_filename):
        """Transcribe a local WAV file with Baidu ASR.

        Returns the raw response dict (err_no == 0 on success,
        transcript list under 'result')."""
        client = AipSpeech(self.APP_ID, self.API_KEY, self.SECRET_KEY)

        # read the whole file as bytes
        def get_file_content(filePath):
            with open(filePath, 'rb') as fp:
                return fp.read()

        # dev_pid 1537: Mandarin (with simple English), 16 kHz
        res = client.asr(get_file_content(record_filename), 'wav', 16000, {
            'dev_pid': 1537,
        })

        print(res)
        print(type(res))

        # guard: error responses have no 'result' key
        print(res.get('err_no'))
        if res.get('result'):
            print(res['result'][0])

        return res

    def check_is_out_topic(self, filename):
        """Run the off-topic check on a background thread so recording
        can continue."""
        recognition_thread = threading.Thread(
            target=self.perform_speech_recognition, args=(filename,))
        recognition_thread.start()

    def perform_speech_recognition(self, filename):
        """Transcribe `filename` with Google SR, ask the AI for a relevance
        score, and set `self.slant_flag` when the talk has drifted off topic."""
        recognizer = sr.Recognizer()
        # read the recorded audio file
        with sr.AudioFile(filename) as source:
            audio_data = recognizer.record(source)
        try:
            print("系统：正在识别语音，并转文字...")
            # Google's free recognition API, Mandarin
            text = recognizer.recognize_google(audio_data, language='zh-CN')
            print("识别的文本：", text)

            # BUG FIX: was self.send_to_ai_main(text) — no such method exists
            ai_response = self.send_to_ai(text)
            print(f"AI的回答是：{ai_response}")

            # the AI is expected to reply with a short numeric score;
            # a score >= 9 means the discussion is off topic
            try:
                is_off_topic = len(ai_response) <= 3 and int(ai_response) >= 9
            except ValueError:
                # non-numeric reply: treat as on topic instead of crashing
                is_off_topic = False

            if is_off_topic:
                print('系统：您已偏题！！！')
                self.slant_flag = 1
            else:
                print('系统：未偏题')

        except sr.UnknownValueError:
            # nothing intelligible was said
            print("系统：您没有说话")
        except sr.RequestError as e:
            # the remote service call failed
            print(f"Google 语音识别服务出错; {e}")

    def speak(self, text):
        """Speak `text` aloud via pyttsx3; errors are logged, not raised."""
        try:
            self.engine.say(text)
            self.engine.runAndWait()
            self.engine.stop()
        except Exception as e:
            print(f"speak 方法异常: {e}")

    def add_to_metting(self, item):
        """Append one item to the meeting content list."""
        # BUG FIX: was self.metting_list, an attribute that is never created
        self.metting_content_list.append(item)

    ##########################

    # record one audio segment
    def record_audio_main(self):
        """Record RECORD_SECONDS of audio from the default microphone and
        save it under wavlog/. Returns the WAV filename."""
        p = pyaudio.PyAudio()

        stream = p.open(format=self.FORMAT,
                        channels=self.CHANNELS,
                        rate=self.RATE,
                        input=True,
                        frames_per_buffer=self.CHUNK)

        print(f"系统：开始录音，持续{self.RECORD_SECONDS}秒...")

        frames = []
        for _ in range(0, int(self.RATE / self.CHUNK * self.RECORD_SECONDS)):
            frames.append(stream.read(self.CHUNK))

        print("录音结束")

        stream.stop_stream()
        stream.close()

        current_time = datetime.now().strftime("%y%m%d%H%M%S")

        # make sure the output directory exists before writing
        os.makedirs("wavlog", exist_ok=True)
        filename = (f"wavlog/recording_{self.RECORD_SECONDS}s_"
                    f"{self.RATE}hz_{current_time}.wav")

        # save as a WAV file; context manager guarantees the file is closed
        with wave.open(filename, 'wb') as wf:
            wf.setnchannels(self.CHANNELS)
            wf.setsampwidth(p.get_sample_size(self.FORMAT))
            wf.setframerate(self.RATE)
            wf.writeframes(b''.join(frames))

        p.terminate()
        return filename

    # continuous recording, pushed to the remote AI
    def record_audio_main1(self):
        """Loop forever recording 20-second clips with sounddevice, pushing
        each to the background off-topic check and speaking a warning
        whenever the off-topic flag was raised."""
        duration = 20        # clip length in seconds
        samplerate = 44100
        channels = 1

        os.makedirs("wavlog", exist_ok=True)
        while True:
            # timestamped output filename
            current_time = datetime.now().strftime("%y%m%d%H%M%S")
            filename = f"wavlog/recording_{duration}s_{samplerate}hz_{current_time}.wav"

            print(f"系统：开始录音，持续{duration}秒...")

            # record the clip and block until it finishes
            recorded_frames = sd.rec(int(duration * samplerate),
                                     samplerate=samplerate,
                                     channels=channels, dtype='int16')
            sd.wait()

            # save the recording
            with wave.open(filename, 'wb') as wf:
                wf.setnchannels(channels)
                wf.setsampwidth(np.dtype('int16').itemsize)  # 16-bit PCM
                wf.setframerate(samplerate)
                wf.writeframes(recorded_frames.tobytes())

            # BUG FIX: message previously printed a literal placeholder
            # instead of the actual path
            print(f"系统：录音完成，并保存至：{filename}")

            # hand the file to the off-topic check on its own thread
            self.check_is_out_topic(filename)

            if self.slant_flag == 1:
                tip = '请注意，当前的话题和本次的主题内容关系不大；'
                self.speak(tip)
                self.slant_flag = 0

    # announce the meeting
    def introduce_metting(self):
        """Build the opening announcement from the topic list and speak it."""
        topic_num = len(self.topic)

        content = f"会议主持人启动成功！...本次的会议主题共{topic_num}个,"

        for index, i in enumerate(self.topic):
            content += f"第{index+1}个主题是{i};"

        content += "现在，会议正式开始..."
        print(content)

        self.speak(content)

    # add a topic
    def set_topic(self, topic):
        """Append one topic to the meeting agenda."""
        self.topic.append(topic)

    def get_topic(self):
        """Print the current topic list."""
        print(self.topic)
    


# Example usage: start a host with one agenda topic and run the meeting loop.
if __name__ == "__main__":
    host = ConferenceHost()
    host.set_topic("部门周例会")
    host.run()