import sys
import os
import pyaudio
import wave
import json
import base64
import time
import cv2  # 需要安装opencv-python
import configparser
from openai import OpenAI
from urllib.request import urlopen, Request
from urllib.error import URLError
from pathlib import Path
import pyttsx3

from PySide6.QtWidgets import QApplication,QMainWindow,QLabel
from PySide6.QtGui import QImage, QPixmap
from PySide6.QtCore import QTimer, Qt, QThread, Signal

# Python 2/3 compatibility guard kept from the original Baidu STT sample code;
# under Python 3 it re-imports the urllib helpers (duplicating L11-L12) and
# selects a high-resolution timer.
IS_PY3 = sys.version_info.major == 3
if IS_PY3:
    from urllib.request import urlopen
    from urllib.request import Request
    from urllib.error import URLError
    from urllib.parse import urlencode
    timer = time.perf_counter  # NOTE(review): appears unused in this file

# Baidu OAuth2 endpoint used by MainWindow.fetch_token().
TOKEN_URL = 'http://aip.baidubce.com/oauth/2.0/token'

# Project root: the parent of the directory containing this script.
root_dir = Path(__file__).resolve().parent.parent

# region recording thread
class AudioRecorder(QThread):
    """Background thread that captures mono 16-bit microphone audio until
    :meth:`stop` is called, then writes it to ``temp_recording.wav``.

    Emits ``finished(str)`` with the path of the written WAV file.
    """

    finished = Signal(str)

    def __init__(self, config):
        """``config``: ConfigParser with optional [stt] RATE / CHUNK entries."""
        super().__init__()
        self.rate = config.getint('stt', 'RATE', fallback=16000)
        self.chunk = config.getint('stt', 'CHUNK', fallback=1024)
        self._is_recording = True

    def run(self):
        p = pyaudio.PyAudio()
        stream = p.open(format=pyaudio.paInt16,
                        channels=1,
                        rate=self.rate,
                        input=True,
                        frames_per_buffer=self.chunk)
        frames = []
        try:
            while self._is_recording:
                # exception_on_overflow=False: silently drop overflowed
                # buffers instead of raising and killing the thread when
                # the consumer momentarily stalls.
                frames.append(stream.read(self.chunk, exception_on_overflow=False))
        finally:
            # Always release the audio device, even if read() raised.
            stream.stop_stream()
            stream.close()
            # Capture the sample width before terminating the PyAudio host.
            sample_width = p.get_sample_size(pyaudio.paInt16)
            p.terminate()

        filename = "temp_recording.wav"
        with wave.open(filename, 'wb') as wf:
            wf.setnchannels(1)
            wf.setsampwidth(sample_width)
            wf.setframerate(self.rate)
            wf.writeframes(b''.join(frames))

        self.finished.emit(filename)

    def stop(self):
        """Ask the capture loop to stop; run() then finalizes the WAV file."""
        self._is_recording = False
# region AI thread
class AIChatThread(QThread):
    """Background thread that sends one user message to the DeepSeek chat
    model and emits ``finished(str)`` with the assistant's reply.
    """

    finished = Signal(str)

    def __init__(self, client, user_input):
        super().__init__()
        self.client = client          # OpenAI-compatible client (DeepSeek endpoint)
        self.user_input = user_input  # recognized speech text; may be None

    def run(self):
        print("正在问ai:", self.user_input)
        if self.user_input is None:
            self.finished.emit("请输入问题")
            # BUG FIX: the original fell through here and called the API
            # with content=None, which raises inside the thread.
            return
        fresh_messages = [
            {"role": "system", "content": "你是一位精通中国古典诗词的专家，熟悉唐诗宋词各流派风格，能够鉴赏分析古诗的意境与创作背景"},
            {"role": "user", "content": self.user_input}
        ]
        response = self.client.chat.completions.create(
            model="deepseek-chat",
            messages=fresh_messages,
            stream=False
        )
        self.finished.emit(response.choices[0].message.content)
# endregion

# region tts thread
class TTSThread(QThread):
    """Speak one piece of text on a background thread via a pyttsx3 engine."""

    def __init__(self, text, engine):
        super().__init__()
        self.text = text
        self.engine = engine

    def run(self):
        print("正在朗读：", self.text)
        speaker = self.engine
        speaker.say(self.text)
        speaker.runAndWait()
# endregion

# region main window
class MainWindow(QMainWindow):
    """Main window: plays a looping character video and, while the mouse is
    inside the window, records speech, transcribes it (Baidu STT), asks the
    DeepSeek chat model, and reads the answer aloud (pyttsx3).
    """

    def __init__(self):
        super().__init__()
        self.setGeometry(50, 50, 800, 600)

        # --- configuration -------------------------------------------------
        self.config = configparser.ConfigParser()
        # Use pathlib joins so the path works on every OS (the original
        # hard-coded a Windows backslash separator).
        config_file = str(root_dir / 'speaktoai' / 'mode_config.ini')
        self.config.read(config_file, encoding='utf-8')

        # Baidu STT credentials and endpoint.
        self.api_key = self.config.get('stt', 'API_KEY')
        self.secret_key = self.config.get('stt', 'SECRET_KEY')
        self.cuid = self.config.get('stt', 'CUID')
        self.dev_pid = self.config.getint('stt', 'DEV_PID')
        self.scope = self.config.get('stt', 'SCOPE')
        self.asr_url = self.config.get('stt', 'ASR_URL')

        # DeepSeek chat client (OpenAI-compatible API).
        ds_api = self.config.get('ai', 'API_KEY')
        self.client = OpenAI(api_key=ds_api, base_url="https://api.deepseek.com")

        self.recorder = None

        self.status_label = QLabel("鼠标移入窗口开始录音", self)
        self.status_label.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.status_label.setStyleSheet("color: white; background-color: rgba(0,0,0,0.5);")

        self.tts_engine = self.init_engine()

        # --- video playback ------------------------------------------------
        self.video_label = QLabel(self)
        self.setCentralWidget(self.video_label)

        video_path = self.config.get('role', 'VIDEO_PATH')
        self.cap = cv2.VideoCapture(video_path)
        self.fps = self.cap.get(cv2.CAP_PROP_FPS)

        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update_frame)
        # Guard against fps == 0 (unreadable file would otherwise divide by
        # zero) and pass an int: QTimer.start() expects integer milliseconds,
        # while cv2 returns fps as a float.
        interval = int(1000 // self.fps) if self.fps and self.fps > 0 else 33
        self.timer.start(interval)

    def update_frame(self):
        """Show the next video frame; loop back to the start at end of file."""
        ret, frame = self.cap.read()
        if ret:
            # Convert the OpenCV BGR frame to a Qt image.
            rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            h, w, ch = rgb_image.shape
            qt_image = QImage(rgb_image.data, w, h, ch * w, QImage.Format_RGB888)
            self.video_label.setPixmap(QPixmap.fromImage(qt_image))
        else:
            # End of video: rewind to the first frame.
            self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)

# region mouse events
    def enterEvent(self, event):
        """Start recording when the mouse enters the window."""
        if self.recorder and self.recorder.isRunning():
            return  # a recording is already in progress; don't start a second
        self.status_label.setText("录音中...")
        self.recorder = AudioRecorder(self.config)
        self.recorder.finished.connect(self.on_recording_finished)
        self.recorder.start()

    def leaveEvent(self, event):
        """Stop recording when the mouse leaves the window."""
        if self.recorder and self.recorder.isRunning():
            self.recorder.stop()
        self.status_label.setText("鼠标移入窗口开始录音")

    def on_recording_finished(self, filename):
        """Transcribe the recorded WAV and hand the text to the AI thread."""
        stt_result = self.speech_to_text(filename)
        if stt_result['status'] == 'success' and stt_result['result']:
            self.ai_thread = AIChatThread(self.client, stt_result['result'])
            self.ai_thread.finished.connect(self.handle_ai_response)
            self.ai_thread.start()
        else:
            error_msg = stt_result.get('message', '未知错误')
            self.status_label.setText(f"识别失败: {error_msg}")

        # The temporary recording is no longer needed.
        if os.path.exists(filename):
            os.remove(filename)

    def handle_ai_response(self, response_text):
        """Speak the AI answer on a background TTS thread."""
        self.tts_thread = TTSThread(response_text, self.tts_engine)
        self.tts_thread.start()
        self.ai_response = response_text  # kept for backward compatibility
# endregion

# region stt
    def fetch_token(self):
        """Request a Baidu OAuth access token.

        Returns the access token string; raises on credential or scope errors.
        """
        params = {
            'grant_type': 'client_credentials',
            'client_id': self.api_key,
            'client_secret': self.secret_key
        }
        post_data = urlencode(params)
        if IS_PY3:
            post_data = post_data.encode('utf-8')
        req = Request(TOKEN_URL, post_data)

        try:
            f = urlopen(req)
            result_str = f.read()
        except URLError as err:
            # BUG FIX: a plain URLError (e.g. DNS failure) has neither .code
            # nor .read() — only HTTPError does. Avoid masking the real error
            # with an AttributeError.
            print(f'token请求失败，错误码：{getattr(err, "code", None)}')
            if not hasattr(err, 'read'):
                raise
            result_str = err.read()

        if IS_PY3:
            result_str = result_str.decode()

        result = json.loads(result_str)
        if 'access_token' in result and 'scope' in result:
            if self.scope and (self.scope not in result['scope'].split()):
                print('scope校验失败')
                raise Exception('scope校验失败')
            return result['access_token']
        else:
            # BUG FIX: the original did ``raise print(...)``, which raises a
            # TypeError (print returns None) instead of a useful exception.
            raise Exception('请检查API_KEY和SECRET_KEY')

    def speech_to_text(self, audio_path):
        """Send an audio file to the Baidu ASR REST API.

        Returns ``{'status': 'success', 'result': text-or-None}`` on success,
        ``{'status': 'error', 'message': str}`` on any failure.
        """
        try:
            token = self.fetch_token()

            with open(audio_path, 'rb') as f:
                speech_data = f.read()

            params = {
                'dev_pid': self.dev_pid,
                'format': audio_path[-3:].lower(),  # file extension, e.g. 'wav'
                'rate': 16000,
                'token': token,
                'cuid': self.cuid,
                'channel': 1,
                # Baidu's JSON API wants the raw audio base64-encoded.
                'speech': base64.b64encode(speech_data).decode('utf-8'),
                'len': len(speech_data)
            }

            req = Request(self.asr_url, json.dumps(params).encode('utf-8'))
            req.add_header('Content-Type', 'application/json')

            with urlopen(req) as f:
                result = json.loads(f.read().decode('utf-8'))

            # First recognition hypothesis, or None when nothing was heard.
            text = result['result'][0] if result.get('result') else None
            self.result = text  # attribute kept for backward compatibility
            return {'status': 'success', 'result': text}

        except Exception as e:
            return {'status': 'error', 'message': str(e)}
# endregion

# region text-to-speech
    def init_engine(self):
        """Create a pyttsx3 engine with a slightly slower speaking rate."""
        engine = pyttsx3.init()
        rate = engine.getProperty('rate')
        engine.setProperty('rate', rate - 30)  # slow down for clarity
        return engine
# endregion

    def closeEvent(self, event):
        """Release resources; stop the timer BEFORE releasing the capture so
        no further update_frame() fires against a released device."""
        self.timer.stop()
        self.cap.release()
        super().closeEvent(event)


if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the main window,
    # and hand control to the event loop until the user closes it.
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec())