import base64
import json
import os
import wave

import pyaudio
import requests
from PyQt5.QtCore import Qt, QTimer, QUrl, pyqtSignal
from PyQt5.QtGui import QColor, QFont, QPalette
from PyQt5.QtMultimedia import QMediaContent, QMediaPlayer
from PyQt5.QtWidgets import (QFrame, QHBoxLayout, QLabel, QPushButton,
                             QSizePolicy, QSpacerItem, QTextEdit, QVBoxLayout,
                             QWidget)

# Baidu speech API credentials (OAuth client-credentials flow).
# NOTE(review): hardcoded secrets — these grant billable access to the Baidu
# account and should be moved to environment variables or a config file.
API_KEY = 'i6jD0ddvKDuFs8xEpGILYHAR'
SECRET_KEY = 'XdcB5AEb2EqjJpZf2N92Olkvr9FCCuCO'

# DeepSeek chat API key (same hardcoded-secret concern as above).
DEEPSEEK_API_KEY = "sk-a6a93b36ac7a42bf9177f759d3f7da3e"

# Recording parameters
FORMAT = pyaudio.paInt16  # 16-bit signed samples
CHANNELS = 1              # mono — matches the 'channel' field sent to Baidu ASR
RATE = 16000              # 16 kHz — matches the 'rate' field sent to Baidu ASR
CHUNK = 1024              # frames per buffer read (~64 ms of audio at 16 kHz)
WAVE_OUTPUT_FILENAME = "record.wav"  # temp file holding the last utterance

class SpeechWidget(QWidget):
    """Voice-assistant widget for a hospital helper robot.

    Workflow: toggle the button to record microphone audio, transcribe it
    with Baidu ASR, send the transcript to the DeepSeek chat API, append the
    conversation to the display, and speak the reply via Baidu TTS.

    Signals:
        text_updated (str): emitted with each recognized patient utterance.

    NOTE(review): all network calls run synchronously on the UI thread and
    will freeze the GUI while pending — consider a worker thread/QThread.
    """

    text_updated = pyqtSignal(str)

    def __init__(self):
        super().__init__()
        self.init_ui()
        self.media_player = QMediaPlayer()
        self.is_recording = False
        self.audio = pyaudio.PyAudio()
        self.frames = []    # raw audio buffers accumulated while recording
        self.stream = None  # active PyAudio input stream; None when idle
        self.timer = None   # QTimer that polls the stream while recording
        self._token = None  # session cache for the Baidu OAuth access token

    def init_ui(self):
        """Build the static layout: title, status panel, transcript area,
        and the centered record toggle button."""
        main_layout = QVBoxLayout()

        # Title banner
        title_label = QLabel("医疗助手小飞")
        title_font = QFont("Microsoft YaHei", 24, QFont.Bold)
        title_label.setFont(title_font)
        title_label.setAlignment(Qt.AlignCenter)
        title_label.setStyleSheet("color: #2c3e50; margin: 20px;")

        # Status panel reflecting the current pipeline state
        status_frame = QFrame()
        status_frame.setFrameStyle(QFrame.StyledPanel)
        status_frame.setStyleSheet("""
            QFrame {
                background-color: #f8f9fa;
                border-radius: 10px;
                padding: 10px;
            }
        """)
        status_layout = QVBoxLayout()
        self.status_label = QLabel("就绪")
        self.status_label.setFont(QFont("Microsoft YaHei", 12))
        self.status_label.setAlignment(Qt.AlignCenter)
        status_layout.addWidget(self.status_label)
        status_frame.setLayout(status_layout)

        # Read-only conversation transcript
        self.text_display = QTextEdit()
        self.text_display.setReadOnly(True)
        self.text_display.setMinimumHeight(300)
        self.text_display.setFont(QFont("Microsoft YaHei", 11))
        self.text_display.setStyleSheet("""
            QTextEdit {
                background-color: white;
                border: 2px solid #e0e0e0;
                border-radius: 10px;
                padding: 10px;
            }
        """)

        # Record start/stop toggle button, centered with stretches
        button_layout = QHBoxLayout()
        self.btn_record = QPushButton("开始录音")
        self.btn_record.setFont(QFont("Microsoft YaHei", 12))
        self.btn_record.setStyleSheet("""
            QPushButton {
                background-color: #3498db;
                color: white;
                border-radius: 5px;
                padding: 10px 20px;
                min-width: 120px;
            }
            QPushButton:hover {
                background-color: #2980b9;
            }
            QPushButton:pressed {
                background-color: #2472a4;
            }
        """)
        self.btn_record.clicked.connect(self.toggle_recording)

        button_layout.addStretch()
        button_layout.addWidget(self.btn_record)
        button_layout.addStretch()

        # Assemble the main layout
        main_layout.addWidget(title_label)
        main_layout.addWidget(status_frame)
        main_layout.addWidget(self.text_display)
        main_layout.addLayout(button_layout)

        self.setLayout(main_layout)

    def toggle_recording(self):
        """Flip between recording and idle on each button click."""
        if self.is_recording:
            self.stop_recording()
        else:
            self.start_recording()

    def start_recording(self):
        """Open the microphone stream and start polling it for audio."""
        self.is_recording = True
        self.btn_record.setText("停止录音")
        self.status_label.setText("正在录音...")
        self.frames = []

        try:
            self.stream = self.audio.open(format=FORMAT,
                                          channels=CHANNELS,
                                          rate=RATE,
                                          input=True,
                                          frames_per_buffer=CHUNK)

            # Poll from the Qt event loop instead of a dedicated thread.
            self.timer = QTimer()
            self.timer.timeout.connect(self.record_chunk)
            self.timer.start(100)  # poll every 100 ms

        except Exception as e:
            self.status_label.setText(f"录音错误: {str(e)}")
            self.stop_recording()

    def record_chunk(self):
        """Timer callback: drain every complete buffer currently available.

        BUGFIX: one CHUNK holds only ~64 ms of audio (1024 frames / 16 kHz)
        while the timer fires every 100 ms, so reading a single chunk per
        tick (as the original did) silently dropped roughly a third of the
        audio and eventually overflowed the input buffer.  Draining all
        available frames — with the overflow exception disabled — captures
        the full utterance.
        """
        if not (self.is_recording and self.stream):
            return
        try:
            while self.stream.get_read_available() >= CHUNK:
                self.frames.append(
                    self.stream.read(CHUNK, exception_on_overflow=False))
        except Exception as e:
            self.status_label.setText(f"录音错误: {str(e)}")
            self.stop_recording()

    def stop_recording(self):
        """Stop capture, save the WAV file, then run the ASR→chat→TTS
        pipeline on the recorded audio."""
        self.is_recording = False
        self.btn_record.setText("开始录音")
        self.status_label.setText("处理中...")

        # BUGFIX: the poll timer was never stopped and kept firing forever.
        if self.timer is not None:
            self.timer.stop()

        if self.stream:
            self.stream.stop_stream()
            self.stream.close()
            self.stream = None

        if self.frames:
            # Persist the capture; the context manager closes the file even
            # if a write fails (the original leaked the handle on error).
            with wave.open(WAVE_OUTPUT_FILENAME, 'wb') as wf:
                wf.setnchannels(CHANNELS)
                wf.setsampwidth(self.audio.get_sample_size(FORMAT))
                wf.setframerate(RATE)
                wf.writeframes(b''.join(self.frames))

            # Recognize, ask the LLM, display, and speak the reply.
            text = self.speech_recognition(WAVE_OUTPUT_FILENAME)
            if text:
                self.text_display.append(f"患者: {text}")
                reply = self.get_deepseek_response(text)
                if reply:
                    # NOTE(review): prefix says 小智 while the title and
                    # system prompt say 小飞 — probably an inconsistency,
                    # kept as-is to preserve behavior; confirm with owner.
                    self.text_display.append(f"小智: {reply}")
                    self.text_to_speech(reply)
                self.text_updated.emit(text)

        self.status_label.setText("就绪")

    def get_token(self):
        """Return a Baidu OAuth access token, cached for the session.

        Raises on network failure or bad credentials; both call sites wrap
        this in try/except, so errors surface via their status messages.
        """
        if self._token:
            return self._token
        url = (f"https://openapi.baidu.com/oauth/2.0/token"
               f"?grant_type=client_credentials"
               f"&client_id={API_KEY}&client_secret={SECRET_KEY}")
        res = requests.get(url, timeout=10)  # timeout: don't hang the UI
        res.raise_for_status()
        self._token = res.json()['access_token']
        return self._token

    def speech_recognition(self, filename):
        """Send a WAV file to Baidu ASR and return the best transcript.

        Args:
            filename: path to the recorded WAV (mono, 16 kHz, 16-bit).

        Returns:
            The transcript string, or None on any failure (the status label
            is updated with the reason).
        """
        try:
            token = self.get_token()
            with open(filename, 'rb') as f:
                speech_data = f.read()

            payload = {
                'format': 'wav',
                'rate': 16000,
                'channel': 1,
                'token': token,
                'cuid': '123456PYQT',
                'len': len(speech_data),  # length of the RAW audio bytes
                'speech': base64.b64encode(speech_data).decode('utf-8'),
                'dev_pid': 1537,  # Mandarin model
            }

            # json= sets the application/json header and serializes for us.
            response = requests.post('https://vop.baidu.com/server_api',
                                     json=payload, timeout=15)
            result = response.json()

            if result.get('err_no') == 0:
                return result['result'][0]
            self.status_label.setText(f"识别失败: {result.get('err_msg', '未知错误')}")
            return None

        except Exception as e:
            self.status_label.setText(f"识别错误: {str(e)}")
            return None

    def get_deepseek_response(self, prompt):
        """Ask the DeepSeek chat API for a reply to the patient's utterance.

        Args:
            prompt: the recognized patient text.

        Returns:
            The assistant's reply string, or None on failure (the status
            label is updated with the reason).
        """
        try:
            system_prompt = """你是一个医院助理的医疗机器人，名叫小飞。你需要：
1. 给患者提供关爱和关怀
2. 用温暖友善的语气交流
3. 作为机械臂助手，可以帮患者拿东西
4. 在回答中体现专业性和同理心
5. 适当询问患者的感受和需求
请用简短、温暖的方式回答。"""

            url = "https://api.deepseek.com/v1/chat/completions"
            headers = {
                "Authorization": f"Bearer {DEEPSEEK_API_KEY}",
                "Content-Type": "application/json",
            }
            data = {
                "model": "deepseek-chat",
                "messages": [
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": prompt},
                ],
                "temperature": 0.7,
                "max_tokens": 512,
            }

            # Generation can be slow, so allow a generous timeout.
            response = requests.post(url, headers=headers, json=data,
                                     timeout=60)
            result = response.json()

            if "choices" in result and len(result["choices"]) > 0:
                return result["choices"][0]["message"]["content"]
            self.status_label.setText("DeepSeek API 调用失败")
            return None

        except Exception as e:
            self.status_label.setText(f"DeepSeek API 错误: {str(e)}")
            return None

    def text_to_speech(self, text):
        """Synthesize ``text`` with Baidu TTS, save it to response.mp3 and
        play it through the widget's media player."""
        try:
            token = self.get_token()
            url = "http://tsn.baidu.com/text2audio"
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded'
            }
            data = {
                'tex': text,
                'tok': token,
                'cuid': '123456PYQT',
                'ctp': 1,
                'lan': 'zh',
                'vol': 5,
                'per': 0,  # voice selection — confirm meaning in Baidu docs
            }
            response = requests.post(url, headers=headers, data=data,
                                     timeout=15)
            # Baidu returns audio bytes on success but a JSON error body on
            # failure; the Content-Type header distinguishes the two.
            # .get() avoids a KeyError when the header is missing entirely.
            if 'audio' in response.headers.get('Content-Type', ''):
                with open('response.mp3', 'wb') as f:
                    f.write(response.content)
                self.status_label.setText("语音合成成功，正在播放...")
                # Absolute path: QUrl.fromLocalFile with a relative path
                # depends on the process working directory.
                self.media_player.setMedia(QMediaContent(
                    QUrl.fromLocalFile(os.path.abspath('response.mp3'))))
                self.media_player.play()
            else:
                self.status_label.setText(f"语音合成失败: {response.text}")
        except Exception as e:
            self.status_label.setText(f"语音合成错误: {str(e)}")

    def clear_text(self):
        """Wipe the conversation transcript."""
        self.text_display.clear()

    def closeEvent(self, event):
        """Release audio/playback resources when the widget closes."""
        if self.timer is not None:
            self.timer.stop()
        if self.stream:
            self.stream.stop_stream()
            self.stream.close()
        self.audio.terminate()
        self.media_player.stop()
        event.accept()
