import sys
import cv2
import threading
import numpy as np
import sounddevice as sd
import soundfile as sf
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtCore import QTimer, Qt, pyqtSignal, QMetaObject, Qt, Q_ARG
from PyQt5.QtGui import QImage, QPixmap
from gui_layout import create_gui_layout
from gui_style import MAIN_STYLE_SHEET

class CameraMicGUI(QWidget):
    """Camera preview + push-to-talk microphone recorder with an AI chat panel.

    Cross-thread flow: recording/ASR/AI work runs on daemon threads and the
    results are marshalled back to the GUI thread through the two pyqtSignals
    below — worker threads must never touch Qt widgets directly.
    """

    ai_result_signal = pyqtSignal(str, bool)  # (result text, is_error)
    asr_result_signal = pyqtSignal(str)       # speech-to-text transcript

    def __init__(self):
        super().__init__()
        self.setWindowTitle('摄像头与麦克风录制+聊天')
        self.setWindowFlags(self.windowFlags() | Qt.WindowStaysOnTopHint)
        self.resize(1000, 680)
        self.setStyleSheet(MAIN_STYLE_SHEET)
        # All widgets come from the external layout factory.
        gui = create_gui_layout()
        self.setLayout(gui["main_layout"])
        self.image_label = gui["image_label"]
        self.button = gui["button"]
        self.chat_display = gui["chat_display"]
        self.chat_input = gui["chat_input"]
        self.send_button = gui["send_button"]

        # Wire up widget signals. The record button is push-to-talk:
        # pressed starts, released stops.
        self.button.pressed.connect(self.start_recording)
        self.button.released.connect(self.stop_recording)
        self.chat_input.returnPressed.connect(self.send_chat)
        self.send_button.clicked.connect(self.send_chat)

        # Camera preview, refreshed every 30 ms (~33 fps).
        self.cap = cv2.VideoCapture(0)
        self.timer = QTimer()
        self.timer.timeout.connect(self.update_frame)
        self.timer.start(30)

        # Microphone / recording state.
        self.is_recording = False
        self.audio_thread = None
        self.audio_data = []
        # FIX: always defined — previously only set after the first recording
        # and every reader had to use getattr() to avoid AttributeError.
        self.last_audio_bytes = None
        self.samplerate = 16000
        self.channels = 1
        # Pick the first device that can record; None falls back to the
        # sounddevice default when no input device is found.
        input_devices = [i for i, dev in enumerate(sd.query_devices()) if dev['max_input_channels'] > 0]
        self.input_device = input_devices[0] if input_devices else None
        self.ai_result_signal.connect(self.on_ai_result)
        self.asr_result_signal.connect(self.on_asr_result)

    def update_frame(self):
        """Grab one camera frame and show it in the preview label."""
        ret, frame = self.cap.read()
        if ret:
            # OpenCV delivers BGR; Qt expects RGB.
            rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            h, w, ch = rgb_image.shape
            bytes_per_line = ch * w
            qt_image = QImage(rgb_image.data, w, h, bytes_per_line, QImage.Format_RGB888)
            # QPixmap.fromImage copies the pixels, so the numpy buffer may be
            # freed after this call.
            self.image_label.setPixmap(QPixmap.fromImage(qt_image))

    def start_recording(self):
        """Begin capturing microphone audio on a background thread."""
        if not self.is_recording:
            self.is_recording = True
            self.button.setText('松开结束')
            self.audio_data = []
            self.audio_thread = threading.Thread(target=self.record_audio)
            self.audio_thread.start()

    def stop_recording(self):
        """Finish a push-to-talk recording: save the audio, grab a photo and
        kick off speech recognition + AI answering on a worker thread."""
        if not self.is_recording:
            return
        self.is_recording = False
        self.button.setText('按住说话')
        # Wait for the recording thread to flush its final buffers.
        if self.audio_thread is not None:
            self.audio_thread.join()
        # Persist the audio both to disk (for the ASR engine, which reads
        # 'output.wav') and as an in-memory WAV byte stream.
        audio_bytes = None
        if self.audio_data:
            audio = np.concatenate(self.audio_data, axis=0)
            sf.write('output.wav', audio, self.samplerate)
            import io
            buf = io.BytesIO()
            sf.write(buf, audio, self.samplerate, format='WAV')
            audio_bytes = buf.getvalue()
            self.last_audio_bytes = audio_bytes
        else:
            self.last_audio_bytes = None
        # Snapshot the current camera frame to send alongside the question.
        ret, frame = self.cap.read()
        if not ret:
            return
        # PNG compression level 3 (0-9): lossless and reasonably fast.
        encode_param = [cv2.IMWRITE_PNG_COMPRESSION, 3]
        _, buf = cv2.imencode('.png', frame, encode_param)
        image_bytes = buf.tobytes()
        self.chat_display.append('<i>语音识别中...</i>')
        QApplication.processEvents()

        def asr_and_ai_task():
            # Worker thread: results go back to the GUI thread via signals.
            import runAi
            import tts_dir.vosk.runVosk as runVosk
            try:
                model = runVosk.get_model()
                asr_text = runVosk.recognize_wav_file_return_text(model, 'output.wav')
                if not asr_text:
                    asr_text = '（未识别到语音内容）'
                default_question = asr_text
                # Show the transcript in the chat (GUI thread does the work).
                self.asr_result_signal.emit(default_question)
                # Only query the AI when some speech was actually recognized.
                if asr_text != '（未识别到语音内容）':
                    result = runAi.ask_image_question(default_question, image_bytes, None, audio_format='wav')
                    self.ai_result_signal.emit(result, False)
                else:
                    # Proactively tell the user we could not hear them.
                    self.ai_result_signal.emit('抱歉我没听清楚你说了什么。', False)
            except Exception as e:
                self.ai_result_signal.emit(f'AI调用出错: {e}', True)

        threading.Thread(target=asr_and_ai_task, daemon=True).start()

    def record_audio(self):
        """Recording-thread body: stream microphone blocks into audio_data
        until is_recording is cleared by the GUI thread."""
        def callback(indata, frames, time, status):
            if self.is_recording:
                self.audio_data.append(indata.copy())
        with sd.InputStream(samplerate=self.samplerate, channels=self.channels, callback=callback, device=self.input_device):
            while self.is_recording:
                sd.sleep(100)

    def send_chat(self):
        """Send the typed question plus a fresh camera snapshot (and the most
        recent recording, if any) to the AI on a worker thread."""
        import runAi
        from PyQt5.QtWidgets import QMessageBox
        user_text = self.chat_input.text().strip()
        if not user_text:
            return
        # Capture the current camera frame to accompany the question.
        ret, frame = self.cap.read()
        if not ret:
            QMessageBox.warning(self, '错误', '无法获取摄像头图片')
            return
        encode_param = [cv2.IMWRITE_PNG_COMPRESSION, 3]
        _, buf = cv2.imencode('.png', frame, encode_param)
        image_bytes = buf.tobytes()
        # Echo the user's message into the chat log.
        if self.last_audio_bytes:
            self.chat_display.append(f'<b>我：</b> [语音已发送] {user_text}')
        else:
            self.chat_display.append(f'<b>我：</b> {user_text}')
        self.chat_input.clear()
        # Lock the input widgets until on_ai_result re-enables them.
        self.send_button.setEnabled(False)
        self.chat_input.setEnabled(False)
        self.chat_display.append('<i>AI正在思考...</i>')
        QApplication.processEvents()
        # Bind the value now so the worker is unaffected by later recordings.
        audio_bytes = self.last_audio_bytes

        def ai_task():
            try:
                result = runAi.ask_image_question(user_text, image_bytes, audio_bytes, audio_format='wav')
                self.ai_result_signal.emit(result, False)
            except Exception as e:
                self.ai_result_signal.emit(f'AI调用出错: {e}', True)

        threading.Thread(target=ai_task, daemon=True).start()

    def _remove_trailing_status(self, status_text):
        """Delete a trailing status line (e.g. 'AI正在思考...') from the chat.

        FIX: the original used setPlainText(text[:-N]) with hard-coded
        character counts, which flattened ALL prior rich-text formatting
        (bold names, red errors) to plain text and left a stray newline.
        Removing only the last text block via QTextCursor preserves the
        existing formatting.
        """
        if self.chat_display.toPlainText().endswith(status_text):
            cursor = self.chat_display.textCursor()
            cursor.movePosition(cursor.End)
            cursor.select(cursor.BlockUnderCursor)
            cursor.removeSelectedText()

    def on_ai_result(self, result, is_error):
        """GUI-thread slot: display the AI answer (or error), speak it, and
        re-enable the chat input widgets."""
        self._remove_trailing_status('AI正在思考...')
        if is_error and result:
            print(f'AI调用出错: {result}')
            self.chat_display.append(f'<span style="color:red;"><b>错误：</b>{result}</span>')
        elif result:
            self.chat_display.append(f'<b>AI：</b> {result}')
            # FIX: run text-to-speech on a daemon thread — runAndWait()
            # previously blocked the Qt event loop (frozen GUI) for the
            # whole duration of the speech.
            def speak():
                try:
                    import pyttsx3
                    engine = pyttsx3.init()
                    engine.setProperty('rate', 280)  # roughly double the default rate
                    engine.say(result)
                    engine.runAndWait()
                except Exception as e:
                    print(f'语音播报出错: {e}')
            threading.Thread(target=speak, daemon=True).start()
        self.send_button.setEnabled(True)
        self.chat_input.setEnabled(True)

    def on_asr_result(self, asr_text):
        """GUI-thread slot: swap the '语音识别中...' status for the transcript
        and show the 'thinking' status while the AI call runs."""
        self._remove_trailing_status('语音识别中...')
        self.chat_display.append(f'<b>我：</b> [语音转文字] {asr_text}')
        self.chat_display.append('<i>AI正在思考...</i>')
        QApplication.processEvents()

    def closeEvent(self, event):
        """Release hardware resources on window close.

        FIX: the original only released the camera — the preview QTimer kept
        firing update_frame against a released capture, and a recording in
        progress left its thread spinning.
        """
        self.timer.stop()
        self.is_recording = False  # tells record_audio's loop/callback to stop
        if self.audio_thread is not None and self.audio_thread.is_alive():
            self.audio_thread.join(timeout=1.0)
        self.cap.release()
        event.accept()

if __name__ == '__main__':
    # Build the Qt application, show the main window, and hand control to
    # the event loop; its exit status becomes the process exit code.
    application = QApplication(sys.argv)
    window = CameraMicGUI()
    window.resize(640, 520)
    window.show()
    sys.exit(application.exec_())