import sys
import cv2
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWidgets import QApplication, QMainWindow,QMessageBox
import aiServer.aiserver
from aiServer.ImageUnderstanding import ImageUnderstandingClient
from Layout.layout import LayoutSetup
from PyQt5.QtGui import QImage, QPixmap

# iFlytek Spark API credentials.
# SECURITY NOTE(review): credentials are hard-coded in source; move them to an
# environment variable or config file before sharing or deploying this code.
appid = "fada3b94"
apisecret = "ODI5NjAwN2JhYjFlOThhZWY2MjhlMWMy"
apikey = "fa3e7dd55775919bebcdf3daa423d802"
# WebSocket endpoint of the Spark v3.5 chat API.
Spark_url = "wss://spark-api.xf-yun.com/v3.5/chat"
# Mutable module-level chat history sent to the model on every request.
# Index 0 is the system prompt (TCM "inquiry" persona); index 1 is a placeholder
# that the image-analysis result is written into (see PastKing.display_result).
text = [
    {
        "role": "system",
        "content": "假如你是专业的中医，你简要进行望闻问切中的“问”，记住你是专业的中医，除了分析“问”，不用总结，不要说其他废话，而与你对话的人是病人"
    },
    {
        "role": "assistant",
        "content": ""
    }
]

def getText(role, content):
    """Append a {role, content} message to the global chat history and return it."""
    message = {"role": role, "content": content}
    text.append(message)
    return text

def getlength(text):
    """Return the total number of characters across all message contents in *text*."""
    total = 0
    for message in text:
        total += len(message["content"])
    return total

def checklen(text):
    """Trim the chat history in place until total content length is <= 8000 chars.

    The original implementation deleted from index 0, which discards the system
    prompt (the assistant's persona) first.  This version drops the oldest
    non-system entry (index 1) so the system prompt at index 0 survives trimming.

    Args:
        text: list of {"role": ..., "content": ...} message dicts.

    Returns:
        The same (mutated) list.
    """
    while sum(len(m["content"]) for m in text) > 8000:
        # Keep the system prompt at index 0; drop the oldest history entry.
        # Fall back to index 0 only when it is the sole remaining message.
        del text[1 if len(text) > 1 else 0]
    return text

class WorkerThread(QThread):
    """Background thread that sends one JPEG image to the image-understanding API.

    Streams partial results into ``result_text`` via ``handle_result`` and emits
    the accumulated text through ``result_signal`` when the request finishes.
    """

    # Emitted once with the full accumulated analysis text (possibly empty).
    result_signal = pyqtSignal(str)

    def __init__(self, appid, apikey, apisecret, image_data):
        """Store API credentials and the JPEG-encoded image bytes to analyze."""
        super().__init__()
        self.appid = appid
        self.apikey = apikey
        self.apisecret = apisecret
        self.image_data = image_data
        self.result_text = ""  # accumulated streamed result chunks

    def run(self):
        """Connect to the API and collect the analysis result.

        Fix: the signal is now emitted in ``finally`` — previously an exception
        skipped the emit, so the UI handler (which re-enables the analyze button)
        never ran and the button stayed disabled forever.
        """
        try:
            client = ImageUnderstandingClient(self.appid, self.apikey, self.apisecret, self.image_data, self.handle_result)
            # Presumably blocks until the request completes — confirm against
            # ImageUnderstandingClient; result chunks arrive via handle_result.
            client.connect()
        except Exception as e:
            print(f"An error occurred in worker thread: {e}")
        finally:
            # Always notify the UI, even on failure, so it can recover.
            self.result_signal.emit(self.result_text)

    def handle_result(self, result):
        """Callback for streamed result chunks; appends each chunk to the buffer."""
        self.result_text += result

class PastKing(QMainWindow):
    """Main window: live camera feed with face detection, one-shot image
    analysis, and a chat interface backed by the Spark AI service."""

    def __init__(self):
        super().__init__()

        # Build the UI and wire each button to its handler.
        self.layout = LayoutSetup(self)
        self.layout.setup_connections(self.toggle_video, self.analyze_image, self.send_text, self.clear_text, self.show_about)

        self.current_frame = None  # most recent BGR camera frame; None until first grab
        self.sid = ""  # session id placeholder (currently unused)

        self.layout.cap.open(0)  # default camera
        if not self.layout.faceCascade.load("./haarcascades/haarcascade_frontalface_default.xml"):
            print("Error loading face cascade")
            return
        # Refresh the video widget every 100 ms.
        self.layout.timer.timeout.connect(self.update_frame)
        self.layout.timer.start(100)

    def update_frame(self):
        """Grab a frame, draw rectangles around detected faces, and show it."""
        ret, frame = self.layout.cap.read()
        if not ret:
            print("Failed to grab frame")
            return

        self.current_frame = frame

        # Face detection runs on the grayscale image.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = self.layout.faceCascade.detectMultiScale(gray, 1.1, 10, 0 | cv2.CASCADE_SCALE_IMAGE, (30, 30))

        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

        # OpenCV delivers BGR; Qt expects RGB.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = QImage(frame.data, frame.shape[1], frame.shape[0], frame.shape[1] * frame.shape[2], QImage.Format_RGB888)
        self.layout.vid.setPixmap(QPixmap.fromImage(image))
        self.layout.vid.setScaledContents(True)

    def toggle_video(self):
        """Toggle the camera on/off and update the button label to match."""
        if self.layout.cap.isOpened():
            self.layout.cap.release()
            self.layout.timer.stop()
            self.layout.closeVid.setText("开启摄像头")
        else:
            self.layout.cap.open(0)
            self.layout.timer.start(100)
            self.layout.closeVid.setText("关闭摄像头")

    def analyze_image(self):
        """Send the current frame for AI analysis if it contains a face.

        Fix: explicitly guard against ``self.current_frame`` being None (camera
        off or no frame grabbed yet) instead of letting cv2.cvtColor raise into
        the broad except handler.
        """
        try:
            self.layout.analyzeBtn.setEnabled(False)
            if self.current_frame is None:
                # No frame available yet — nothing to analyze.
                self.layout.textEdit.append("未检测到人脸")
                self.layout.analyzeBtn.setEnabled(True)
                return

            gray = cv2.cvtColor(self.current_frame, cv2.COLOR_BGR2GRAY)
            faces = self.layout.faceCascade.detectMultiScale(gray, 1.1, 10, 0 | cv2.CASCADE_SCALE_IMAGE, (30, 30))

            if len(faces) > 0:
                # Encode the frame as JPEG bytes for the API.
                _, buffer = cv2.imencode('.jpg', self.current_frame)
                image_data = buffer.tobytes()
                # Keep a reference on self so the thread isn't garbage-collected
                # while running; display_result re-enables the button.
                self.worker_thread = WorkerThread(appid, apikey, apisecret, image_data)
                self.worker_thread.result_signal.connect(self.display_result)
                self.worker_thread.start()
            else:
                self.layout.textEdit.append("未检测到人脸")
                self.layout.analyzeBtn.setEnabled(True)
        except Exception as e:
            print(f"An error occurred: {e}")
            self.layout.analyzeBtn.setEnabled(True)

    def display_result(self, result):
        """Show the image-analysis result and seed it into the chat context."""
        self.layout.textEdit.append(f"分析结果: {result}")
        # Index 1 of the module-level history is reserved for this summary.
        text[1]["content"] = result
        self.layout.analyzeBtn.setEnabled(True)

    def send_text(self):
        """Send the user's typed message to the AI on a background thread."""
        edText = self.layout.lineEdit.text()
        if edText:
            self.layout.textEdit.append(f"用户: {edText}")
            self.layout.textEdit.append("AI: 处理中...")
            # Record the message with role "user" and trim history to fit.
            question = checklen(getText("user", edText))
            # NOTE(review): patching QThread.run with a lambda works, but the
            # handler then updates widgets from a non-GUI thread; routing the
            # answer through a signal (as WorkerThread does) would be safer.
            self.worker_thread = QThread()
            self.worker_thread.run = lambda: self.get_ai_response(question)
            self.worker_thread.start()

    def clear_text(self):
        """Clear the conversation display."""
        self.layout.textEdit.clear()

    def show_about(self):
        """Show the About dialog."""
        QMessageBox.information(self, "关于", "昔尘科技提供技术支持\n啊伸缩叉删除记录的vi哦索朗茶农is啊咯是阿松大葱那是你咯", QMessageBox.Ok)

    def get_ai_response(self, question):
        """Call the Spark chat API, record the answer, and append it to the view.

        Runs on a worker thread (see send_text); errors are reported inline.
        """
        try:
            aiServer.aiserver.main(appid, apikey, apisecret, Spark_url, "generalv3.5", question)
            getText("assistant", aiServer.aiserver.answer)
            self.layout.textEdit.append(f"AI: {aiServer.aiserver.answer}")
        except Exception as e:
            self.layout.textEdit.append(f"AI: 处理出错: {e}")

if __name__ == "__main__":
    # Script entry point: create the Qt application and show the main window.
    app = QApplication(sys.argv)
    window = PastKing()
    window.show()
    # exec_() blocks until the last window closes; its exit code goes to sys.exit.
    sys.exit(app.exec_())


""""""""""""""""
# -*- coding: utf-8 -*-
# 导入所需模块

from PyQt5.QtWidgets import QMainWindow, QMessageBox
from PyQt5.QtGui import QImage, QPixmap
import cv2
from PyQt5.QtCore import QThread

# 定义主窗口类
class PastKing(QMainWindow):
    def __init__(self):
        super().__init__()

        # 初始化布局与界面组件
        self.layout = LayoutSetup(self)

        # 连接按钮与对应的槽函数
        self.layout.setup_connections(
            self.toggle_video,   # 摄像头开关
            self.analyze_image,  # 图像分析
            self.send_text,      # 发送文字
            self.clear_text,     # 清空对话
            self.show_about      # 关于窗口
        )

        self.current_frame = None  # 当前摄像头帧图像
        self.sid = ""              # 会话 ID（可扩展用）

        # 打开默认摄像头
        self.layout.cap.open(0)

        # 加载人脸检测模型
        if not self.layout.faceCascade.load("./haarcascades/haarcascade_frontalface_default.xml"):
            print("Error loading face cascade")
            return

        # 设置定时器，每100ms更新一次摄像头画面
        self.layout.timer.timeout.connect(self.update_frame)
        self.layout.timer.start(100)

    # 更新摄像头帧画面并检测人脸
    def update_frame(self):
        ret, frame = self.layout.cap.read()
        if not ret:
            print("Failed to grab frame")
            return

        self.current_frame = frame

        # 将图像转换为灰度图，用于人脸检测
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = self.layout.faceCascade.detectMultiScale(gray, 1.1, 10, 0 | cv2.CASCADE_SCALE_IMAGE, (30, 30))

        # 在检测到的人脸区域绘制矩形框
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (w + x, h + y), (255, 0, 0), 2)

        # 将图像转换为 Qt 格式并显示在界面上
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = QImage(frame.data, frame.shape[1], frame.shape[0], frame.shape[1] * frame.shape[2], QImage.Format_RGB888)
        self.layout.vid.setPixmap(QPixmap.fromImage(image))
        self.layout.vid.setScaledContents(True)

    # 摄像头开关函数
    def toggle_video(self):
        if self.layout.cap.isOpened():
            self.layout.cap.release()
            self.layout.timer.stop()
            self.layout.closeVid.setText("开启摄像头")
        else:
            self.layout.cap.open(0)
            self.layout.timer.start(100)
            self.layout.closeVid.setText("关闭摄像头")

    # 图像分析按钮回调函数
    def analyze_image(self):
        try:
            self.layout.analyzeBtn.setEnabled(False)

            # 转换为灰度图后检测人脸
            gray = cv2.cvtColor(self.current_frame, cv2.COLOR_BGR2GRAY)
            faces = self.layout.faceCascade.detectMultiScale(gray, 1.1, 10, 0 | cv2.CASCADE_SCALE_IMAGE, (30, 30))

            if len(faces) > 0:
                # 如果检测到人脸，编码图像为 JPEG 格式字节数据
                _, buffer = cv2.imencode('.jpg', self.current_frame)
                image_data = buffer.tobytes()

                # 创建后台线程发送图像进行分析
                self.worker_thread = WorkerThread(appid, apikey, apisecret, image_data)
                self.worker_thread.result_signal.connect(self.display_result)
                self.worker_thread.start()
            else:
                self.layout.textEdit.append("未检测到人脸")
                self.layout.analyzeBtn.setEnabled(True)
        except Exception as e:
            print(f"An error occurred: {e}")
            self.layout.analyzeBtn.setEnabled(True)

    # 显示图像分析结果
    def display_result(self, result):
        self.layout.textEdit.append(f"分析结果: {result}")
        text[1]["content"] = result  # 更新对话上下文中的 AI 内容
        self.layout.analyzeBtn.setEnabled(True)

    # 用户发送文本到 AI
    def send_text(self):
        edText = self.layout.lineEdit.text()
        if edText:
            self.layout.textEdit.append(f"用户: {edText}")
            self.layout.textEdit.append("AI: 处理中...")

            # 构造带上下文的消息结构
            question = checklen(getText("user", edText))

            # 使用线程调用 AI 接口
            self.worker_thread = QThread()
            self.worker_thread.run = lambda: self.get_ai_response(question)
            self.worker_thread.start()

    # 清空对话内容
    def clear_text(self):
        self.layout.textEdit.clear()

    # 显示“关于”信息
    def show_about(self):
        QMessageBox.information(self, "关于", "昔尘科技提供技术支持\n啊伸缩叉删除记录的vi哦索朗茶农is啊咯是阿松大葱那是你咯", QMessageBox.Ok)

    # 获取 AI 回复（通过 API 接口调用）
    def get_ai_response(self, question):
        try:
            aiServer.aiserver.main(appid, apikey, apisecret, Spark_url, "generalv3.5", question)
            getText("assistant", aiServer.aiserver.answer)
            self.layout.textEdit.append(f"AI: {aiServer.aiserver.answer}")
        except Exception as e:
            self.layout.textEdit.append(f"AI: 处理出错: {e}")
""""""""""""""""