from openai import OpenAI
import cv2
from paddleocr import PaddleOCR
from aip import AipSpeech
import pygame
import requests
import json
import base64
import os
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import RPi.GPIO as GPIO
import time
import joblib
import sys

# Module-level flags shared across the task flow.
TASK_COMPLETED = False  # set True once the current task sequence finishes
IS_EXTENSION_TASK_TWO = False  # selects the "extension task two" branch of the flow

# 工具函数集合

def format_number(number, decimal_places=2):
    """Format a number with a controlled count of decimal places.

    Args:
        number: Numeric value (int or float) to format.
        decimal_places: Decimal places to keep (default 2). When 0, the value
            is rounded to the nearest integer.

    Returns:
        The formatted number as a string. Integer-valued inputs are rendered
        without a decimal point regardless of decimal_places.
    """
    if number == int(number):
        # Exact integer value: drop the decimal point entirely.
        return str(int(number))
    if decimal_places == 0:
        # BUGFIX: round instead of truncating so 3.7 -> "4", consistent with
        # the ".0f" f-string formatting used for other decimal_places values.
        return str(round(number))
    return f"{number:.{decimal_places}f}"
# global_vars = {'is_extension_task_two': False, 'task_completed': False}
class GPIOHandler:
    """Maps physical GPIO push-buttons to the key codes the UI loops expect.

    Pins are configured with internal pull-ups, so the idle state is HIGH and
    pressing a button pulls the line LOW; a key code is emitted on the
    LOW->HIGH transition (button release).
    """

    def __init__(self):
        GPIO.setmode(GPIO.BCM)
        # Button mapping: pin -> emitted key code.
        #   18 -> 32 (space), 23 -> 1073742053 (pygame "target" key),
        #   21 -> 121 ('y'),  4 -> 101 ('e')
        self.pin_map = {
            18: {'key': 32, 'last_state': GPIO.HIGH},
            23: {'key': 1073742053, 'last_state': GPIO.HIGH},
            21: {'key': 121, 'last_state': GPIO.HIGH},
            4: {'key': 101, 'last_state': GPIO.HIGH}
        }
        for pin in self.pin_map:
            GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)

    def check_keypress(self):
        """Poll all pins once; return the mapped key code on a release edge, else None."""
        for pin, config in self.pin_map.items():
            current_state = GPIO.input(pin)
            if current_state != config['last_state']:
                time.sleep(0.02)  # 20 ms debounce delay
                # NOTE(review): the pin is not re-read after the delay, so a
                # contact bounce that settles back can still be reported as a
                # press — confirm this is acceptable for these buttons.
                config['last_state'] = current_state
                if current_state == GPIO.HIGH:
                    return config['key']
        return None

    def simulate_keypress(self, key_value):
        """Log and echo back a key code as if the matching button were pressed."""
        print(f"模拟按下键值: {key_value}")
        return key_value


# Shared module-level button handler, polled by every UI loop in this file.
gpio_handler = GPIOHandler()


class OfflineFaceRecognizer:
    """LBPH-based offline face recognition with a DNN (SSD) face detector.

    Loads user names/roles from task_file/rwyml/name.txt (first 5 lines are
    user names, remaining lines role descriptions), drives an 800x480
    'Face System' window, and supports training a fresh LBPH model from
    webcam captures as well as one-shot user identification.
    """

    def __init__(self):
        # Load user metadata: lines 1-5 are names, line 6 onward are roles.
        self.names = []
        with open("task_file/rwyml/name.txt", 'r', encoding='utf-8') as f:
            lines = [line.strip() for line in f.readlines()]
            self.names = lines[:5]  # first 5 lines: user names
            self.roles = lines[5:]  # line 6 onward: role descriptions
        self.num_users = len(self.names)
        self.data_dir = 'data'

        # LBPH recognizer plus a Caffe SSD network for face localization.
        self.recognizer = cv2.face.LBPHFaceRecognizer_create()
        self.face_net = cv2.dnn.readNetFromCaffe("task_file/deploy.prototxt",
                                                 "task_file/res10_300x300_ssd_iter_140000.caffemodel")

        # Chinese-capable font for PIL text overlays.
        self.font = ImageFont.truetype('task_file/simhei.ttf', 20)

        # Camera and display window setup (800x480 screen at the origin).
        self.cap = cv2.VideoCapture(0)
        self.current_id = 0
        self.counts = [0] * self.num_users
        cv2.namedWindow('Face System', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('Face System', 800, 480)
        cv2.moveWindow('Face System', 0, 0)

        # Reuse a previously trained model when one exists on disk.
        if os.path.exists("task_file/trainer.yml"):
            self.recognizer.read("task_file/trainer.yml")

    def detect_faces_dnn(self, frame):
        """Detect faces in a BGR frame; return a list of (x, y, w, h) boxes."""
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
                                     (300, 300), (104.0, 177.0, 123.0))
        self.face_net.setInput(blob)
        detections = self.face_net.forward()
        faces = []
        for i in range(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            if confidence > 0.5:  # detection score threshold
                # Detection box is normalized; scale back to frame pixels.
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                faces.append((startX, startY, endX - startX, endY - startY))
        return faces

    def show_initial_interface(self):
        """Show the start screen and wait for a mode choice.

        Returns:
            "拓展任务二" when 'e'/GPIO4 selects extension task two,
            False on ESC, True after training ('s') or task start (space).
        """
        while True:
            ret, frame = self.cap.read()
            if not ret: break
            # Camera is mounted upside down; flip the frame upright.
            frame = cv2.rotate(frame, cv2.ROTATE_180)
            frame_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            draw = ImageDraw.Draw(frame_pil)
            draw.text((10, 20), "按 S 开始训练 | 空格开始任务", font=self.font, fill=(0, 255, 0))
            frame = cv2.cvtColor(np.array(frame_pil), cv2.COLOR_RGB2BGR)
            faces = self.detect_faces_dnn(frame)
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            cv2.imshow('Face System', frame)

            key = cv2.waitKey(1)
            # Physical GPIO buttons mirror the keyboard shortcuts.
            gpio_key = gpio_handler.check_keypress()
            if gpio_key == 32:  # GPIO 18 -> space
                key = 32
            elif gpio_key == 1073742053:  # GPIO 23 -> 's'
                key = ord('s')
            elif gpio_key == 101:  # GPIO 4 -> 'e'
                key = ord('e')
            if key == ord('s'):
                self.train_new_model()
                self.cap.release()
                break
            elif key == 32:  # space: proceed to the task
                self.cap.release()
                break
            elif key == ord('e'):  # 'e': enter extension task two
                self.cap.release()
                cv2.destroyAllWindows()
                # Pump the HighGUI event loop so the window really closes.
                for _ in range(3):
                    cv2.waitKey(1)
                return "拓展任务二"
            elif key == 27:  # ESC: quit
                cv2.destroyAllWindows()
                return False
        cv2.destroyAllWindows()
        return True

    def train_new_model(self):
        """Capture 15 face samples per user from the webcam, then retrain."""
        if not os.path.exists(self.data_dir):
            os.makedirs(self.data_dir)

        for user_id in range(self.num_users):
            user_dir = os.path.join(self.data_dir, f'user{user_id}')
            if not os.path.exists(user_dir):
                os.makedirs(user_dir)

            print(f"正在采集 {self.names[user_id]} 的数据...")
            count = 0

            while count < 15:
                ret, frame = self.cap.read()
                if not ret: continue
                frame = cv2.rotate(frame, cv2.ROTATE_180)
                frame_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                draw = ImageDraw.Draw(frame_pil)
                faces = self.detect_faces_dnn(frame)
                if len(faces) == 1:
                    x, y, w, h = faces[0]
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    # Re-create the PIL image so the rectangle is included.
                    frame_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                    draw = ImageDraw.Draw(frame_pil)
                    draw.text((x, y - 25), f"检测到人脸，按空格拍摄 ({count}/15)",
                              font=self.font, fill=(0, 255, 0))
                else:
                    draw.text((10, 20), "未检测到人脸", font=self.font, fill=(0, 0, 255))

                draw.text((10, 40), f"当前用户：{self.names[user_id]}",
                          font=self.font, fill=(0, 255, 0))
                draw.text((10, 60), "ESC退出 | 空格拍摄",
                          font=self.font, fill=(0, 255, 0))

                cv2.imshow('Face System', cv2.cvtColor(np.array(frame_pil), cv2.COLOR_RGB2BGR))

                key = cv2.waitKey(30)
                gpio_key = gpio_handler.check_keypress()
                if gpio_key == 32:  # GPIO 18 -> space
                    key = 32

                if key == 32 and len(faces) == 1:
                    # Save the grayscale face crop as the next training sample.
                    face_roi = frame[y:y + h, x:x + w]
                    gray = cv2.cvtColor(face_roi, cv2.COLOR_BGR2GRAY)
                    cv2.imwrite(os.path.join(user_dir, f'{count}.jpg'), gray)
                    count += 1
                    VoiceModule().play_audio("music/叮声.mp3")
                elif key == 27:
                    break
            if user_id < len(self.names) - 1:  # not the last user yet
                VoiceModule().play_audio("music/开始下一个.mp3")
        self.train_recognizer()
        print("模型训练完成！")

    def train_recognizer(self):
        """Train the LBPH recognizer from all saved samples and persist it."""
        faces = []
        labels = []

        for user_id in range(self.num_users):
            user_dir = os.path.join(self.data_dir, f'user{user_id}')
            if not os.path.exists(user_dir):
                continue

            for img_name in os.listdir(user_dir):
                img_path = os.path.join(user_dir, img_name)
                img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
                faces.append(img)
                labels.append(user_id)

        if len(faces) == 0:
            print("错误：没有找到训练数据！")
            return

        self.recognizer.train(faces, np.array(labels))
        self.recognizer.save('task_file/trainer.yml')

    def recognize_user(self):
        """Live-preview loop: on space, snapshot and identify the visible face.

        Returns:
            The matched user's name when the LBPH confidence (a distance;
            lower is better) is below 70, otherwise None after the user
            exits or recognition fails.
        """
        self.cap = cv2.VideoCapture(0)
        recognition_result = None
        last_face_rect = None

        try:
            while True:
                ret, frame = self.cap.read()
                if not ret: break
                frame = cv2.rotate(frame, cv2.ROTATE_180)
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frame_pil = Image.fromarray(frame_rgb)
                draw = ImageDraw.Draw(frame_pil)

                faces = self.detect_faces_dnn(frame)
                if len(faces) == 1:
                    x, y, w, h = faces[0]
                    last_face_rect = (x, y, w, h)
                    draw.rectangle([x, y, x + w, y + h],
                                   outline=(0, 255, 0), width=2)
                    # Live identification of the current face.
                    face_roi = frame[y:y + h, x:x + w]
                    gray = cv2.cvtColor(face_roi, cv2.COLOR_BGR2GRAY)
                    id_, confidence = self.recognizer.predict(gray)
                    # Show the live result as an overlay.
                    if confidence < 70:  # acceptance threshold (distance)
                        name = self.names[id_]
                        display_text = f"{name}"
                        draw.text((x + w + 10, y), display_text, font=self.font, fill=(0, 255, 0))
                    else:
                        draw.text((x + w + 10, y), "未识别到用户", font=self.font, fill=(255, 0, 0))
                    draw.text((10, 30), "检测到人脸，按空格拍照", font=self.font, fill=(0, 255, 0))
                else:
                    last_face_rect = None
                    draw.text((10, 30), "未检测到人脸", font=self.font, fill=(255, 0, 0))

                cv2.imshow('Face System', cv2.cvtColor(np.array(frame_pil), cv2.COLOR_RGB2BGR))

                key = cv2.waitKey(30)
                gpio_key = gpio_handler.check_keypress()
                if gpio_key == 32:  # GPIO 18 -> space
                    key = 32

                if key == 32:
                    if last_face_rect:
                        filename = "music/captured_photo.jpg"
                        cv2.imwrite(filename, frame)
                        VoiceModule().play_audio("music/叮声.mp3")
                        x, y, w, h = last_face_rect
                        face_roi = frame[y:y + h, x:x + w]
                        gray = cv2.cvtColor(face_roi, cv2.COLOR_BGR2GRAY)
                        id_, confidence = self.recognizer.predict(gray)

                        if confidence < 70:
                            # Show the accepted match for one second, then return.
                            recognition_result = self.names[id_]
                            result_pil = Image.fromarray(frame_rgb)
                            result_draw = ImageDraw.Draw(result_pil)
                            result_draw.rectangle([x, y, x + w, y + h], outline=(255, 0, 0), width=2)
                            result_draw.text((x, y - 15), f"{recognition_result}",
                                             font=self.font, fill=(255, 0, 0))
                            cv2.imshow('Face System', cv2.cvtColor(np.array(result_pil), cv2.COLOR_RGB2BGR))
                            cv2.waitKey(1000)
                            return recognition_result
                        else:
                            # Show the failure message for one second.
                            error_pil = Image.fromarray(frame_rgb)
                            error_draw = ImageDraw.Draw(error_pil)
                            error_draw.text((x, y - 15), "识别失败", font=self.font, fill=(255, 0, 0))
                            cv2.imshow('Face System', cv2.cvtColor(np.array(error_pil), cv2.COLOR_RGB2BGR))
                            cv2.waitKey(1000)
                    break
                elif key == 27:
                    break

        finally:
            cv2.destroyAllWindows()
            if self.cap and self.cap.isOpened():
                self.cap.release()
        return None


class VoiceModule:
    """Text-to-speech and audio playback built on Baidu TTS and pygame."""

    def __init__(self):
        self.client = self.initialize_baidu_tts()

    def initialize_baidu_tts(self):
        """Build the Baidu AipSpeech client from the project credentials."""
        # NOTE: credentials are hard-coded, matching the rest of this file.
        app_id = '116127254'
        api_key = 'VVXIeh9jlVN3FbD9PyOgCsOe'
        secret_key = 'MrW8rZCv1eY83v8jGHZkD0TbWUwbwZ2N'
        return AipSpeech(app_id, api_key, secret_key)

    def baidu_tts(self, text):
        """Synthesize `text` to music/output.mp3 and play it synchronously."""
        target = "music/output.mp3"
        synthesis_options = {'vol': 5, 'spd': 4, 'pit': 7, 'per': 4}
        audio = self.client.synthesis(text, 'zh', 1, synthesis_options)
        if isinstance(audio, dict):
            # The SDK returns an error dict on failure instead of raw bytes.
            print("语音合成失败，错误信息：", audio)
            return
        with open(target, 'wb') as out:
            out.write(audio)
        print("语音合成成功，已保存为 output.mp3")
        self.play_audio(target)

    def play_audio(self, file_path):
        """Play one audio file; blocks until playback finishes."""
        pygame.mixer.init()
        pygame.mixer.music.load(file_path)
        pygame.mixer.music.play()
        while pygame.mixer.music.get_busy():
            pygame.time.Clock().tick(10)
        pygame.mixer.quit()

    def play_local_audio(self, task_number):
        """Play the pre-recorded prompt music/任务{task_number}.mp3, if present."""
        prompt_path = f"music/任务{task_number}.mp3"
        try:
            self.play_audio(prompt_path)
            print(f"正在播放 {prompt_path}")
        except Exception as e:
            print(f"播放 {prompt_path} 失败，错误信息：{e}")


class CameraModule:
    """USB camera wrapper used for the photo-capture step of each task."""

    def __init__(self):
        self.camera_capture = None  # cv2.VideoCapture handle, (re)created on demand
        self._init_camera()
        self.window_width = 800
        self.window_height = 480
        self.window_name = "Camera"

    def _init_camera(self):
        """(Re)open camera 0 at 800x600, releasing any previous handle first."""
        if hasattr(self, 'camera_capture') and self.camera_capture is not None:
            self.camera_capture.release()
        self.camera_capture = cv2.VideoCapture(0)
        self.camera_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
        self.camera_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)

    def capture_photo(self, is_extension_task_two=False):
        """Show a live preview and capture one photo.

        Keys (keyboard or GPIO): space -> save photo, ESC -> abort,
        'y' -> extension task one, 'e' -> extension task two. When
        is_extension_task_two is True the photo is taken immediately
        without waiting for input.

        Returns:
            "music/captured_photo.jpg" on capture, "y_key_pressed" /
            "e_key_pressed" for the extension branches, or None on
            abort/failure.
        """
        try:
            if not hasattr(self, 'camera_capture') or not self.camera_capture.isOpened():
                self._init_camera()

            print("按下空格键拍照，按下 ESC 键退出...")
            cv2.namedWindow(self.window_name, cv2.WINDOW_NORMAL)
            cv2.setWindowProperty(self.window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)
            cv2.resizeWindow(self.window_name, self.window_width, self.window_height)
            cv2.moveWindow(self.window_name, 0, 0)

            filename = None
            while True:
                ret, frame = self.camera_capture.read()
                if not ret:
                    print("无法读取图像，请检查摄像头是否正常工作。")
                    break
                # Camera is mounted upside down; flip the frame upright.
                frame = cv2.rotate(frame, cv2.ROTATE_180)
                cv2.imshow(self.window_name, frame)
                cv2.resizeWindow(self.window_name, self.window_width, self.window_height)
                key = cv2.waitKey(1)
                # Physical GPIO buttons override keyboard input.
                gpio_key = gpio_handler.check_keypress()
                if gpio_key == 32:  # GPIO 18 -> space
                    key = 32
                elif gpio_key == 121:  # GPIO 21 -> 'y'
                    key = 121
                elif gpio_key == 101:  # GPIO 4 -> 'e'
                    key = 101
                if is_extension_task_two:
                    # Extension task two captures automatically.
                    key = 32
                if key == 32:
                    filename = "music/captured_photo.jpg"
                    cv2.imwrite(filename, frame)
                    VoiceModule().play_audio("music/拍照声.mp3")
                    # BUGFIX: previously printed a literal placeholder instead
                    # of the saved file path.
                    print(f"照片已保存为 {filename}")
                    break
                elif key == 27:
                    print("程序已退出。")
                    filename = None
                    break
                elif key == 121:  # 'y' key
                    print("检测到 Y 键，进入拓展任务一")
                    filename = "y_key_pressed"
                    break
                elif key == 101:  # 'e' key
                    print("检测到 E 键，进入拓展任务二")
                    filename = "e_key_pressed"
                    break

            self._release_preview()
            return filename
        except Exception as e:
            print(f"拍照失败，错误信息：{e}")
            # Ensure resources are released on the error path as well.
            self._release_preview()
            return None

    def _release_preview(self):
        """Release the camera handle and close only the preview window."""
        if hasattr(self, 'camera_capture') and self.camera_capture.isOpened():
            self.camera_capture.release()
        cv2.destroyWindow(self.window_name)


class DisplayModule:
    """pygame-based 800x480 display for task prompts and spoken feedback."""

    def __init__(self):
        pygame.init()
        self.screen_width, self.screen_height = 800, 480
        self.screen = pygame.display.set_mode((self.screen_width, self.screen_height))
        pygame.display.set_caption("任务提示")
        self.font = self.load_font()
        self.clock = pygame.time.Clock()

    def load_font(self):
        """Load the bundled SimHei font; fall back to pygame's default font."""
        try:
            font_path = "task_file/simhei.ttf"
            return pygame.font.Font(font_path, 36)
        except:
            return pygame.font.Font(None, 36)

    def display_task_prompt(self, task_name):
        """Show "<task_name>开始" plus a key-press hint, then pause one second."""
        self.screen.fill((0, 0, 0))
        pygame.time.delay(10)
        pygame.display.flip()
        task_prompt = f"{task_name}开始"
        prompt_surface = self.font.render(task_prompt, True, (255, 255, 255))
        prompt_rect = prompt_surface.get_rect(center=(self.screen_width // 2, self.screen_height // 2 - 30))
        self.screen.blit(prompt_surface, prompt_rect)

        second_line = "请按下按键进入拍照环节"
        second_surface = self.font.render(second_line, True, (255, 255, 255))
        second_rect = second_surface.get_rect(center=(self.screen_width // 2, self.screen_height // 2 + 30))
        self.screen.blit(second_surface, second_rect)

        pygame.display.flip()
        time.sleep(1)

    def display_text_and_play_audio(self, text, voice_module):
        """Render wrapped text, speak it via Baidu TTS, then blank the screen."""
        self.screen.fill((0, 0, 0))
        pygame.time.delay(1)
        pygame.display.flip()
        lines = self.split_text(text)
        # Vertically center the block of 40px-spaced lines.
        y_offset = (self.screen_height - len(lines) * 40) // 2
        for line in lines:
            text_surface = self.font.render(line, True, (255, 255, 255))
            text_rect = text_surface.get_rect(center=(self.screen_width // 2, y_offset))
            self.screen.blit(text_surface, text_rect)
            y_offset += 40
        pygame.display.flip()
        pygame.time.delay(200)
        if voice_module is not None:
            voice_module.baidu_tts(text)
        pygame.time.delay(1000)

        # Blank the screen again after the pause.
        self.screen.fill((0, 0, 0))
        pygame.display.flip()

    def passwordwrong_text_and_play_audio(self, text, voice_module):
        """Show the wrong-password message and play the canned error audio."""
        file_path = f"music/密码有误.MP3"
        self.screen.fill((0, 0, 0))
        lines = self.split_text(text)
        y_offset = (self.screen_height - len(lines) * 40) // 2
        for line in lines:
            text_surface = self.font.render(line, True, (255, 255, 255))
            text_rect = text_surface.get_rect(center=(self.screen_width // 2, y_offset))
            self.screen.blit(text_surface, text_rect)
            y_offset += 40

        pygame.display.flip()
        try:
            voice_module.play_audio(file_path)
        except Exception as e:
            print(f"播放音频失败: {e}")

    def split_text(self, text):
        """Split text on spaces into display lines of at most ~30 characters.

        NOTE(review): Chinese text usually contains no spaces, so such input
        comes back as a single (possibly overlong) line — confirm intended.
        """
        max_line_length = 30
        lines = []
        words = text.split(" ")
        current_line = ""
        for word in words:
            if len(current_line) + len(word) + 1 <= max_line_length:
                current_line += " " + word if current_line else word
            else:
                lines.append(current_line)
                current_line = word
        if current_line:
            lines.append(current_line)
        return lines

    def wait_for_key(self):
        """Block until the target key (code 1073742053) or GPIO 23 is pressed."""
        print("按下目标键或GPIO23继续...")
        waiting = True
        while waiting:
            # Physical button check.
            gpio_key = gpio_handler.check_keypress()
            if gpio_key == 1073742053:  # GPIO 23 pressed
                print("检测到GPIO23按下")
                waiting = False
                break

            # Keyboard event check.
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    print(f"按下的键: {event.key} (对应字符: {pygame.key.name(event.key)})")
                    if event.key == 1073742053:
                        print("检测到目标键按下")
                        waiting = False
                        break
                elif event.type == pygame.QUIT:
                    waiting = False
                    break
        self.screen.fill((0, 0, 0))
        pygame.display.flip()

    def quit(self):
        """Shut down all pygame subsystems."""
        pygame.quit()


class OCRModule:
    """Thin wrapper around PaddleOCR for Chinese text extraction."""

    def __init__(self):
        self.ocr = PaddleOCR(use_angle_cls=True, lang="ch")

    def ocr_image_to_text(self, img_path):
        """OCR the image at img_path; return detected lines joined by newlines.

        Returns an empty string when nothing is detected.
        """
        result = self.ocr.ocr(img_path, cls=True)
        if not result or not result[0]:
            return ""
        # Each entry is (box, (text, confidence)); keep just the text.
        pieces = [entry[1][0] for entry in result[0]]
        return "\n".join(pieces).strip()


class TypoDetectionModule:
    """Detects and reports typos in OCR'd text via LLM-based correction.

    Rotates through several chat models; the prompt constrains the model to
    return a corrected sentence of identical length, which is then diffed
    character-by-character against the input.
    """

    def __init__(self):
        # NOTE(review): hard-coded API credentials — consider config/env vars.
        self.client = OpenAI(
            api_key="sk-jjzlqcuovvqahaudwzkhmoeysnwxelouefhgvxmzytuulbhj",
            base_url="https://api.siliconflow.cn/v1"
        )
        # Fallback models, tried in rotation on failure/bad results.
        self.models = [
            'Pro/deepseek-ai/DeepSeek-V3',
            'Qwen/Qwen2.5-72B-Instruct-128K',
            'deepseek-ai/DeepSeek-V3'
        ]
        self.current_model_index = 0

    @staticmethod
    def sync_punctuation(original_text, corrected_text):
        """Copy punctuation from the model's corrected text onto the original.

        Keeps the original's non-punctuation characters untouched and only
        inserts/drops punctuation, so the later character diff reflects
        wording differences rather than punctuation differences.
        """
        # Recognized punctuation set (extend as needed).
        PUNCTUATIONS = {'，', '。', '、', '；', '！', '？'}

        # Two-pointer walk over both strings.
        i = j = 0
        result = []
        len_original, len_corrected = len(original_text), len(corrected_text)

        # (position, char) punctuation pairs to insert afterwards.
        punctuation_to_insert = []

        while i < len_original and j < len_corrected:
            if original_text[i] == corrected_text[j]:
                # Same character: keep it.
                result.append(original_text[i])
                i += 1
                j += 1
            else:
                # Mismatch: decide whether it is a punctuation difference.
                if corrected_text[j] in PUNCTUATIONS:
                    # Punctuation present only in the corrected text.
                    punctuation_to_insert.append((i, corrected_text[j]))
                    j += 1
                elif original_text[i] in PUNCTUATIONS:
                    # Extra punctuation in the original: drop it.
                    i += 1
                else:
                    # Genuine character difference: keep the original's char.
                    result.append(original_text[i])
                    i += 1
                    j += 1

        # Append any characters left over in the original.
        while i < len_original:
            result.append(original_text[i])
            i += 1

        # Insert recorded punctuation back-to-front so indices stay valid.
        for pos, punct in sorted(punctuation_to_insert, reverse=True):
            result.insert(pos, punct)

        return ''.join(result)

    def process_user_input(self, input_text):
        """Run typo detection on input_text via the SiliconFlow API.

        :param input_text: text to check
        :return: human-readable correction message
        """
        last_result = None
        # Build the prompt (kept verbatim — it is part of runtime behavior).
        prompt = (
            "【身份说明】\n"
            "你是一个非物质文化遗产保护领域的文字校对专家和专业的文字校对专家\n\n"
            "【核心任务】\n"
            "严格检测并修正句子中的两个错别字，只能有两个错误\n\n"
            "【输入文本】\n"
            f"{input_text}\n\n"
            "【修正规则】\n"
            "1. 必须且只能找到两处错误\n"
            "2. 保持原句字数、标点、语序完全不变\n"
            "3. 输出必须与输入长度严格一致\n"
            "4. 没有人名时不要修改人名\n\n"
            "【示例】\n"
            "输入：保全为主，抢救第二，合理利用，传承发展\n"
            "输出：保护为主，抢救第一，合理利用，传承发展\n"
            "输入：传统记忆须要被保存\n"
            "输出：传统技忆需要被保护\n"
            "输入：茶文化须要传呈\n"
            "输出：茶文化需要传承\n\n"
            "请严格按照示例格式输出修正后的完整句子，不要任何解释说明！"
        )

        for attempt in range(len(self.models)):
            try:
                current_model = self.models[self.current_model_index]
                print(f"尝试使用模型: {current_model}")

                response = self.client.chat.completions.create(
                    model=current_model,
                    messages=[{"role": "user", "content": prompt}],
                    temperature=0.35,
                    max_tokens=len(input_text) * 2,
                    stream=False
                )
                corrected_text = response.choices[0].message.content.strip()
                # Align punctuation first so the diff only sees wording changes.
                input_text = self.sync_punctuation(input_text, corrected_text)
                print(f"正确的: {corrected_text}")
                print(f"改正的: {input_text}")

                errors = self._compare_texts(input_text, corrected_text)

                # Build the result message for this attempt.
                if errors:
                    error_message = "和".join([f"第{pos}个字" for pos, _ in errors])
                    corrections = "和".join([f"{correct_char}" for _, correct_char in errors])
                    last_result = f"{error_message}有误，请更正为{corrections}。"
                else:
                    last_result = "未检测到错别字。"

                # Not exactly one typo: rotate to the next model and retry.
                # NOTE(review): the prompt demands two corrections, yet only a
                # single-typo result is accepted here — confirm which is intended.
                if len(errors) != 1:
                    print(f"检测到{len(errors)}个错别字，切换模型重试...")
                    self.current_model_index = (self.current_model_index + 1) % len(self.models)
                    time.sleep(1)
                    continue

                return last_result

            except Exception as e:
                print(f"[API 调用异常] {str(e)}")
                self.current_model_index = (self.current_model_index + 1) % len(self.models)
                time.sleep(1)
                continue

        # Every model was tried: fall back to the last message, if any.
        return last_result if last_result else "错别字检测失败。"

    def _compare_texts(self, original, corrected):
        """Diff two strings character by character (up to the shorter length).

        :param original: original text
        :param corrected: corrected text
        :return: list of (1-based position, correct character) mismatches
        """
        errors = []
        for i, (orig_char, corr_char) in enumerate(zip(original, corrected)):
            if orig_char != corr_char:
                errors.append((i + 1, corr_char))  # 1-based position + correct char
        return errors


class HeritageCategoryModule:
    """Classifies recognized text into an intangible-cultural-heritage category.

    Primary path is an LLM call returning JSON; keyword-based fallbacks are
    used when confidence is low or the API call fails.
    """

    def __init__(self, openai_client):
        # Client is stored for API parity; _get_ai_analysis currently builds
        # its own client instead of using this one.
        self.client = openai_client
        self.base_prompt = """请执行以下操作：
1. 忽略人名，提取技艺名称（如：制陶、制茶等,而非具体的实例如"茉莉花茶"或"紫砂壶"）
2. 如果输入文本中包含具体的实例（如"茉莉花茶"或"紫砂壶"或"烘青绿茶"），应将其归类为对应的通用技艺名称（如"制茶"）
3. 严格匹配非遗类别，从：民间文学、传统音乐、传统舞蹈、传统戏剧、曲艺、传统体育、游艺与杂技、传统美术、传统技艺、传统医药、民间文学中选一个
4、只能从我写出的非遗类别中匹配，不允许写其他类别，不允许多字少字
4. 按JSON格式返回：
{
    "category": "最匹配的类别",
    "skill": "核心技艺名称",
    "confidence": 0.9
}

示例：
输入："侗族大歌"
输出：{"category": "传统音乐", "skill": "大歌", "confidence": 0.95}
输入："李红在制作茉莉花茶时采用了福建烘青绿茶为茶坯"
输出：{"category": "传统技艺", "skill": "制茶", "confidence": 0.9}"""

    def get_heritage_category(self, recognized_text):
        """Return a sentence naming the heritage category + skill for the text.

        Uses the LLM answer when confidence >= 0.6, otherwise keyword
        fallbacks; a separate fallback handles outright analysis errors.
        """
        try:
            ai_result = self._get_ai_analysis(recognized_text)
            if ai_result["confidence"] >= 0.6:
                return f"这是非遗{ai_result['category']}类别的{ai_result['skill']}技艺"
            return self._hybrid_fallback(recognized_text)
        except Exception as e:
            print(f"[分析异常] {str(e)}")
            return self._enhanced_fallback(recognized_text)

    def _get_ai_analysis(self, text):
        """Ask the LLM for {category, skill, confidence}; zeroed dict on failure."""
        client = OpenAI(api_key="sk-jjzlqcuovvqahaudwzkhmoeysnwxelouefhgvxmzytuulbhj",
                        base_url="https://api.siliconflow.cn/v1")
        try:
            print("正在发送请求...")
            # NOTE(review): this log names 'Qwen/QVQ-72B-Preview', but the
            # request below uses Qwen2.5-72B — the log text looks stale.
            print(f"请求内容: model='Qwen/QVQ-72B-Preview', messages={self.base_prompt}, user_content={text}")
            response = client.chat.completions.create(
                model='Qwen/Qwen2.5-72B-Instruct-128K',
                messages=[
                    {"role": "system", "content": self.base_prompt},
                    {"role": "user", "content": f"分析文本：{text}"}
                ],
                temperature=0.2,
                max_tokens=len(text) * 2
            )
            print("API 响应内容:", response)  # full raw response, for debugging
            raw_response = response.choices[0].message.content
            # Strip a ```json fence if the model wrapped its answer in one.
            if raw_response.startswith('```json'):
                json_str = raw_response.split('```json')[1].split('```')[0].strip()
            else:
                json_str = raw_response
            result = json.loads(json_str)
            print(result)
            return {
                "category": result["category"].strip(),
                "skill": self._normalize_skill_name(result["skill"]),
                "confidence": float(result["confidence"])
            }
        except Exception as e:
            print(f"[API 调用异常] {str(e)}")
            return {
                "category": "未知",
                "skill": text,
                "confidence": 0
            }

    def _normalize_skill_name(self, skill_str):
        """Strip filler words from a skill name; default to "传统技艺" if empty."""
        remove_words = ["技艺", "工艺", "技术", "的", "制作"]
        for word in remove_words:
            skill_str = skill_str.replace(word, "")
        return skill_str.strip() or "传统技艺"

    def _hybrid_fallback(self, text):
        """Keyword-based fallback used when LLM confidence is below 0.6."""
        keyword_mapping = {
            "制茶": "传统技艺",
            "茉莉花茶": "传统技艺",
            "烘青绿茶": "传统技艺",
            "茶坯": "传统技艺",
            "陶器": "传统技艺",
            "紫砂": "传统技艺"
        }
        for kw, cat in keyword_mapping.items():
            if kw in text:
                return f"这是非遗{cat}类别的{kw}技艺"
        # Last resort: look for a category name quoted directly in the text.
        categories = ["传统技艺", "传统音乐", "传统舞蹈",
                      "传统戏剧", "传统美术", "传统医药", "民俗"]
        for cat in categories:
            if cat in text:
                return f"检测到{cat}类别"
        return "无法识别非遗类别"

    def _enhanced_fallback(self, text):
        """Minimal keyword fallback used when the analysis itself raised."""
        if any(kw in text for kw in ["制茶", "茶叶", "茶培"]):
            return "这是非遗传统技艺类别的制茶技艺"
        elif "陶" in text:
            return "这是非遗传统技艺类别的制陶技艺"
        return "无法识别非遗类别（备用方案）"


def paizhaopian():
    """Entry point for the photo task: delegates to chengxu.paizhao_module.

    Builds fresh OpenAI/display/voice helpers and hands them to run_paizhao.
    """
    # Imported lazily so the module is only required when this task runs.
    from chengxu.paizhao_module import run_paizhao
    run_paizhao(
        client=OpenAI(
            api_key="sk-jjzlqcuovvqahaudwzkhmoeysnwxelouefhgvxmzytuulbhj",
            base_url="https://api.siliconflow.cn/v1"
        ),
        display_module=DisplayModule(),
        voice_module=VoiceModule()
    )


class MainModule:
    def __init__(self):
        """Wire together all sub-modules used by the main control loop."""
        # NOTE(review): API key is hard-coded; consider an env variable.
        self.client = OpenAI(
            api_key="sk-jjzlqcuovvqahaudwzkhmoeysnwxelouefhgvxmzytuulbhj",
            base_url="https://api.siliconflow.cn/v1"
        )
        self.validator = PasswordValidator()
        # self.cap = cv2.VideoCapture(0)  # (disabled) automatic camera init
        # GPIO.setmode(GPIO.BCM)  # (disabled) automatic GPIO init
        self.ocr_module = OCRModule()
        self.voice_module = VoiceModule()
        self.display_module = DisplayModule()
        self.typo_detection_module = TypoDetectionModule()
        self.heritage_category_module = HeritageCategoryModule(self.client)
        self.face_recognizer = OfflineFaceRecognizer()
        self.is_extension_task_two = False  # flag: extension task two is active
        self.task_count = 0  # number of tasks executed (challenge progress)
        self.task3_done = False  # set once task three finishes its photo quota
        # Offline text classifier and its matching TF-IDF vectorizer
        # (joblib dumps produced by the training script).
        self.offline_model = joblib.load("hexinwenjianbao/text_classifier.pkl")
        self.vectorizer = joblib.load("hexinwenjianbao/tfidf_vectorizer.pkl")
    def _safe_release_camera(self, camera):
        """安全释放摄像头资源的辅助方法"""
        try:
            if hasattr(camera, 'camera_capture') and camera.camera_capture.isOpened():
                camera.camera_capture.release()
            cv2.destroyAllWindows()
        except Exception as e:
            print(f"释放摄像头资源时出错: {e}")

    def offline_recognize(self, input_text):
        """离线识别文本的类别和置信度"""
        # 将文本转换为数字
        X_test = self.vectorizer.transform([input_text])
        # 获取所有类别的概率
        predicted_probas = self.offline_model.predict_proba(X_test)
        # 获取最高类别的索引
        max_class_index = np.argmax(predicted_probas, axis=1)
        # 获取类别名称
        max_class_name = self.offline_model.classes_[max_class_index][0]
        # 获取最高置信度
        max_confidence = np.max(predicted_probas, axis=1)[0]
        return max_class_name, max_confidence

    def main(self):
        """Main control loop (supports generic task switching).

        Validates the password, shows the initial interface, then repeatedly
        photographs and OCRs prompts to decide which task to run next. GPIO
        keys can also force the extension tasks (121 -> one, 101 -> two).
        Loops until extension task two runs or an unhandled error occurs.
        """
        self.validator.validate()
        init_result = self.face_recognizer.show_initial_interface()
        if init_result == "拓展任务二":
            return self._handle_extension_task_two()
        elif not init_result:
            return

        camera = CameraModule()
        current_task = None  # name of the currently selected task

        while True:
            try:
                # GPIO shortcuts take priority over OCR-based selection.
                # NOTE(review): every check_keypress() call consumes one
                # pending key event, so these two consecutive calls can
                # swallow each other's keys — confirm whether a single read
                # should be shared between the comparisons.
                if gpio_handler.check_keypress() == 121:
                    current_task = "拓展任务一"
                    continue
                elif gpio_handler.check_keypress() == 101:
                    current_task = "拓展任务二"
                    continue
                if not current_task:
                    filename = self._capture_and_process(camera, prompt="请按下空格键拍照触发任务...")
                    # Sentinel filenames skip straight to an extension task.
                    if filename == "skip_to_extension_task":
                        current_task = "拓展任务一"
                        continue
                    elif filename == "skip_to_extension_task_two":
                        current_task = "拓展任务二"
                        continue
                    recognized_text = self.ocr_module.ocr_image_to_text(filename)
                    recognized_text = recognized_text.strip().replace("\n", "").replace(" ", "")
                    print(f"[主循环] 识别内容: {recognized_text}")

                    # Map recognized keywords to a task. (The original code
                    # had the "任务一" check duplicated; one copy removed.)
                    if "博物智能" in recognized_text:
                        current_task = "博物智能"
                    if "任务一" in recognized_text and "拓展任务一" not in recognized_text:
                        current_task = "任务一"
                    elif "任务二" in recognized_text and "拓展任务二" not in recognized_text:
                        current_task = "任务二"
                    elif "任务三" in recognized_text:
                        current_task = "任务三"
                    elif "任务四" in recognized_text:
                        current_task = "任务四"
                    elif self._check_extension_task_one_keyword(recognized_text) or (
                            gpio_handler.check_keypress() == 121):  # 121 is the Y key
                        current_task = "拓展任务一"
                    elif self._check_extension_task_two_keyword(recognized_text) or (
                            gpio_handler.check_keypress() == 101):  # 101 is the E key
                        current_task = "拓展任务二"
                    else:
                        print("未识别到有效任务关键词")
                        continue

                # Dispatch on the current task; handlers return the next
                # task name (or None) so tasks can chain into each other.
                if current_task == "博物智能":
                    current_task = self._handle_museum_ai_task()
                if current_task == "任务一":
                    new_photo = self._handle_task_one(camera)
                    if new_photo:
                        self._process_new_photo(new_photo)
                    current_task = None  # reset task state
                elif current_task == "任务二":
                    current_task = self._handle_task_two(camera)
                elif current_task == "任务三":
                    current_task = self._handle_task_three(camera)
                elif current_task == "任务四":
                    current_task = self._handle_task_four(camera)
                elif current_task == "拓展任务一":
                    current_task = self._handle_extension_task(camera)
                elif current_task == "拓展任务二":
                    current_task = self._handle_extension_task_two()
                    break

            except Exception as e:
                print(f"主循环异常: {str(e)}")
                self.display_module.quit()
                break

    def _check_extension_task_one_keyword(self, recognized_text):
        """检查识别的文本是否与拓展任务一的关键词匹配"""
        try:
            # 读取拓展任务一进入提示词.txt文件的第一行
            with open("task_file/tzrwyml/T1TSC.txt", "r", encoding="utf-8") as f:
                keyword = f.readline().strip()

            # 如果文件为空或无法读取，使用默认关键词
            if not keyword:
                print("提示词文件为空，使用默认关键词'拓展任务一'")
                return "拓展任务一" in recognized_text

            # 检查识别的文本是否包含关键词
            if keyword in recognized_text:
                print(f"检测到拓展任务一关键词：{keyword}")
                return True
            return False
        except FileNotFoundError:
            print("未找到拓展任务一进入提示词.txt文件，使用默认关键词")
            return "拓展任务一" in recognized_text
        except Exception as e:
            print(f"读取拓展任务一关键词时出错: {e}")
            return "拓展任务一" in recognized_text

    def _check_extension_task_two_keyword(self, recognized_text):
        """检查识别的文本是否与拓展任务二的关键词匹配"""
        try:
            # 读取拓展任务二进入提示词.txt文件的第一行
            with open("task_file/tzrweml/T2TS.txt", "r", encoding="utf-8") as f:
                keyword = f.readline().strip()

            # 如果文件为空或无法读取，使用默认关键词
            if not keyword:
                print("提示词文件为空，使用默认关键词'拓展任务二'")
                return "拓展任务二" in recognized_text

            # 检查识别的文本是否包含关键词
            if keyword in recognized_text:
                print(f"检测到拓展任务二关键词：{keyword}")
                return True
            return False
        except FileNotFoundError:
            print("未找到拓展任务二进入提示词.txt文件，使用默认关键词")
            return "拓展任务二" in recognized_text
        except Exception as e:
            print(f"读取拓展任务二关键词时出错: {e}")
            return "拓展任务二" in recognized_text

    def _after_task_executed(self):
        """每次任务执行后的通用操作"""
        self.task_count += 1
        print(f"当前任务计数: {self.task_count}")

        # 检查完成条件
        if self.task_count >= 3 and self.task3_done:
            # self._show_completion_message()
            self._reset_counter()  # 重置计数器

    def _show_completion_message(self):
        """显示挑战完成提示"""
        global TASK_COMPLETED  # 使用全局变量
        TASK_COMPLETED = True  # 设置任务完成标志
        completion_text = "挑战任务已完成！"
        self.display_module.display_text_and_play_audio(completion_text, voice_module=None)

        # 播放挑战完成音频
        self.voice_module.play_audio("music/挑战完成.MP3")

    def _reset_counter(self):
        """重置计数器和标记"""
        global TASK_COMPLETED
        self.task_count = 0
        self.task3_done = False
        TASK_COMPLETED = False

    def _handle_museum_ai_task(self):
        """处理博物智能任务"""
        try:
            # 读取问问我的.txt文件内容
            with open("task_file/www.txt", "r", encoding="utf-8") as f:
                question = f.read().strip()
                question = f"{question},只需要回答答案，不需要任何解释."

            if not question:
                return "问题文件为空"

            print(f"[博物智能] 问题内容: {question}")

            # 调用大模型分析
            response = self.client.chat.completions.create(
                model='Pro/deepseek-ai/DeepSeek-V3',
                messages=[{"role": "user", "content": question}],
                temperature=0
            )

            answer = response.choices[0].message.content.strip()
            output = f"答案是：{answer}，保护好车"

            # 显示和播报结果
            self.display_module.display_text_and_play_audio(output, voice_module = None)
            return None

        except Exception as e:
            print(f"[博物智能任务异常] {str(e)}")
            return "处理博物智能问题时出错"
    def _handle_task_one(self, camera):
        """Task one: recognize a face and announce the person (and role)."""
        self.voice_module.play_local_audio(1)
        self.display_module.display_task_prompt("任务一")
        print("\n=== 进入任务一流程 ===")
        try:
            camera = camera or CameraModule()
            user_name = self.face_recognizer.recognize_user()
            if user_name:
                # Map the recognized name back to its list index so we can
                # look up the matching role entry.
                user_id = self.face_recognizer.names.index(user_name)
                # (previous one-liner kept for reference)
                # role = self.face_recognizer.roles[user_id] if user_id < len(self.face_recognizer.roles) else "未知身份"
                result_text = f"这是{user_name}"
                role = ""
                if user_id < len(self.face_recognizer.roles):
                    role = self.face_recognizer.roles[user_id].strip()  # trim whitespace
                if role:  # only append when a role is actually present
                    result_text += f"，{role}"
                self.display_module.display_text_and_play_audio(result_text, self.voice_module)
            else:
                self.voice_module.baidu_tts("人脸识别失败")
        finally:
            self._safe_release_camera(camera) 
            self._after_task_executed()

    def _handle_task_two(self, camera=None):
        """Task two: OCR a photo and detect the typo in the recognized text.

        Loops taking photos until a task-switch keyword is recognized;
        returns the next task's name when switching, otherwise keeps
        running typo-detection rounds. Detection uses either the online
        LLM or a local reference-text comparison, per a config switch.
        """
        try:
            self.voice_module.play_local_audio(2)
            print("\n=== 进入任务二流程 ===")
            self.display_module.display_task_prompt("任务二")  # show task banner
            camera = camera or CameraModule()

            # Read the task-two LLM on/off switch.
            use_online_model = True  # default: use the online LLM
            try:
                with open("task_file/rweml/J2KG.txt", "r", encoding="utf-8") as f:
                    switch_setting = f.read().strip().lower()
                    if switch_setting == "off":
                        use_online_model = False
                        print("任务二大模型开关设置为OFF，将使用本地文件比对")
                    else:
                        print("任务二大模型开关设置为ON，将使用在线大模型")
            except FileNotFoundError:
                print("未找到任务二大模型开关文件，使用默认在线大模型")

            while True:  # per-task inner loop
                filename = self._capture_and_process(camera, "请拍照进行错别字检测...")
                # Sentinel filename: jump straight to extension task one.
                if filename == "skip_to_extension_task":
                    self._after_task_executed()
                    return "拓展任务一"  # switch immediately
                if not filename:
                    continue
                recognized_text = self.ocr_module.ocr_image_to_text(filename)
                recognized_text = recognized_text.strip().replace("\n", "").replace(" ", "")
                print(f"[任务二] 识别内容: {recognized_text}")

                # Task-switch keywords take priority over typo detection.
                if "任务一" in recognized_text and "拓展任务一" not in recognized_text:
                    print("检测到任务一关键词，切换到任务一...")
                    self._after_task_executed()
                    return "任务一"  # hand control back with the new task
                elif "任务三" in recognized_text:
                    print("检测到任务三关键词，切换到任务三...")
                    self._after_task_executed()
                    return "任务三"  # hand control back with the new task
                elif "任务四" in recognized_text:
                    print("检测到任务四关键词，切换到任务四...")
                    self._after_task_executed()
                    return "任务四"  # hand control back with the new task
                elif "拓展任务一" in recognized_text:
                    print("检测到拓展任务一关键词，切换到拓展任务一...")
                    return "拓展任务一"  # hand control back with the new task
                elif "任务二" in recognized_text and "拓展任务二" not in recognized_text:
                    # Saw "task two" again: restart task two from the top.
                    print("再次检测到任务二关键词，重新开始任务二...")
                    self.voice_module.play_local_audio(2)
                    self.display_module.display_task_prompt("任务二")
                    continue  # skip detection for this frame

                # Typo-detection proper.
                if use_online_model:
                    # LLM-based detection.
                    print("使用大模型检测错别字")
                    result = self.typo_detection_module.process_user_input(recognized_text)
                else:
                    # Compare against the local reference text instead.
                    print("使用本地文件比对检测错别字")
                    try:
                        with open("task_file/rweml/J2ZQWZ.txt", "r", encoding="utf-8") as f:
                            correct_text = f.read().strip()
                        
                        # Diff the OCR output against the reference text.
                        result = self._compare_with_correct_text(recognized_text, correct_text)
                    except FileNotFoundError:
                        print("未找到任务二正确文字.txt文件，回退到大模型检测")
                        result = self.typo_detection_module.process_user_input(recognized_text)
                
                self.display_module.display_text_and_play_audio(result, self.voice_module)
        finally:
            # Always release the camera and run post-task bookkeeping.
            self._safe_release_camera(camera)
            self._after_task_executed()

    def _compare_with_correct_text(self, recognized_text, correct_text):
        """比对识别文本和正确文本，找出错别字"""
        if len(recognized_text) != len(correct_text):
            print(f"警告：识别文本长度({len(recognized_text)})与正确文本长度({len(correct_text)})不一致")
            # 取较短的长度进行比对
            compare_length = min(len(recognized_text), len(correct_text))
        else:
            compare_length = len(recognized_text)
        
        errors = []
        for i in range(compare_length):
            if recognized_text[i] != correct_text[i]:
                errors.append((i + 1, correct_text[i]))  # 记录位置（从1开始）和正确字符
        
        if not errors:
            return "未检测到错别字。"
        
        # 只取第一个错误（任务要求只有一个错别字）
        error_pos, correct_char = errors[0]
        error_positions = "和".join([f"第{pos}个字" for pos, _ in errors])
        corrections = "和".join([f"{char}" for _, char in errors])
        return f"{error_positions}有误，请更正为{corrections}。"

    def _handle_task_three(self, camera=None):
        """Task three: photograph craft names, classify each into a heritage
        category (LLM or offline model per a config switch), and after six
        photos possibly mark the whole challenge complete.

        Returns the next task's name when a switch keyword is recognized,
        or None once the six-photo quota is reached.
        """
        try:
            """封装任务三处理逻辑"""
            global TASK_COMPLETED
            self.voice_module.play_local_audio(3)
            print("\n=== 进入任务三流程 ===")
            self.display_module.display_task_prompt("任务三")  # show task banner
            photo_count = 0
            camera = camera or CameraModule()

            # Read the LLM-call on/off switch for task three.
            use_relative_confidence = True  # default: relative-confidence strategy
            try:
                with open("task_file/rwsml/J3KG.txt", "r", encoding="utf-8") as f:
                    switch_setting = f.read().strip().lower()
                    if switch_setting == "off":
                        use_relative_confidence = False
                        print("大模型调用开关设置为OFF，将使用纯离线模式")
                    else:
                        print("大模型调用开关设置为ON，将使用相对置信度策略")
            except FileNotFoundError:
                print("未找到大模型调用开关文件，使用默认相对置信度策略")

            while True:  # per-task inner loop
                filename = self._capture_and_process(camera, "请拍照识别技艺名称...")
                # Sentinel filenames: jump straight to an extension task.
                if filename == "skip_to_extension_task":
                    self._after_task_executed()
                    return "拓展任务一"  # switch immediately
                elif filename == "skip_to_extension_task_two":
                    self._after_task_executed()
                    return "拓展任务二"  # switch immediately
                if not filename:
                    continue
                recognized_text = self.ocr_module.ocr_image_to_text(filename)
                recognized_text = recognized_text.strip().replace("\n", "").replace(" ", "")
                print(f"[任务三] 识别内容: {recognized_text}")

                # Task-switch keywords take priority over classification.
                if "任务一" in recognized_text and "拓展任务一" not in recognized_text:
                    print("检测到任务一关键词，切换到任务一...")
                    self._after_task_executed()
                    return "任务一"  # hand control back with the new task
                elif "任务二" in recognized_text and "拓展任务二" not in recognized_text:
                    print("检测到任务二关键词，切换到任务二...")
                    self._after_task_executed()
                    return "任务二"  # hand control back with the new task
                elif "任务四" in recognized_text:
                    print("检测到任务四关键词，切换到任务四...")
                    self._after_task_executed()
                    return "任务四"  # hand control back with the new task
                elif self._check_extension_task_one_keyword(recognized_text):  # config-driven keyword match
                    print("检测到拓展任务一关键词，切换到拓展任务一...")
                    self._after_task_executed()
                    return "拓展任务一"  # hand control back with the new task
                elif "任务三" in recognized_text:
                    # Saw "task three" again: restart task three from the top.
                    print("再次检测到任务三关键词，重新开始任务三...")
                    self._after_task_executed()
                    self.voice_module.play_local_audio(3)
                    self.display_module.display_task_prompt("任务三")
                    continue  # skip classification for this frame
                # Strip all task keywords before deciding if any real
                # content remains to classify.
                filtered_text = recognized_text
                for keyword in ["任务一", "任务二", "任务三", "任务四", "拓展任务一", "拓展任务二"]:
                    filtered_text = filtered_text.replace(keyword, "")

                # Nothing left after filtering: skip this frame.
                if not filtered_text.strip():
                    print("过滤任务关键词后文本为空，跳过本次识别")
                    continue

                # Offline classification first.
                # NOTE(review): classification feeds recognized_text, not
                # filtered_text — confirm that is intentional.
                X_test = self.vectorizer.transform([recognized_text])
                all_probas = self.offline_model.predict_proba(X_test)[0]

                # Class labels matching the probability columns.
                class_names = self.offline_model.classes_

                # Pair each class with its probability, highest first.
                class_proba_pairs = [(class_names[i], proba) for i, proba in enumerate(all_probas)]
                sorted_pairs = sorted(class_proba_pairs, key=lambda x: x[1], reverse=True)

                # Top two candidates.
                top_class, top_confidence = sorted_pairs[0]
                second_class, second_confidence = sorted_pairs[1] if len(sorted_pairs) > 1 else (None, 0)

                # Margin between the two best candidates.
                relative_advantage = top_confidence - second_confidence

                # Share of the top candidate in the total probability mass.
                total_confidence = sum(proba for _, proba in class_proba_pairs)
                relative_confidence = top_confidence / total_confidence if total_confidence > 0 else 0

                print(f"置信度排序: {sorted_pairs}")
                # NOTE(review): top_class is a string, so top_class[0] prints
                # only its first character — likely meant top_class itself
                # (same for second_class[0] below).
                print(f"最高类别: {top_class[0]}, 置信度: {top_confidence:.4f}")
                print(f"第二高类别: {second_class[0] if second_class else 'None'}, 置信度: {second_confidence:.4f}")
                print(f"相对优势: {relative_advantage:.4f}, 相对置信度: {relative_confidence:.4f}")

                # Strategy selection per the switch read above.
                if use_relative_confidence:
                    response = self.get_skill_category(recognized_text)
                else:
                    # Pure offline mode: take the top-probability class.
                    print(f"使用离线模式，选择置信度最高的类别: {top_class} (置信度: {top_confidence:.4f})")
                    response = f"{recognized_text}属于{top_class}"
                response = response.replace('\n', '').replace('\r', '').strip()
                photo_count += 1

                # After six photos, task three counts as executed.
                if photo_count >= 6:
                    self._after_task_executed()

                    # Challenge completion check.
                    if self.task_count >= 3:
                        TASK_COMPLETED = True
                        # Show the final result, then the completion message.
                        self.display_module.display_text_and_play_audio(response, self.voice_module)
                        time.sleep(1)
                        self.task3_done = True
                        self._after_task_executed()
                        self._show_completion_message()
                    else:
                        TASK_COMPLETED = False
                        # Only show the recognition result.
                        self.display_module.display_text_and_play_audio(response, self.voice_module)
                    return None
                else:
                    TASK_COMPLETED = False
                    # Not the final photo: show the result and keep looping.
                    self.display_module.display_text_and_play_audio(response, self.voice_module)

                # NOTE(review): unreachable — photo_count >= 6 already
                # returned above; kept for byte-identical behavior.
                if photo_count >= 6:
                    return None
        finally:
            self._safe_release_camera(camera) 
            cv2.destroyAllWindows()

    def _handle_task_four(self, camera=None):
        """Task four: OCR a heritage item name and report its category via
        the online LLM or a dedicated offline classifier (config switch).

        Returns the next task's name when a switch keyword is recognized;
        otherwise keeps processing photos.
        """
        try:
            """封装任务四处理逻辑"""
            self.voice_module.play_local_audio(4)
            print("\n=== 进入任务四流程 ===")
            self.display_module.display_task_prompt("任务四")  # show task banner
            camera = camera or CameraModule()

            # Read the LLM-call on/off switch for task four.
            use_online_model = True  # default: online LLM
            try:
                with open("task_file/rwsiml/J4KG.txt", "r", encoding="utf-8") as f:
                    switch_setting = f.read().strip().lower()
                    if switch_setting == "off":
                        use_online_model = False
                        print("大模型调用开关设置为OFF，将使用纯离线模式")
                    else:
                        print("大模型调用开关设置为ON，将使用在线大模型")
            except FileNotFoundError:
                print("未找到大模型调用开关文件，使用默认在线大模型")

            # Task-four-specific offline model artifacts.
            task4_offline_model = None
            task4_vectorizer = None
            category_to_name = {}  # category label -> heritage item name
            
            try:
                if not use_online_model:
                    # Model files produced by the task-four training script.
                    task4_offline_model = joblib.load("hexinwenjianbao/task4_classifier.pkl")
                    task4_vectorizer = joblib.load("hexinwenjianbao/task4_vectorizer.pkl")
                    print("成功加载任务四专用离线模型")
                    
                    # Try to load the category -> heritage-name mapping.
                    try:
                        # utf-8-sig tolerates a BOM at the start of the CSV.
                        import pandas as pd
                        train_data = pd.read_csv("task_file/任务四目录/任务四词语.csv", encoding='utf-8-sig')
                        category_to_name = dict(zip(train_data["类别"], train_data["非遗名称"]))
                        print("成功加载类别到非遗名称的映射")
                        print("自动加载的类别映射:", category_to_name)
                    except Exception as e:
                        print(f"加载类别映射失败: {e}")
            except Exception as e:
                print(f"加载任务四离线模型失败: {e}")
                use_online_model = True  # fall back to the online model

            while True:  # per-task inner loop
                filename = self._capture_and_process(camera, "请拍照识别非遗类别...")
                if filename == "skip_to_extension_task":
                    return "拓展任务一"  # switch immediately
                if filename == "skip_to_extension_task_two":
                    return "拓展任务二"  # switch immediately
                if not filename:
                    continue
                recognized_text = self.ocr_module.ocr_image_to_text(filename)
                recognized_text = recognized_text.strip().replace("\n", "").replace(" ", "")
                print(f"[任务四] 识别内容: {recognized_text}")

                # Task-switch keywords take priority over classification.
                if "任务一" in recognized_text and "拓展任务一" not in recognized_text:
                    print("检测到任务一关键词，切换到任务一...")
                    self._after_task_executed()
                    return "任务一"  # hand control back with the new task
                elif "任务二" in recognized_text and "拓展任务二" not in recognized_text:
                    print("检测到任务二关键词，切换到任务二...")
                    self._after_task_executed()
                    return "任务二"  # hand control back with the new task
                elif "任务三" in recognized_text:
                    print("检测到任务三关键词，切换到任务三...")
                    self._after_task_executed()
                    return "任务三"  # hand control back with the new task
                elif "拓展任务一" in recognized_text:
                    print("检测到拓展任务一关键词，切换到拓展任务一...")
                    self._after_task_executed()
                    return "拓展任务一"  # hand control back with the new task
                elif "任务四" in recognized_text:
                    # Saw "task four" again: restart task four from the top.
                    print("再次检测到任务四关键词，重新开始任务四...")
                    self._after_task_executed()
                    self.voice_module.play_local_audio(4)
                    self.display_module.display_task_prompt("任务四")
                    continue  # skip classification for this frame

                # Heritage-category classification proper.
                if use_online_model:
                    # Online LLM path.
                    print("使用在线大模型进行非遗类别判断")
                    response = self.heritage_category_module.get_heritage_category(recognized_text)
                else:
                    # Offline classifier path.
                    print("使用任务四专用离线模型进行非遗类别判断")
                    try:
                        X_test = task4_vectorizer.transform([recognized_text])
                        predicted_label = task4_offline_model.predict(X_test)[0]
                        confidence = np.max(task4_offline_model.predict_proba(X_test)[0])
                        
                        # Map the label to a concrete heritage item name.
                        predicted_name = category_to_name.get(predicted_label, "未知非遗名称")
                        
                        print(f"离线模型预测类别: {predicted_label}, 非遗名称: {predicted_name}, 置信度: {confidence:.4f}")
                        # Announcement uses the required wording/format.
                        response = f"这是{predicted_label}类别中的{predicted_name}"
                    except Exception as e:
                        print(f"任务四离线模型预测失败: {e}")
                        response = "无法识别非遗类别"
                
                self.display_module.display_text_and_play_audio(response, self.voice_module)
        finally:
            self._safe_release_camera(camera) 
            cv2.destroyAllWindows()

    def _handle_extension_task(self, camera=None):
        """封装拓展任务一处理逻辑"""
        self.voice_module.play_local_audio(5)
        print("\n=== 进入拓展任务一流程 ===")
        self.display_module.display_task_prompt("拓展任务一")
        camera = camera or CameraModule()

        # 读取拓展任务一需要拍照的次数
        try:
            with open("task_file/tzrwyml/T1SHU.txt", "r", encoding="utf-8") as file:
                photo_count = int(file.read().strip())
                if photo_count <= 0:
                    photo_count = 1
        except (FileNotFoundError, ValueError):
            print("未找到拍照数文件或格式错误，默认拍摄1张")
            photo_count = 1

        # 读取拓展任务一计算方式开关设置
        use_online_model = True
        try:
            with open("task_file/tzrwyml/T1KG.txt", "r", encoding="utf-8") as f:
                switch_setting = f.read().strip().lower()
                if switch_setting == "off":
                    use_online_model = False
                    print("拓展任务一开关设置为OFF，将使用外置程序计算")
                else:
                    print("拓展任务一开关设置为ON，将使用在线大模型计算")
        except FileNotFoundError:
            print("未找到拓展任务一开关文件，使用默认在线大模型")

        print(f"拓展任务一需要拍摄 {photo_count} 张照片")
        collected_values = []

        # 拍照并提取数值
        for i in range(photo_count):
            self.display_module.display_text_and_play_audio(f"请拍摄第 {i + 1}/{photo_count} 张照片", voice_module=None)
            while True:
                filename = self._capture_and_process(camera, f"请拍摄第 {i + 1}/{photo_count} 张照片")
                if filename == "skip_to_extension_task":
                    return "拓展任务一"
                elif filename == "skip_to_extension_task_two":
                    return "拓展任务二"
                if not filename:
                    continue
                recognized_text = self.ocr_module.ocr_image_to_text(filename)
                recognized_text = recognized_text.strip().replace("\n", "").replace(" ", "")
                print(f"[拓展任务一] 第 {i + 1} 张照片识别内容: {recognized_text}")

                # 检查是否识别到新的任务关键词
                if "任务一" in recognized_text and "拓展任务一" not in recognized_text:
                    print("检测到任务一关键词，切换到任务一...")
                    return "任务一"
                elif "任务二" in recognized_text and "拓展任务二" not in recognized_text:
                    print("检测到任务二关键词，切换到任务二...")
                    return "任务二"
                elif "任务三" in recognized_text:
                    print("检测到任务三关键词，切换到任务三...")
                    return "任务三"
                elif "任务四" in recognized_text:
                    print("检测到任务四关键词，切换到任务四...")
                    return "任务四"
                elif self._check_extension_task_two_keyword(recognized_text):
                    print("检测到拓展任务二关键词，切换到拓展任务二...")
                    return "拓展任务二"
                elif self._check_extension_task_one_keyword(recognized_text):
                    print("再次检测到拓展任务一关键词，重新开始拓展任务一...")
                    self.voice_module.play_local_audio(5)
                    self.display_module.display_task_prompt("拓展任务一")
                    collected_values = []
                    break

                # 尝试提取数字
                import re
                numbers = re.findall(r'\d+\.?\d*', recognized_text)
                if numbers:
                    collected_values.append(numbers[0])
                    print(f"成功提取数值: {numbers[0]}")
                    break
                else:
                    self.display_module.display_text_and_play_audio("未识别到数值，请重新拍摄", self.voice_module)

        if not collected_values:
            self.display_module.display_text_and_play_audio("未能成功识别任何数值", self.voice_module)
            return None

        print(f"收集到的所有数值: {collected_values}")

        # 读取题目模板和直接答案
        try:
            with open("task_file/tzrwyml/T1TM.txt", "r", encoding="utf-8") as file:
                lines = file.readlines()
                while len(lines) < 4:
                    lines.append("")
                lines = [line.strip() for line in lines]
                question_lines = [line for line in lines[:3] if line]
                question_template = "\n".join(question_lines).strip()
                direct_answer = lines[3].strip()
                print(f"题目模板: {question_template}")
                print(f"第四行内容: {direct_answer}")

                if direct_answer:
                    output = direct_answer
                    for i, value in enumerate(collected_values, 1):
                        placeholder = f"{{value_{i}}}"
                        if placeholder in output:
                            output = output.replace(placeholder, value)
                    if "{all_values}" in output:
                        all_values_str = ", ".join(collected_values)
                        output = output.replace("{all_values}", all_values_str)
                    self.display_module.display_text_and_play_audio(output, self.voice_module)
                    return None
        except FileNotFoundError:
            print("未找到题目文件 '拓展任务一题目.txt'，使用默认题目模板")
            question_template = "一张正方形红纸，边长{value_1}厘米，要剪最大圆形。请计算剩余面积（π=3.14）"
            direct_answer = ""

        # 计算与播报
        if use_online_model:
            print("使用大模型计算结果")
            prompt = question_template
            for i, value in enumerate(collected_values, 1):
                placeholder = f"{{value_{i}}}"
                if placeholder in prompt:
                    prompt = prompt.replace(placeholder, value)
            if "{all_values}" in prompt:
                all_values_str = ", ".join(collected_values)
                prompt = prompt.replace("{all_values}", all_values_str)
            print(f"构造的提示词: {prompt}")

            response = self.client.chat.completions.create(
                model='deepseek-ai/DeepSeek-V3',
                messages=[{"role": "user", "content": prompt}],
                temperature=0
            )
            result = response.choices[0].message.content.strip()
            print(f"大模型返回结果: {result}")
            if "," in result or "，" in result:
                result_list = [x.strip() for x in result.replace("，", ",").split(",") if x.strip()]
            else:
                result_list = [result]
            try:
                with open("task_file/tzrwyml/T1DA.txt", "r", encoding="utf-8") as file:
                    template = file.read().strip()
                    if len(result_list) == 1:
                        output = template.format(result=result_list[0], result1=result_list[0])
                    elif len(result_list) == 2:
                        output = template.format(result=result, result1=result_list[0], result2=result_list[1])
                    else:
                        output = template.format(
                            result=result,
                            result1=result_list[0] if len(result_list) > 0 else "",
                            result2=result_list[1] if len(result_list) > 1 else "",
                            result3=result_list[2] if len(result_list) > 2 else "",
                            result4=result_list[3] if len(result_list) > 3 else "",
                            result5=result_list[4] if len(result_list) > 4 else ""
                        )
            except (FileNotFoundError, KeyError, IndexError):
                if len(result_list) == 1:
                    output = f"计算结果为: {result_list[0]}"
                elif len(result_list) == 2:
                    output = f"第一个结果为: {result_list[0]}，第二个结果为: {result_list[1]}"
                else:
                    output = "，".join([f"结果{i+1}: {v}" for i, v in enumerate(result_list)])
        else:
            print("使用外置程序计算结果")
            try:
                from task_file.tzrwyml.T1CX import run_task
                result = run_task(collected_values)
                print(f"外置程序返回结果: {result}")
                # 支持外部程序返回多个结果
                if isinstance(result, (list, tuple)):
                    result_list = [str(x) for x in result]
                elif isinstance(result, str) and ("," in result or "，" in result):
                    result_list = [x.strip() for x in result.replace("，", ",").split(",") if x.strip()]
                else:
                    result_list = [str(result)]
                try:
                    with open("task_file/tzrwyml/T1DA.txt", "r", encoding="utf-8") as file:
                        template = file.read().strip()
                        if len(result_list) == 1:
                            output = template.format(result=result_list[0], result1=result_list[0])
                        elif len(result_list) == 2:
                            output = template.format(result=result, result1=result_list[0], result2=result_list[1])
                        else:
                            output = template.format(
                                result=result,
                                result1=result_list[0] if len(result_list) > 0 else "",
                                result2=result_list[1] if len(result_list) > 1 else "",
                                result3=result_list[2] if len(result_list) > 2 else "",
                                result4=result_list[3] if len(result_list) > 3 else "",
                                result5=result_list[4] if len(result_list) > 4 else ""
                            )
                except (FileNotFoundError, KeyError, IndexError):
                    if len(result_list) == 1:
                        output = f"计算结果为: {result_list[0]}"
                    elif len(result_list) == 2:
                        output = f"第一个结果为: {result_list[0]}，第二个结果为: {result_list[1]}"
                    else:
                        output = "，".join([f"结果{i+1}: {v}" for i, v in enumerate(result_list)])
            except Exception as e:
                print(f"调用外置程序失败: {e}")
                output = "计算失败，请重试。"

        self._safe_release_camera(camera)
        self.display_module.display_text_and_play_audio(output, self.voice_module)
        return None
    def _handle_extension_task_two(self):
        """Extension task two: wait for a GPIO button press and run the matching
        Mecanum-wheel car routine.

        GPIO18 (key 32) runs the one-waypoint program; GPIO23 (key 1073742053)
        runs the two-waypoint program. After a 3-second timeout with no press,
        the one-waypoint program runs by default. Sets the module-level
        IS_EXTENSION_TASK_TWO flag and always returns None.
        """
        global IS_EXTENSION_TASK_TWO  # module-level flag read by later capture calls
        IS_EXTENSION_TASK_TWO = True  # mark extension task two as active
        # Release any camera this object holds so it does not stay locked
        # while the car program runs.
        if hasattr(self, 'cap') and self.cap.isOpened():
            self.cap.release()
            cv2.destroyAllWindows()
        self.voice_module.play_local_audio(6)  # play the task prompt audio clip
        print("\n=== 进入拓展任务二流程 ===")
        self.display_module.display_task_prompt("拓展任务二")
        time.sleep(1)
        print("等待用户按下GPIO18或GPIO23按钮...")
        
        # Reuse the existing module-level gpio_handler instead of re-initialising
        # GPIO mode; poll buttons until one fires or the timeout elapses.
        button_pressed = False
        start_time = time.time()
        timeout = 3  # seconds before falling back to the default routine

        try:
            while not button_pressed and (time.time() - start_time) < timeout:
                # Debounced key read from the shared GPIO handler.
                gpio_key = gpio_handler.check_keypress()
                
                if gpio_key == 32:  # GPIO18 -> space key: one-waypoint program
                    print("检测到GPIO18按下，执行麦克纳姆轮小车程序一个点位")
                    from chengxu.mknmlxcygdw import run_task
                    run_task()
                    button_pressed = True
                    break

                # GPIO23 -> target key: two-waypoint program
                elif gpio_key == 1073742053:  # GPIO23 triggers the target key
                    print("检测到GPIO23按下，执行麦克纳姆轮小车程序两个点位")
                    from chengxu.mknmlxclgdw import run_task
                    run_task()
                    button_pressed = True
                    break

                time.sleep(0.1)  # short sleep keeps CPU usage low while polling

            # Timed out with no press: default to the one-waypoint program.
            if not button_pressed:
                print("等待超时，默认执行麦克纳姆轮小车程序一个点位")
                from chengxu.mknmlxcygdw import run_task
                run_task()
        except Exception as e:
            print(f"拓展任务二执行异常: {e}")
        finally:
            # GPIO cleanup is deliberately NOT done here so motor control
            # elsewhere keeps working.
            print("拓展任务二执行完毕")

        return None

    def get_skill_category(self, skill_name):
        """Classify a traditional skill into an intangible-heritage category
        via a hosted LLM, with retries and safe fallbacks.

        Reads the candidate category list from task_file/rwsml/J3LB.txt (one
        category per line). Bug fix: the original left `categories` undefined
        when that file was missing, which raised NameError later; it also
        computed `categories_str` but never used it in the prompt. Both are
        fixed here: a built-in default list backs the file, and the prompt is
        built from the actual list.

        Args:
            skill_name: name of the skill, e.g. "侗族大歌".

        Returns:
            A sentence "<skill_name>属于<类别>。"; falls back to
            "<skill_name>属于传统技艺。" if the model fails or no category
            can be extracted after all retries.
        """
        # Built-in fallback category list; joining it with 、 reproduces the
        # original hard-coded prompt text exactly.
        default_categories = ["民间文学", "传统音乐", "传统舞蹈", "传统戏剧", "曲艺",
                              "传统体育", "游艺与杂技", "传统美术", "传统技艺", "传统医药"]
        try:
            # Read the category list from file (one per line, blanks skipped).
            with open("task_file/rwsml/J3LB.txt", "r", encoding="utf-8") as f:
                categories = [line.strip() for line in f if line.strip()]
            if not categories:
                categories = default_categories
        except FileNotFoundError:
            # Fix: define categories here so later matching cannot NameError.
            categories = default_categories

        # Synthesize the prompt from the actual category list.
        categories_str = "、".join(categories)
        prompt = (f"请判断'{skill_name}'属于哪种传统类别？必须从以下类别中选择一个："
                  f"{categories_str}。请使用完整句子回答，格式必须是："
                  f"{skill_name}属于XXX。不要改变原始名称'{skill_name}'。")

        # Retry a few times: the model sometimes omits a recognizable category.
        max_attempts = 3
        for attempt in range(max_attempts):
            try:
                client = OpenAI(api_key="sk-jjzlqcuovvqahaudwzkhmoeysnwxelouefhgvxmzytuulbhj",
                                base_url="https://api.siliconflow.cn/v1")
                response = client.chat.completions.create(
                    model='Qwen/QVQ-72B-Preview',
                    messages=[{"role": "user", "content": prompt}],
                    stream=False,
                    max_tokens=50)

                raw_response = response.choices[0].message.content.strip()
                print(f"大模型返回结果 (尝试 {attempt+1}/{max_attempts}):", raw_response)

                # Extract the first known category mentioned anywhere in the
                # reply and rebuild a canonical sentence around it. (The
                # original's extra "属于"-splitting pass could never match a
                # category this scan had not already found, so it is dropped.)
                processed_response = None
                for category in categories:
                    if category in raw_response:
                        processed_response = f"{skill_name}属于{category}"
                        break

                if processed_response is None:
                    # Hard-coded defaults for skills the model often misses.
                    default_mappings = {
                        "侗族大歌": "传统音乐",
                        # more default mappings can be added here
                    }
                    if skill_name in default_mappings:
                        processed_response = f"{skill_name}属于{default_mappings[skill_name]}"
                    elif attempt < max_attempts - 1:
                        print(f"未找到有效类别，重试中...")
                        continue
                    else:
                        # Last attempt: use the most likely generic category.
                        processed_response = f"{skill_name}属于传统技艺"

                # Ensure the sentence ends with a full stop.
                if not processed_response.endswith("。"):
                    processed_response += "。"

                return processed_response

            except Exception as e:
                print(f"调用大模型出错 (尝试 {attempt+1}/{max_attempts}): {e}")
                if attempt < max_attempts - 1:
                    time.sleep(1)  # brief pause before retrying
                else:
                    # Final attempt failed: return the generic fallback.
                    return f"{skill_name}属于传统技艺。"

        # Unreachable in practice, kept as a last-resort fallback.
        return f"{skill_name}属于传统技艺。"

    def _capture_and_process(self, camera, prompt=None):
        """Loop taking photos until a usable image file exists on disk.

        Returns the captured photo's filename, or one of the sentinel strings
        "skip_to_extension_task" / "skip_to_extension_task_two" when the user
        requests a task switch (GPIO Y key, or special markers returned by
        camera.capture_photo). The camera device is released after every
        attempt so it never stays locked.
        """
        def _release_camera():
            # Drop the capture device only if it is currently open.
            if hasattr(camera, 'camera_capture') and camera.camera_capture.isOpened():
                camera.camera_capture.release()

        while True:
            # Poll GPIO first: the Y key (121) forces a jump to the extension task.
            if gpio_handler.check_keypress() == 121:
                print("强制切换至拓展任务")
                _release_camera()
                cv2.destroyAllWindows()
                return "skip_to_extension_task"

            if prompt:
                print(prompt)

            try:
                filename = camera.capture_photo(is_extension_task_two=IS_EXTENSION_TASK_TWO)

                # Sentinel "filenames" signal task switches rather than photos.
                if filename == "y_key_pressed":
                    _release_camera()
                    return "skip_to_extension_task"
                if filename == "e_key_pressed":
                    _release_camera()
                    return "skip_to_extension_task_two"

                if not filename:
                    print("拍照已取消")          # user cancelled; retry
                elif not os.path.exists(filename):
                    print("照片文件不存在")      # capture reported a file that is missing; retry
                else:
                    return filename
            finally:
                # Always release the camera after each attempt.
                _release_camera()


class PasswordValidator:
    """Validates a device-bound password derived from the Raspberry Pi serial.

    The expected password is the last six characters of the CPU serial number
    with each character's code point shifted up by one (a simple +1 Caesar
    shift). An accepted password is persisted so later runs skip the prompt.
    """

    def __init__(self):
        self.display_module = DisplayModule()
        self.voice_module = VoiceModule()
        # File where the accepted password is persisted between runs.
        self.password_file = "task_file/password"

    def get_raspberry_pi_serial(self):
        """Return the last six characters of the Pi's CPU serial, or None on error."""
        try:
            with open('/proc/cpuinfo', 'r') as f:
                for line in f:
                    if line.startswith('Serial'):
                        serial = line.split(':')[-1].strip()
                        return serial[-6:]  # last six characters only
        except Exception as e:
            print(f"Error reading serial: {e}")
        return None

    def generate_password(self, id_suffix):
        """Derive the password from the serial suffix via a +1 code-point shift.

        Args:
            id_suffix: string (normally the six-char serial suffix).

        Returns:
            A string of the same length with every character shifted up by one.
        """
        return "".join(chr(ord(char) + 1) for char in id_suffix)

    def read_stored_password(self):
        """Return the previously saved password, or None when no file exists."""
        try:
            with open(self.password_file, "r") as f:
                return f.read().strip()
        except FileNotFoundError:
            return None

    def save_password(self, password):
        """Persist the accepted password to the password file."""
        with open(self.password_file, "w") as f:
            f.write(password)

    def validate(self):
        """Prompt for the password until the device-bound value is entered.

        Bug fix: the original compared expected_password with ITSELF
        (`expected_password == expected_password`), which is always true and
        silently bypassed validation; stored_password was read but never used.
        The comparison now checks the stored password against the expected one.
        """
        id_suffix = self.get_raspberry_pi_serial()
        if not id_suffix:
            print("无法获取树莓派 ID，程序终止。")
            return

        expected_password = self.generate_password(id_suffix)
        stored_password = self.read_stored_password()

        # Stored password matches the device-derived one: nothing more to do.
        if stored_password == expected_password:
            print("密码验证通过，程序继续执行...")
            return

        # Wrong or missing password: prompt until the user enters it correctly.
        while True:
            self.display_module.passwordwrong_text_and_play_audio(
                f"密码有误，请重新输入", self.voice_module)
            user_input = input("请输入密码: ").strip()
            if user_input == expected_password:
                self.save_password(user_input)
                pygame.display.flip()
                print("密码正确，已保存。程序继续执行...")
                break
            else:
                print("密码错误，请重试。")


if __name__ == "__main__":
    ''' try:
         main_module = MainModule()
         main_module.main()
     finally:
         GPIO.cleanup()  # 确保程序退出时清理GPIO资源'''
