from open_softwares import OpenSoftwares
from ollama_client import OllamaClient
import json
from queue import Queue
from threading import Event


class InstructionMode:
    """Instruction-dispatch mode: turn one LLM reply into one concrete action.

    The LLM is prompted to answer with an exact command phrase; that phrase is
    looked up in ``command_mapping`` and the mapped handler is executed.
    Replies in this mode are consumed whole (no streaming to the caller).

    To add or change an instruction, three places must stay in sync:
    1. the ``command_mapping`` dict below,
    2. a handler method (see the "instruction mapping methods" section),
    3. the system prompt file.
    """
    def __init__(
            self,
            tts_text_queue=None,
            playmusic_filename_queue=None,
            playeffect_filename_queue=None,

            llm_client=None,
            input_text='',
            system_prompt='',
            assistant_name='小智',
            stop_instruction_event=None,
            ):
        """Build the dispatcher.

        All object-valued parameters default to ``None`` and are created
        per-instance below.  (Previously they were mutable default arguments
        — ``Queue()``/``Event()``/``OllamaClient()`` — evaluated once at class
        definition time and silently shared by every instance.)

        :param tts_text_queue: queue receiving text for speech synthesis.
        :param playmusic_filename_queue: queue of music filenames to play.
        :param playeffect_filename_queue: queue of sound-effect filenames.
        :param llm_client: chat client; a fresh ``OllamaClient`` if omitted.
        :param input_text: the user utterance to classify in :meth:`run`.
        :param system_prompt: prompt text; loaded from the prompt file if empty.
        :param assistant_name: display name used in console output.
        :param stop_instruction_event: event flag to stop instruction mode.
        """
        # 1️⃣ (1 / 3) place to change the instruction mapping
        self.command_mapping = {
            '无法识别您的指令': self.instruction_error,
            '为您打开微信': self.open_wechat,
            '为您关闭微信': self.close_wechat,
        }

        self.control = OpenSoftwares()
        self.assistant_name = assistant_name
        self.llm_client = llm_client if llm_client is not None else OllamaClient()
        self.input_text = input_text
        if not system_prompt:
            # NOTE: "insturction" typo is intentional — it matches the file on disk.
            with open('assets/prompts/insturction_prompt.txt', 'r', encoding='utf-8') as f:
                system_prompt = f.read()
        self.system_prompt = system_prompt
        # Retry budgets for coaxing the model back into the required format.
        self.check_command_loop = 3
        self.check_json_loop = 3

        self.tts_text_queue = tts_text_queue if tts_text_queue is not None else Queue()
        self.playmusic_filename_queue = (
            playmusic_filename_queue if playmusic_filename_queue is not None else Queue()
        )
        self.playeffect_filename_queue = (
            playeffect_filename_queue if playeffect_filename_queue is not None else Queue()
        )
        # Flag signalling that instruction mode should stop.
        self.stop_instruction_event = (
            stop_instruction_event if stop_instruction_event is not None else Event()
        )

    def _check_command(self, answer):
        """Validate *answer* against ``command_mapping``; retry if it doesn't match.

        Guards against the LLM not following the required reply format: up to
        ``check_command_loop`` correction rounds are sent back to the model.
        Always returns a string that IS a key of ``command_mapping``, so
        :meth:`run` can dispatch without a KeyError.
        """
        if answer in self.command_mapping:
            self.check_command_loop = 3
            return answer
        elif self.check_command_loop > 0:
            instruction = "Error: 请按照 <system> 格式要求回复"
            print(f"系统自动回复：{instruction}, 倒数 {self.check_command_loop} 次")
            print(f"{self.assistant_name}：", end='')
            new_answer = self.llm_client.chat_print(instruction)
            self.check_command_loop -= 1
            # Recurse on the corrected reply; if the model keeps misbehaving
            # the retry budget runs out and we fall through to the else branch.
            return self._check_command(new_answer)
        else:
            self.check_command_loop = 3
            # BUGFIX: this previously returned '指令无法识别', which is NOT a
            # key of command_mapping, so run() raised KeyError exactly when
            # the model misbehaved.  Return the dedicated fallback key so
            # run() dispatches to instruction_error() instead.
            return '无法识别您的指令'

    def _is_valid_json(self, json_str):
        """Return True if *json_str* parses as JSON, False otherwise.

        ``TypeError`` covers non-string input (e.g. None); ``ValueError``
        covers malformed JSON (``json.JSONDecodeError`` is a subclass).
        """
        try:
            json.loads(json_str)
            return True
        except (ValueError, TypeError):
            return False

    def run(self):
        """Classify ``input_text`` via the LLM and execute the mapped handler.

        Returns the handler's result (True on success, False when the
        instruction could not be recognized).
        """
        # answer is a plain string naming one of the mapped commands
        print(f"{self.assistant_name}：", end='')
        answer = self.llm_client.chat_print(self.input_text, system_prompt=self.system_prompt)
        answer = self._check_command(answer)
        return self.command_mapping[answer]()


    """
    👇 instruction mapping methods
    """
    def instruction_error(self):
        """Fallback handler: the instruction could not be recognized."""
        print('指令无法识别')
        return False
    # 2️⃣ (2 / 3) place to add instruction handler methods
    def open_wechat(self):
        """Launch WeChat and enqueue a spoken confirmation."""
        self.control.open_application(r'C:\Program Files\Tencent\Weixin\Weixin.exe')
        self.tts_text_queue.put('为您打开微信')
        return True
    def close_wechat(self):
        """Terminate WeChat and enqueue a spoken confirmation."""
        self.control.close_application('Weixin.exe')
        self.tts_text_queue.put('为您关闭微信')
        return True


    """
    👇 methods not yet wired into the prompt or the instruction mapping;
    removable without affecting the rest — suitable for standalone use.
    """
    def _check_json(self, answer):
        """Validate that *answer* is JSON; retry with the model if not.

        Mirrors :meth:`_check_command` but for JSON-formatted replies, with
        its own ``check_json_loop`` retry budget.  Returns the valid JSON
        string, or the literal "指令无法识别" once retries are exhausted.
        """
        if self._is_valid_json(answer):
            self.check_json_loop = 3
            return answer
        elif self.check_json_loop > 0:
            instruction = "Error: 请严格按照 `json` 格式回复，请检查格式。以 `{` 作为开头， `}` 作为结尾"
            print(f"系统自动回复：{instruction}, 倒数 {self.check_json_loop} 次")
            print(f"{self.assistant_name}：", end='')
            new_answer = self.llm_client.chat_print(instruction, is_printing=True)
            self.check_json_loop -= 1
            # Same recursion-with-budget scheme as _check_command.
            return self._check_json(new_answer)
        else:
            self.check_json_loop = 3
            return "指令无法识别"

    def json_instruction(self, input_text='', input_text_path="assets/prompts/insturction_prompt_json_user.txt", system_prompt='', system_prompt_path="assets/prompts/insturction_prompt_json_sys.txt", is_print=True):
        """Run a one-shot JSON-mode query against the LLM.

        Clears the client's conversation history, loads prompt/user text from
        the given files when not passed explicitly, and returns the model's
        reply validated through :meth:`_check_json`.
        """
        self.llm_client.history = []
        if not system_prompt:
            with open(system_prompt_path, 'r', encoding='utf-8') as f:
                system_prompt = f.read()
        if not input_text:
            with open(input_text_path, 'r', encoding='utf-8') as f:
                input_text = f.read()
        # (original note: "deliberately misleading") — echo prompts for debugging
        if is_print:
            print(f"系统提示词：\n{system_prompt}")
            print(f"\n用户指令：\n{input_text}")
        print(f"{self.assistant_name}：", end='')
        answer = self.llm_client.chat_print(user_prompt=input_text, system_prompt=system_prompt)
        answer = self._check_json(answer)

        return answer


if __name__ == "__main__":
    # Manual smoke test: run a one-shot JSON-mode query with the default
    # prompt files, using a specific local model.
    instruction_mode = InstructionMode()
    instruction_mode.llm_client.model = 'qwen3:30b-a3b-instruct-2507-q8_0'
    instruction_mode.json_instruction()