from langchain_deepseek import ChatDeepSeek
import os
import re
import json
from langchain_community.chat_models.tongyi import ChatTongyi
from dashscope import MultiModalConversation
import dashscope


class ChatInterface:
    """Factory for LLM clients used by the browser-automation pipeline.

    Provides:
      * DeepSeek chat/reasoner clients (via langchain) with two sampling
        profiles — a creative one for tool use and a precision-tuned one
        for deterministic command generation.
      * A streaming Tongyi (Qwen) chat client.
      * Qwen-VL multimodal helpers that analyze page screenshots/DOM and
        generate executable browser commands as JSON.
    """

    # NOTE(security): API keys are hard-coded in source. They should be
    # rotated and supplied via environment variables / a secret store
    # instead of living in version control.
    _DEEPSEEK_API_KEY = "sk-4ff63ec7eab54cfd9cd7c1862ddf1547"
    _DASHSCOPE_CHAT_API_KEY = "sk-c37c66990ef24f349ab5bc9b0f4d3387"
    _DASHSCOPE_VL_API_KEY = "sk-da06138b9f3a4fec973249a61c9edca0"

    def __init__(self):
        # Environment variable takes precedence; the original hard-coded
        # key remains the fallback so existing behavior is unchanged.
        self.api_key = os.environ.get("DEEPSEEK_API_KEY", self._DEEPSEEK_API_KEY)

    def _precise_deepseek(self, model: str):
        """Build a ChatDeepSeek client with the shared precision-tuned sampling.

        Low temperature/top_p favor deterministic output, frequency_penalty
        curbs repetition, and the stop sequences cut off trailing blank
        lines and code fences.
        """
        # Tuning notes from experimentation: ~0.7 suits local models,
        # ~1.3 suits the hosted API for creative output; 0.2 chosen here
        # for answer precision.
        return ChatDeepSeek(
            model=model,
            temperature=0.2,
            top_p=0.5,
            frequency_penalty=1.2,
            stop=["\n\n", "```"],
            api_key=self.api_key,
        )

    def chat_model_tool(self):
        """DeepSeek chat client for tool calling (higher creativity, T=0.7)."""
        return ChatDeepSeek(
            model="deepseek-chat",
            temperature=0.7,
            api_key=self.api_key,
        )

    def chat_model(self):
        """DeepSeek chat client tuned for precise, low-variance output."""
        return self._precise_deepseek("deepseek-chat")

    def chat_model_reasoner(self):
        """DeepSeek reasoner client with the same precision-tuned sampling."""
        return self._precise_deepseek("deepseek-reasoner")

    def qwen_model(self):
        """Streaming Tongyi (Qwen) chat client.

        ChatTongyi reads its credentials from the DASHSCOPE_API_KEY
        environment variable, so it is set here before construction.
        """
        os.environ["DASHSCOPE_API_KEY"] = self._DASHSCOPE_CHAT_API_KEY
        return ChatTongyi(streaming=True)

    async def analyze_situation(self, image_base64: str, dom: str, error: str = ""):
        """Multimodal analysis of the current page state.

        Sends a base64 PNG screenshot plus a truncated DOM summary (and any
        error message) to qwen-vl-max and asks it to describe the page,
        diagnose the failure, and suggest the next action.

        Returns the model's text answer, or a fallback message on API failure.
        """
        prompt = (
            f"你是一个浏览器自动化专家。当前页面状态如下:\n"
            f"DOM摘要: {dom[:2000]}\n"  # truncate DOM to keep the prompt bounded
            f"错误信息: {error if error else '无'}\n\n"
            "请分析：\n"
            "1. 当前页面内容和状态\n"
            "2. 可能失败的原因\n"
            "3. 建议的下一步操作"
        )

        messages = [{
            "role": "user",
            "content": [
                {"image": f"data:image/png;base64,{image_base64}"},
                {"text": prompt},
            ],
        }]

        response = MultiModalConversation.call(
            model="qwen-vl-max",
            messages=messages,
            max_length=4000,
            top_p=0.9,
        )

        if response.status_code == 200:
            return response.output.choices[0].message.content[0]["text"]
        print(f"分析失败: {response.message}")
        return "无法分析当前页面状态"

    async def generate_command(self, instruction: str, context: str = ""):
        """Generate an executable browser command as a dict.

        Prompts qwen-vl-max to emit strict JSON of the form
        ``{"command": <type>, "params": {...}}``; the JSON object is
        extracted with a regex in case the model adds surrounding text.

        Returns the parsed dict, or an ``{"command": "error", ...}`` dict
        when the API call fails or the response cannot be parsed.
        """
        prompt = (
            "严格输出JSON格式，不要包含额外文本！\n"
            "可用命令类型: 打开浏览器, 打开网页, 点击元素, 输入文本, 等待加载, 获取元素文本\n"
            f"当前上下文: {context}\n"
            f"用户指令: {instruction}\n\n"
            "输出格式: {\"command\": \"命令类型\", \"params\": {参数键值对}}"
        )

        messages = [{"role": "user", "content": [{"text": prompt}]}]

        response = MultiModalConversation.call(
            model="qwen-vl-max",
            messages=messages,
            max_length=2000,
            top_p=0.7,
        )

        if response.status_code == 200:
            try:
                result = response.output.choices[0].message.content[0]["text"]
                # Extract the outermost {...} span — the model may wrap
                # the JSON in prose or code fences despite instructions.
                json_match = re.search(r'\{[\s\S]*\}', result)
                if json_match:
                    return json.loads(json_match.group(0))
                print(f"未找到JSON内容: {result}")
                return {"command": "error", "params": {"message": "响应格式错误"}}
            # Narrowed from bare `except Exception`: these are the failures
            # the block above can actually produce (malformed response
            # structure or invalid JSON).
            except (AttributeError, IndexError, KeyError, json.JSONDecodeError) as e:
                print(f"JSON解析失败: {str(e)}")
        return {"command": "error", "params": {"message": "指令生成失败"}}

    def call_qwen_vl_max(self, messages):
        """Call qwen-vl-max for UI+text recognition (UI-automation requests).

        Strips any ```json fencing from the reply and attempts to parse it;
        returns the parsed object on success, otherwise the raw text so the
        caller can still inspect the model output.
        """
        dashscope.api_key = self._DASHSCOPE_VL_API_KEY
        response = MultiModalConversation.call(
            model="qwen-vl-max",
            messages=messages,
            top_p=0.9,
            temperature=0.0,  # keep the output deterministic
        )
        raw = response.output.choices[0].message.content[0]["text"]
        # Remove a leading ```json fence and a trailing ``` fence, if present.
        raw = re.sub(r"^```json\s*", "", raw, flags=re.I)
        raw = re.sub(r"\s*```$", "", raw)
        try:
            return json.loads(raw)
        except json.JSONDecodeError:  # was a bare `except:` — too broad
            return raw
