import os
import json
import timm
import torch
from PIL import Image
from torchvision import transforms
import openai

# Class-name <-> index mapping for the 20 supported species.
def load_class_mapping():
    """Return ``(class_to_idx, idx_to_class)`` dicts for the 20 classes.

    The tuple order below defines the label indices (0..19) used by the
    classifier head.
    """
    names = (
        '刺槐叶瘿蚊', '十二齿小蠹', '双钩异翅长蠹', '埃及吹绵蚧',
        '悬铃木方翅网蝽', '扶桑绵粉蚧', '日本双棘长蠹', '松树蜂',
        '桉树枝瘿姬小蜂', '椰心叶甲', '澳洲吹绵蚧', '红脂大小蠹',
        '美国白蛾', '美国白蛾幼虫', '苏铁白轮盾蚧', '褐纹甘蔗象',
        '银合欢豆象', '锈色棕榈象', '长林小蠹', '马缨丹',
    )
    class_to_idx = {name: idx for idx, name in enumerate(names)}
    idx_to_class = dict(enumerate(names))
    return class_to_idx, idx_to_class

# Evaluation-time image preprocessing pipeline.
def get_transform():
    """Resize to 480x480, convert to tensor, normalize each channel to [-1, 1]."""
    steps = [
        transforms.Resize((480, 480)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5] * 3, std=[0.5] * 3),
    ]
    return transforms.Compose(steps)

class MultiModalPredictor:
    """Species predictor combining an EfficientNetV2 image classifier with an
    OpenAI-compatible LLM (knowledge-base backend) for disambiguation.

    All user-facing messages are intentionally kept in Chinese.
    """

    # OpenAI-compatible knowledge-base endpoint; no real API key required.
    BASE_URL = "http://172.25.100.144:7861/knowledge_base/local_kb/IAS"
    LLM_MODEL = "glm4-0414"
    # Default location of the fine-tuned classifier weights.
    DEFAULT_WEIGHTS = "E:/pythonProject/efficientnetv2_l_focal_loss.pth"

    def __init__(self, device="cpu"):
        """Load the classifier, class mapping and preprocessing for `device`."""
        self.device = device
        self.model = self.load_model()
        self.class_to_idx, self.idx_to_class = load_class_mapping()
        self.transform = get_transform()

    def load_model(self, weights_path=None):
        """Create the EfficientNetV2-L classifier (20 classes) and load weights.

        Args:
            weights_path: optional checkpoint path; defaults to DEFAULT_WEIGHTS.

        Returns:
            The model in eval mode on ``self.device``.
        """
        model = timm.create_model('tf_efficientnetv2_l.in21k_ft_in1k', pretrained=False, num_classes=20)
        model.load_state_dict(torch.load(weights_path or self.DEFAULT_WEIGHTS, map_location=self.device))
        model.to(self.device)
        model.eval()
        return model

    def direct_predict_image(self, image_path):
        """Return the top-1 class name for an image, or an error string on failure.

        Delegates to :meth:`predict_image` (which already logs failures) instead
        of duplicating the inference code.
        """
        class_names, _ = self.predict_image(image_path, k=1)
        if not class_names:
            return "图像预测失败"
        return class_names[0]

    def predict_multimodal(self, image_path, user_input, k=3, stream=False):
        """Classify the image, then ask the LLM to reconcile the top-k
        predictions with the user's text description.

        Args:
            image_path: path to the input image.
            user_input: free-text species description (may be empty).
            k: number of top predictions to show the LLM.
            stream: if True, print the LLM reply incrementally while collecting it.

        Returns:
            The LLM's answer (a single species name per the prompt rules),
            or a Chinese error string on failure.
        """
        try:
            class_names, probabilities = self.predict_image(image_path, k)
            if not class_names:
                return "图像预测失败，无法继续"

            # Build the prompt: top-k table followed by the user description.
            topk_info = "\n".join(
                f"Class: {cls}, Probability: {prob * 100:.2f}%"
                for cls, prob in zip(class_names, probabilities)
            )
            prompt_detail = f"top3：\n{topk_info}\n物种描述：\n{user_input}\n"

            final_prompt = (
                "根据以下规则判断并输出物种名称。提供模型预测的top3结果和物种描述，仅输出一个物种名称，不要附加其他内容。\n"
                "规则：\n"
                "1. 若任一物种概率≥90%，直接输出该物种名称（忽略描述）。\n"
                "2. 若无物种概率≥90%且描述不为空，结合描述和预测结果判断。\n"
                "3. 若描述为空，输出概率最高的物种名称。\n\n"
                f"预测结果：\n{prompt_detail}"
            )

            # temperature=0: deterministic decoding — we want exactly one name.
            return self._chat(
                final_prompt,
                stream=stream,
                temperature=0.0,
                extra_body={
                    "top_k": 8,
                    "score_threshold": 0.7,
                    "return_direct": False,
                },
            )

        except Exception as e:
            print(f"多模态预测失败: {e}")
            return "预测失败，请检查输入"

    def communicate_with_bigmodel(self, content, stream=False):
        """Plain-text chat with the LLM (no image involved).

        Returns the reply text, or a Chinese error string on failure.
        """
        try:
            return self._chat(
                content,
                stream=stream,
                temperature=0.7,
                extra_body={
                    "top_k": 3,
                    "score_threshold": 2.0,
                    "return_direct": False,
                },
            )
        except Exception as e:
            print(f"大模型对话失败: {e}")
            return "大模型对话失败"

    def _chat(self, prompt, *, stream, temperature, extra_body):
        """Send one single-turn chat request and return the full reply text.

        Shared by predict_multimodal / communicate_with_bigmodel so the client
        construction and stream/non-stream handling live in one place.
        """
        client = openai.Client(base_url=self.BASE_URL, api_key="EMPTY")
        response = client.chat.completions.create(
            model=self.LLM_MODEL,
            messages=[{"role": "user", "content": prompt}],
            stream=stream,
            temperature=temperature,
            extra_body=extra_body,
        )
        if stream:
            # Streaming: print chunks as they arrive and return the joined text.
            return self._stream_response(response)
        return response.choices[0].message.content

    def predict_image(self, image_path, k=3):
        """Run the classifier and return the top-k predictions.

        Returns:
            (class_names, probabilities) — two parallel lists of length k,
            or ([], []) after logging the error if anything fails.
        """
        try:
            image = Image.open(image_path).convert("RGB")
            image = self.transform(image).unsqueeze(0).to(self.device)

            with torch.no_grad():
                outputs = self.model(image)
                probabilities = torch.nn.functional.softmax(outputs, dim=1)
                topk_values, topk_indices = torch.topk(probabilities, k)
                topk_class_names = [self.idx_to_class[idx.item()] for idx in topk_indices[0]]
                return topk_class_names, topk_values[0].tolist()

        except Exception as e:
            print(f"图像预测失败: {e}")
            return [], []

    def _stream_response(self, response):
        """Print a streaming chat response chunk by chunk; return the full text."""
        full_response = ""
        print("大模型回复: ", end="", flush=True)
        for chunk in response:
            if chunk.choices[0].delta.content:
                content = chunk.choices[0].delta.content
                print(content, end="", flush=True)
                full_response += content
        print()  # trailing newline after the streamed reply
        return full_response


def _ask_image_path():
    """Prompt for an image path; return it, or None if the file does not exist."""
    image_path = input("请输入图像文件路径: ").strip()
    if not os.path.exists(image_path):
        print("错误：图像文件不存在！")
        return None
    return image_path


def _ask_stream():
    """Ask whether the LLM reply should be streamed; return True for 'y...'."""
    return input("是否使用流式输出？(y/n): ").lower().startswith('y')


def main():
    """Console menu loop for the species-recognition tool."""
    # Prefer CUDA, then Apple MPS, then plain CPU.
    device = "cuda" if torch.cuda.is_available() else ("mps" if torch.backends.mps.is_available() else "cpu")
    print(f"使用设备: {device}")
    predictor = MultiModalPredictor(device=device)

    while True:
        print("\n===== 物种识别工具 =====")
        print("1. 仅图像识别")
        print("2. 图像+文本描述识别（多模态）")
        print("3. 大模型对话（纯文本）")
        print("4. 退出程序")

        choice = input("请选择功能（1-4）: ").strip()

        if choice == "1":
            # Image-only recognition.
            image_path = _ask_image_path()
            if image_path is None:
                continue
            result = predictor.direct_predict_image(image_path)
            print(f"识别结果: {result}")

        elif choice == "2":
            # Multimodal recognition: image + text description.
            image_path = _ask_image_path()
            if image_path is None:
                continue
            user_input = input("请输入物种描述: ").strip()

            use_stream = _ask_stream()
            print("等待大模型响应中...")

            if use_stream:
                # Streaming mode prints the reply itself.
                predictor.predict_multimodal(image_path, user_input, stream=True)
            else:
                result = predictor.predict_multimodal(image_path, user_input)
                print(f"多模态识别结果: {result}")

        elif choice == "3":
            # Plain-text LLM chat.
            content = input("请输入对话内容: ").strip()

            use_stream = _ask_stream()
            print("等待大模型响应中...")

            if use_stream:
                predictor.communicate_with_bigmodel(content, stream=True)
            else:
                result = predictor.communicate_with_bigmodel(content)
                print(f"大模型回复: {result}")

        elif choice == "4":
            print("程序已退出")
            break

        else:
            print("无效选择，请输入1-4之间的数字！")


if __name__ == "__main__":
    main()