import os

import openai
import timm  # EfficientNetV2 提供库
import torch
from PIL import Image
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse
from torchvision import transforms
import json
from pydantic import BaseModel

# Initialize the FastAPI application; endpoints below register via decorators.
app = FastAPI()


# Class-name <-> label-index mapping

def load_class_mapping():
    """Build the bidirectional species-name/label-index mappings.

    Returns:
        tuple: ``(class_to_idx, idx_to_class)`` where ``class_to_idx`` maps
        each species name to its integer label (0-19) and ``idx_to_class``
        is the exact inverse.
    """
    # Species names in label order; index in this tuple == model output index.
    species = (
        '刺槐叶瘿蚊', '十二齿小蠹', '双钩异翅长蠹', '埃及吹绵蚧', '悬铃木方翅网蝽',
        '扶桑绵粉蚧', '日本双棘长蠹', '松树蜂', '桉树枝瘿姬小蜂', '椰心叶甲',
        '澳洲吹绵蚧', '红脂大小蠹', '美国白蛾', '美国白蛾幼虫', '苏铁白轮盾蚧',
        '褐纹甘蔗象', '银合欢豆象', '锈色棕榈象', '长林小蠹', '马缨丹',
    )
    class_to_idx = {name: label for label, name in enumerate(species)}
    idx_to_class = dict(enumerate(species))
    return class_to_idx, idx_to_class


# Image preprocessing pipeline
def get_transform():
    """Return the inference preprocessing: resize to 480x480, convert to a
    tensor, then normalize each channel with mean=std=0.5 (maps to [-1, 1])."""
    steps = [
        transforms.Resize((480, 480)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ]
    return transforms.Compose(steps)


class MultiModalPredictor:
    """EfficientNetV2-L image classifier (via timm) with an optional LLM
    post-processing step that fuses the top-k predictions with a textual
    species description supplied by the user."""

    def __init__(self, device="cpu"):
        """
        Args:
            device: torch device string, e.g. "cpu", "mps" or "cuda".
        """
        self.device = device
        self.model = self.load_model()
        self.class_to_idx, self.idx_to_class = load_class_mapping()
        self.transform = get_transform()

    # Load the fine-tuned classification model
    def load_model(self):
        """Create the EfficientNetV2-L backbone, load fine-tuned weights and
        return the model in eval mode on ``self.device``.

        NOTE(review): hardcoded absolute Windows checkpoint path — move to
        configuration / an environment variable.
        """
        model = timm.create_model('tf_efficientnetv2_l.in21k_ft_in1k', pretrained=False, num_classes=20)
        model.load_state_dict(torch.load("E:/pythonProject/efficientnetv2_l_focal_loss.pth", map_location=self.device))
        model.to(self.device)
        model.eval()
        return model

    def _prepare_input(self, image_path):
        """Load the image at ``image_path``, force RGB, and return a
        normalized (1, C, H, W) tensor on ``self.device``."""
        image = Image.open(image_path)
        if image.mode != "RGB":
            image = image.convert("RGB")
        return self.transform(image).unsqueeze(0).to(self.device)

    # Predict an image, returning names and probabilities
    def predict_image(self, image_path, k=3):
        """Classify the image and return its top-k predictions.

        Args:
            image_path: filesystem path to the image.
            k: number of top predictions to return.

        Returns:
            tuple: (class_names, probabilities) as parallel lists; both are
            empty lists when loading or inference fails (error is printed).
        """
        try:
            batch = self._prepare_input(image_path)
            with torch.no_grad():
                outputs = self.model(batch)
                probabilities = torch.nn.functional.softmax(outputs, dim=1)
                topk_values, topk_indices = torch.topk(probabilities, k)
                topk_class_names = [self.idx_to_class[idx.item()] for idx in topk_indices[0]]
                return topk_class_names, topk_values[0].tolist()
        except Exception as e:
            print(f"预测失败: {e}")
            return [], []

    # Predict an image, returning only the class names
    def direct_predict_image(self, image_path, k=3):
        """Return only the top-k class names for the image.

        Bug fix: the original returned a 2-tuple ``([], [])`` on failure but a
        single list on success; it now consistently returns a list (empty on
        failure). Implementation delegates to :meth:`predict_image` instead of
        duplicating the inference code.
        """
        class_names, _ = self.predict_image(image_path, k)
        return class_names

    # Multimodal prediction: image classifier + LLM arbitration
    def predict_multimodal(self, image_path, user_input, k=3):
        """Classify the image, then ask the knowledge-base LLM to choose the
        final species name from the top-k results and the user's description.

        Args:
            image_path: filesystem path to the image.
            user_input: free-text species description from the user.
            k: number of candidate classes passed to the LLM.

        Returns:
            str: the LLM's answer (a single species name per the prompt
            rules), or a fixed error message on any failure.
        """
        try:
            # Run the image classifier first.
            class_names, probabilities = self.predict_image(image_path, k)

            # Render the top-k candidates for the prompt.
            topk_info = "\n".join(
                [f"Class: {cls}, Probability: {prob * 100:.2f}%" for cls, prob in zip(class_names, probabilities)])

            prompt_detail = f"top3：\n{topk_info}\n物种描述：\n{user_input}\n"

            # Full prompt including the decision rules for the LLM.
            final_prompt = (
                "我需要你根据以下规则判断并输出物种名称。我将提供模型预测的 top3 结果和物种描述。"
                "请严格按照下述规则作答，仅输出一个物种的名称，不要附加其他内容。\n\n"
                "规则：\n"
                "1. 如果任一物种的识别概率 ≥ 90%，直接输出该物种的名称（忽略物种描述）。\n"
                "2. 如果没有任何物种的识别概率达到 90%，且物种描述不为空，请结合描述信息和预测结果判断后输出你认为最合适的物种名称。\n"
                "3. 如果物种描述为空，则直接输出概率最高的物种名称。\n\n"
                "以下是预测结果：\n"
                f"{prompt_detail}"
            )

            # Knowledge-base chat endpoint.
            # NOTE(review): hardcoded IP/port — move to configuration.
            base_url = "http://172.25.100.144:7861/knowledge_base/local_kb/IAS"
            data = {
                "model": "glm4-0414",
                "messages": [
                    {"role": "user", "content": final_prompt},
                ],
                "stream": False,
                # temperature 0: deterministic, rule-following output.
                "temperature": 0.0,
                "extra_body": {
                    "top_k": 8,
                    "score_threshold": 0.7,
                    "return_direct": False,
                },
            }

            client = openai.Client(base_url=base_url, api_key="EMPTY")
            resp = client.chat.completions.create(**data)

            # Bug fix: `resp` is a ChatCompletion object, not a JSON string —
            # the original `json.loads(resp)` raised TypeError on every call,
            # so this method always hit the except branch. Read the reply via
            # attribute access instead.
            return resp.choices[0].message.content

        except Exception as e:
            print(f"多模态预测失败: {e}")
            return "预测失败，请检查输入"


# Direct recognition endpoint
@app.post("/recognize/")
async def recognize(request: dict):
    """Classify the image at ``request['image_path']`` and return the single
    best class name.

    Responses:
        200 ``{"result": <class name>}`` on success.
        400 when the image path is missing or does not exist.
        500 on unexpected prediction errors.
    """
    try:
        image_path = request.get('image_path')
        print(image_path)

        # Validate up front: ``request.get`` may return None, and
        # ``os.path.exists(None)`` would raise TypeError.
        if not image_path or not os.path.exists(image_path):
            raise HTTPException(status_code=400, detail="上传的图片文件路径无效。")

        # Run the classifier and return the top-1 class name.
        result = predictor.direct_predict_image(image_path)

        return JSONResponse(content={"result": result[0]})
    except HTTPException:
        # Bug fix: the bare ``except Exception`` below previously swallowed
        # the 400 raised above and turned it into a generic 500.
        raise
    except Exception as e:
        return JSONResponse(status_code=500, content={"error": f"预测失败: {str(e)}"})


class ContentRequest(BaseModel):
    """Request body for the /CommunicateWithBigModel endpoint."""
    # Free-form user message forwarded verbatim to the LLM.
    content: str


# Chat endpoint backed by the knowledge-base LLM
@app.post("/CommunicateWithBigModel")
async def Communicate_with_bigmodel(request: ContentRequest):
    """Forward ``request.content`` to the knowledge-base chat model and
    return the model's reply text.

    NOTE(review): hardcoded base_url/IP — consider moving to configuration.
    """
    base_url = "http://172.25.100.144:7861/knowledge_base/local_kb/IAS"
    content = request.content
    data = {
        "model": "glm4-0414",
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "temperature": 0.7,
        "extra_body": {
            "top_k": 3,
            "score_threshold": 2.0,
            "return_direct": False,
        },
    }
    client = openai.Client(base_url=base_url, api_key="EMPTY")
    resp = client.chat.completions.create(**data)

    # Bug fix: ``resp`` is a ChatCompletion object, not a JSON string — the
    # original ``json.loads(resp)`` raised TypeError on every request. Read
    # the reply via attribute access instead.
    return resp.choices[0].message.content


# POST endpoint accepting a JSON body with an image path and a user description
@app.post("/MultiModalRecognize/")
async def multi_modal_recognize(request: dict):
    """Run the multimodal pipeline (classifier + LLM arbitration) on the
    image at ``request['image_path']`` with ``request['user_input']`` as the
    species description.

    Responses:
        200 ``{"result": <LLM answer>}`` on success.
        400 when ``user_input`` is empty or the image path is invalid.
        500 on unexpected prediction errors.
    """
    try:
        image_path = request.get('image_path')
        user_input = request.get('user_input')

        # The LLM prompt requires a non-empty description.
        if not user_input or not user_input.strip():
            return JSONResponse(status_code=400, content={"error": "user_input不能为空，请提供物种描述。"})

        # Validate up front: ``request.get`` may return None, and
        # ``os.path.exists(None)`` would raise TypeError.
        if not image_path or not os.path.exists(image_path):
            raise HTTPException(status_code=400, detail="上传的图片文件路径无效。")

        result = predictor.predict_multimodal(image_path, user_input)

        return JSONResponse(content={"result": result})

    except HTTPException:
        # Bug fix: the bare ``except Exception`` below previously swallowed
        # the 400 raised above and turned it into a generic 500.
        raise
    except Exception as e:
        return JSONResponse(status_code=500, content={"error": f"预测失败: {str(e)}"})


if __name__ == "__main__":
    # Prefer the Apple-Silicon GPU backend ("mps") when available.
    device = "mps" if torch.backends.mps.is_available() else "cpu"
    # NOTE(review): ``predictor`` is only defined under __main__, so the
    # endpoints would raise NameError if this app were launched with the
    # uvicorn CLI (`uvicorn module:app`) instead of this script — confirm
    # the intended launch method.
    predictor = MultiModalPredictor(device=device)
    import uvicorn
    # 172.26.87.25
    uvicorn.run(app, host="172.27.135.152", port=10000)
    #C:/Users/17805/Desktop/insects/1.jpg