# api_server.py (最终修复版)

from contextlib import asynccontextmanager
from email.policy import strict
from multiprocessing import context
# from models.DINO.dinov2.dinov2.dinov2.data import transforms
import torch
from transformers import AutoProcessor, LlavaForConditionalGeneration, BitsAndBytesConfig
import uvicorn
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import torchvision.transforms as transforms
import base64
from PIL import Image
import io
import os
from typing import List, Dict, AsyncGenerator
import sys
from openai import OpenAI
import requests

sys.path.append('/data/users/guochao/models/DINO/dinov2/dinov2')
from dinov2.models.vision_transformer import vit_large, vit_base



# WARNING(security): a live-looking API key is hard-coded as the fallback value.
# Rotate this key and require the environment variable instead of shipping a default.
API_KEY = os.environ.get("DASHSCOPE_API_KEY", "sk-f9d6ea9cd18547d4b4a08955560c988b")
# --- 2. Model loading configuration ---

# Path to the LoRA-finetuned DINOv2 checkpoint loaded by the startup hook.
ViT_PATH = "/data/users/guochao/models/DINO/dinov2/dinov2/outputs/lora_finetuned_model_epoch_15.pth"
vit_model = None  # populated by load_model() at application startup
label_map = None  # list mapping class index -> class name, loaded at startup

# --- 1. FastAPI application initialization ---
app = FastAPI(title="多模态昆虫识别API", version="1.0.0")
@app.on_event("startup")
async def load_model():
    """Load the DINOv2 ViT classifier and its class-label map at startup.

    Populates the module globals ``vit_model`` (set to eval mode) and
    ``label_map``.  Re-raises any loading error so FastAPI aborts startup
    instead of serving with a half-initialized model.
    """
    global vit_model, label_map
    print("正在加载ViT模型...")
    try:
        # Base DINOv2 ViT-B backbone; finetuned weights are overlaid below.
        vit_model = vit_base()

        if os.path.exists(ViT_PATH):
            lora_state_dict = torch.load(ViT_PATH, map_location='cpu')
            try:
                # LoRA checkpoints cover only a subset of parameters, so a
                # non-strict load is the expected fast path.
                vit_model.load_state_dict(lora_state_dict, strict=False)
            except Exception:
                # Fallback: copy matching tensors one by one, which tolerates
                # checkpoints whose keys only partially overlap the model.
                model_state_dict = vit_model.state_dict()
                for name, param in lora_state_dict.items():
                    if name in model_state_dict:
                        model_state_dict[name].copy_(param)
                vit_model.load_state_dict(model_state_dict, strict=False)

        vit_model.eval()
        print("ViT模型已加载")

        # Label file: one class per line, format "<id> <code> <name>";
        # only the name (third field) is kept.  Lines that do not match
        # the three-field format are stored verbatim.
        label_map_path = "/data/users/guochao/datasets/inaturalist/train_datas/train_labels.txt"
        if os.path.exists(label_map_path):
            with open(label_map_path, 'r', encoding='utf-8') as f:
                label_map = []
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    parts = line.split(" ", 2)
                    label_map.append(parts[2] if len(parts) == 3 else line)
        else:
            # No label file available: fall back to 2526 placeholder names.
            label_map = [f"类别_{i}" for i in range(2526)]

    except Exception as e:
        print(f"加载ViT模型时发生严重错误: {e}")
        raise  # bare raise preserves the original traceback
    


# --- 3. API request/response data models ---
class ChatMessage(BaseModel):
    """One turn of chat history: a role ("user"/"assistant") and its text."""
    role: str
    content: str

class ChatRequest(BaseModel):
    """Request body for /chat: image, question, species context and history."""
    image_base64: str  # JPEG image encoded as base64 (no data-URI prefix)
    prompt: str  # the user's question about the insect in the image
    species_info: dict  # expected keys: "name", "latin_name" (both optional)
    chat_history: List[ChatMessage] = []  # prior turns; empty for a new chat

class ChatResponse(BaseModel):
    """Response body for /chat: the assistant's answer text."""
    response: str

class VitInferenceRequest(BaseModel):
    """Request body for /vit_inference: a base64-encoded image."""
    image_base64: str

class Prediction(BaseModel):
    """A single classification candidate returned by the ViT model."""
    class_name: str  # human-readable label from label_map
    class_index: int  # raw class index in the model's output logits
    probability: float  # softmax probability in [0, 1]

class VitInferenceResponse(BaseModel):
    """Response body for /vit_inference: top-5 candidates plus the best one."""
    predictions: List[Prediction]
    top_class: str  # class_name of the highest-probability prediction


# --- 4. ViT模型推理API端点 ---
# Preprocessing pipeline using ImageNet normalization statistics at 224x224,
# the resolution DINOv2 was trained with.  Built once at import time instead
# of being reconstructed on every request.
_VIT_PREPROCESS = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])


@app.post("/vit_inference", response_model=VitInferenceResponse)
async def vit_inference(request: VitInferenceRequest):
    """Classify a base64-encoded image with the DINOv2 ViT model.

    Returns the top-5 predictions (name, index, softmax probability) and the
    single most likely class name.

    Raises:
        HTTPException 503: the model has not finished loading (or failed).
        HTTPException 500: decoding or inference failed.
    """
    if vit_model is None:
        raise HTTPException(status_code=503, detail="ViT模型正在加载或加载失败，请稍后再试。")

    try:
        # Decode the image payload and force 3-channel RGB.
        image_bytes = base64.b64decode(request.image_base64)
        image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
        image_tensor = _VIT_PREPROCESS(image).unsqueeze(0)  # add batch dim

        with torch.no_grad():
            # NOTE(review): the model's forward output is treated as
            # classification logits here — this assumes a classification
            # head is attached to the backbone; confirm against training code.
            logits = vit_model(image_tensor)
            probabilities = torch.nn.functional.softmax(logits, dim=1)
            top_probs, top_indices = torch.topk(probabilities, 5)

        # Guard against a missing label map (startup may have partially failed).
        names = label_map if label_map is not None else []
        predictions = []
        for prob, class_idx in zip(top_probs[0].tolist(), top_indices[0].tolist()):
            class_name = names[class_idx] if class_idx < len(names) else f"类别_{class_idx}"
            predictions.append({
                "class_name": class_name,
                "class_index": class_idx,
                "probability": prob
            })

        top_class = predictions[0]["class_name"] if predictions else "未知"
        return VitInferenceResponse(predictions=predictions, top_class=top_class)

    except Exception as e:
        print(f"ViT推理时发生错误: {e}")
        raise HTTPException(status_code=500, detail=f"ViT模型推理时发生内部错误: {str(e)}")
    
# --- 5. VLM chat endpoint (prompt construction) ---
@app.post("/chat", response_model=ChatResponse)
async def chat_with_vlm(request: ChatRequest):
    """Answer a question about an insect image via the DashScope qwen-vl-max model.

    Builds a DashScope-native multimodal request (system prompt, chat history,
    then the image + contextualized question) and returns the assistant reply.

    Raises:
        HTTPException 500: the upstream request failed, the response could not
            be parsed, or any other processing error occurred.
    """
    try:
        # Prepend the known species information to the user's question.
        info = request.species_info
        context_info = f"关于这张图片里的昆虫，已知信息：物种名是 {info.get('name', '未知')} (拉丁学名: {info.get('latin_name', '未知')})。"
        user_prompt = f"{context_info}\n我的问题是：{request.prompt}"

        # System prompt first (fixed duplicate-"text"-key bug in the dict).
        messages = [{
            "role": "system",
            "content": [{"text": "你是一个知识渊博且友好的中国昆虫学专家，请保证你的回答是非常自信且正确的。"}]
        }]

        # Replay the prior conversation turns.
        for message in request.chat_history:
            messages.append({
                "role": message.role,
                "content": [{"text": message.content}]
            })

        # Current turn: image plus the contextualized question.
        messages.append({
            "role": "user",
            "content": [
                {
                    "image": f"data:image/jpeg;base64,{request.image_base64}"
                },
                {
                    "text": user_prompt
                }
            ]
        })

        data = {
            "model": "qwen-vl-max",
            "input": {
                "messages": messages
            },
            "parameters": {"max_new_tokens": 512,
                    "top_p": 0.85,
                    "top_k": 50,
                    "temperature": 0.7,
                    "repetition_penalty": 1.1,
                    }
        }
        # Fixed: `headers` was previously undefined (NameError on every call),
        # and the payload above is DashScope-native format, so it must target
        # the native multimodal-generation endpoint — not the OpenAI-compatible
        # base URL.
        headers = {
            "Authorization": f"Bearer {API_KEY}",
            "Content-Type": "application/json",
        }
        response = requests.post(
            "https://dashscope.aliyuncs.com/api/v1/services/aigc/multimodal-generation/generation",
            headers=headers,
            json=data,
            timeout=60,  # avoid hanging the endpoint on a stalled upstream
        )
        response.raise_for_status()

        result = response.json()
        assistant_response = result["output"]["choices"][0]["message"]["content"]
        # DashScope multimodal replies return content as a list of parts
        # (e.g. [{"text": "..."}]); flatten to a plain string for ChatResponse.
        if isinstance(assistant_response, list):
            assistant_response = "".join(part.get("text", "") for part in assistant_response)

        return ChatResponse(response=assistant_response)

    except requests.exceptions.RequestException as e:
        raise HTTPException(status_code=500, detail=f"请求DashScope API时发生错误: {e}")
    except KeyError as e:
        raise HTTPException(status_code=500, detail=f"解析DashScope API响应时发生错误: {e}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"处理请求时发生错误: {e}")
    

# (Root path and main entry point)
@app.get("/")
def read_root():
    """Health-check endpoint: confirms the API process is running."""
    return {"status": "多模态昆虫识别API运行中"}


if __name__ == "__main__":
    # Serve on all interfaces at port 8503.
    uvicorn.run(app, host="0.0.0.0", port=8503)