import gradio as gr
import json
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoProcessor, AutoConfig
from PIL import Image
from GeoCLIP import GeoCLIP
import torch
from torch.nn import functional as F
from configs.config import ModelConfig, TrainConfig, PathConfig

# --- Model / tokenizer / processor setup (module-level, runs on import) ---
device = "cuda:0"
processor = AutoProcessor.from_pretrained('/data/xiaoyj2025/GeoVLM/models/siglip2-so400m-patch14-384')
tokenizer = AutoTokenizer.from_pretrained('/data/xiaoyj2025/GeoVLM/models/Qwen2.5-1.5B-Instruct')
# Register the custom config/model pair so AutoModelForCausalLM can load it.
AutoConfig.register("vlm_model", ModelConfig)
AutoModelForCausalLM.register(ModelConfig, GeoCLIP)

# BUG FIX: the original line was missing the closing quote of the path
# string literal, which made the whole file a SyntaxError.
model = AutoModelForCausalLM.from_pretrained('/data/xiaoyj2025/GeoVLM/src/save/instruct_en/instruct_en')
model.to(device)
print(f'模型参数量为：{sum(p.numel() for p in model.parameters())}')

# Evaluation only: disable dropout etc.
model.eval()

def generate(mode, image_input, text_input, max_new_tokens=2048, temperature=0.0, top_k=None,
             repetition_penalty=1.0):
    """Autoregressively generate a text reply for an (image, text) prompt.

    Args:
        mode: Unused placeholder kept for caller compatibility
            (callers pass 'pretrain'; presumably selected between models
            in an earlier revision — see the removed sft_model code).
        image_input: PIL image (or anything the processor accepts).
        text_input: User question; any '<image>' tag in the rendered
            chat template is expanded to 81 image-pad tokens.
        max_new_tokens: Upper bound on generated tokens.
        temperature: 0.0 selects greedy decoding; otherwise logits are
            divided by this value before sampling.
        top_k: If set (and temperature > 0), restrict sampling to the
            top-k logits.
        repetition_penalty: Divisor applied to logits of tokens already
            present in the sequence. Default 1.0 disables it, preserving
            the original behavior (which divided by a hard-coded 1.0).

    Returns:
        The decoded generated text (prompt excluded).
    """
    # Build the chat-formatted prompt and expand the image placeholder.
    q_text = tokenizer.apply_chat_template(
        [{"role": "system", "content": '一个好奇的人类和一个人工智能助手之间的聊天。助手对人类的问题给出有用、详细和礼貌的回答。'},
         {"role": "user", "content": f'{text_input}'}],
        tokenize=False,
        add_generation_prompt=True,
    ).replace('<image>', '<|image_pad|>' * 81)
    input_ids = tokenizer(q_text, return_tensors='pt')['input_ids'].to(device)
    pixel_values = processor(text=None, images=image_input).pixel_values
    pixel_values = pixel_values.to(device)
    eos = tokenizer.eos_token_id
    s = input_ids.shape[1]  # prompt length; generation is sliced off after it

    # Inference only: disable autograd so the loop does not accumulate
    # a gradient graph (the original leaked memory here).
    with torch.no_grad():
        while input_ids.shape[1] < s + max_new_tokens - 1:
            inference_res = model(input_ids, None, pixel_values)
            logits = inference_res.logits[:, -1, :]

            # Penalize already-generated tokens; skip entirely when the
            # penalty is 1.0 (division by 1.0 is a no-op anyway).
            if repetition_penalty != 1.0:
                for token in set(input_ids[0].tolist()):
                    logits[:, token] /= repetition_penalty

            if temperature == 0.0:
                # Greedy decoding.
                _, idx_next = torch.topk(logits, k=1, dim=-1)
            else:
                logits = logits / temperature
                if top_k is not None:
                    # Mask everything below the k-th largest logit.
                    v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                    logits[logits < v[:, [-1]]] = -float('Inf')
                probs = F.softmax(logits, dim=-1)
                idx_next = torch.multinomial(probs, num_samples=1)

            # idx_next has shape (1, 1); compare its scalar to the EOS id.
            if idx_next.item() == eos:
                break

            input_ids = torch.cat((input_ids, idx_next), dim=1)
    return tokenizer.decode(input_ids[:, s:][0])


def main():
    """Evaluate the model on the test set by substring-matching answers.

    Loads the test JSON, generates a reply for each sample's image/question,
    and counts a sample as correct when the reference answer appears
    (case-insensitively) inside the generated text.
    """
    # Load the dataset; abort cleanly if the file is missing or malformed.
    try:
        with open('/data/xiaoyj2025/GeoVLM/Dataset/test.json', 'r', encoding='utf-8') as f:
            dataset = json.load(f)
    except Exception as e:
        print(f"数据集加载失败: {e}")
        return

    correct = 0
    total = 0

    for item in tqdm(dataset, desc="处理样本"):
        # Per-sample boundary: log and skip any sample that fails, so one
        # bad record does not abort the whole evaluation.
        try:
            image_path = "/data/xiaoyj2025/GeoVLM/Dataset/images/" + item["image"]
            conv = item["conversations"]
            question = next(c["value"] for c in conv if c["from"] == "human")
            answer = next(c["value"] for c in conv if c["from"] == "gpt")

            with Image.open(image_path) as img:
                img = img.convert("RGB")

                generated = generate(
                    mode='pretrain',
                    image_input=img,
                    text_input=question,
                    temperature=0.0
                )

                # Case-insensitive substring match against the reference.
                if answer.strip().lower() in generated.strip().lower():
                    correct += 1
                total += 1

        except Exception as e:
            print(f"处理样本 {item.get('id','未知')} 失败: {e}")
            continue

    # BUG FIX: guard against division by zero when no sample was
    # processed successfully (empty dataset or all samples failed).
    if total == 0:
        print("\n评估完成:没有成功处理的样本")
    else:
        print(f"\n评估完成:正确率 {correct}/{total} = {correct/total:.2%}")

if __name__ == "__main__":
    main()