import argparse
import os

import numpy as np
import torch
from peft import PeftModel
from torch.utils.data import DataLoader
from torchvision import transforms
from torch.autograd import Variable

from demo.read_data import ISICDataSet, ChestXrayDataSet
from gme import GmeQwen2VLWithHash
# Let the CUDA caching allocator expand existing segments instead of failing on
# fragmentation when loading large models.
# NOTE(review): this is set after ``import torch`` — it only takes effect if no
# CUDA allocation has happened yet; confirm nothing above touches CUDA.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

# Silence the HuggingFace tokenizers fork/parallelism warning (relevant when
# DataLoader workers are used).
os.environ["TOKENIZERS_PARALLELISM"] = "false"

def predict_hash_code(model, data_loader, device):
    """Encode every batch in *data_loader* and collect hash codes and labels.

    Args:
        model: module whose forward returns a ``(features, hash_code)`` pair.
        data_loader: iterable yielding ``(inputs, labels)`` batches.
        device: device the model lives on; batches are moved there first.

    Returns:
        Tuple ``(all_hash, all_labels)`` of numpy arrays — hash codes stacked
        row-wise; labels stacked row-wise when 2-D (multi-label rows) or
        concatenated when 1-D (scalar class labels).
    """
    model.eval()
    all_hash = []
    all_labels = []

    with torch.no_grad():
        for inputs, labels in data_loader:
            # Variable() is deprecated since PyTorch 0.4 — plain tensors suffice.
            inputs = inputs.to(device)
            labels = labels.to(device)

            _, hash_code = model(inputs)

            all_hash.append(hash_code.cpu().numpy())
            all_labels.append(labels.cpu().numpy())

    # Empty loader: return empty arrays instead of crashing on all_labels[0].
    if not all_hash:
        return np.empty((0, 0)), np.empty((0,))

    all_hash = np.vstack(all_hash)
    if all_labels[0].ndim > 1:
        all_labels = np.vstack(all_labels)  # multi-label / one-hot rows
    else:
        all_labels = np.hstack(all_labels)  # scalar class labels
    return all_hash, all_labels


def mean_average_precision(database_hash, test_hash, database_labels, test_labels, args):
    """Compute retrieval metrics over the whole test set.

    Hash activations are binarized to {-1, +1} at threshold ``args.T`` and
    queries are ranked by inner product (monotone in Hamming similarity for
    binary codes). For each query, the top ``args.R`` database items are used.

    Args:
        database_hash, test_hash: 2-D float arrays of raw hash activations.
        database_labels, test_labels: arrays of scalar class labels
            (multi-element label rows are skipped — this is a single-label metric).
        args: namespace providing ``T`` (binarization threshold) and ``R``
            (retrieval depth).

    Returns:
        ``(MAP, mean recall@R, precision@1, precision@5, precision@10)``.
    """
    # Binarize on copies so the caller's arrays are left untouched
    # (the original thresholded them in place).
    database_hash = np.where(database_hash < args.T, -1, 1)
    test_hash = np.where(test_hash < args.T, -1, 1)

    query_num = test_hash.shape[0]
    sim = np.dot(database_hash, test_hash.T)
    ids = np.argsort(-sim, axis=0)  # column i ranks the database for query i

    APx = []
    Recall = []
    precision_at = {1: [], 5: [], 10: []}

    for i in range(query_num):
        # Normalize the label to a Python scalar. This also handles numpy
        # scalar types: np.int64 is not an ``int`` subclass in Python 3, so
        # the original isinstance check silently skipped every
        # integer-labelled query, zeroing all metrics.
        label_arr = np.asarray(test_labels[i])
        if label_arr.size != 1:
            continue  # multi-label row: unsupported by this metric
        query_label = label_arr.item()

        total_relevant = int(np.sum(database_labels == query_label))

        top_R_ids = ids[:args.R, i]
        match = (database_labels[top_R_ids] == query_label)
        relevant_num = int(np.sum(match))

        # Ranks run 1..len(match); len(match) < args.R when the database is
        # smaller than R (the original arange(1, R + 1) broke in that case).
        precision = np.cumsum(match) / np.arange(1, len(match) + 1)
        APx.append(np.sum(precision * match) / relevant_num if relevant_num > 0 else 0.0)
        Recall.append(relevant_num / total_relevant if total_relevant > 0 else 0.0)

        for k, bucket in precision_at.items():
            top_k = top_R_ids[:k]  # slicing past the end just truncates
            if len(top_k) > 0:
                bucket.append(float(np.mean(database_labels[top_k] == query_label)))

    mean_ap = np.mean(APx) if APx else 0.0
    mean_recall = np.mean(Recall) if Recall else 0.0
    mp1 = np.mean(precision_at[1]) if precision_at[1] else 0.0
    mp5 = np.mean(precision_at[5]) if precision_at[5] else 0.0
    mp10 = np.mean(precision_at[10]) if precision_at[10] else 0.0

    return mean_ap, mean_recall, mp1, mp5, mp10


def main(args):
    """Evaluate the hash-retrieval model end to end.

    Loads the GME base model with its hashing head, applies and merges the
    LoRA adapter, loads the custom hash-layer checkpoint, encodes the
    database and test splits of the chosen dataset, and prints
    MAP / precision@k / recall@R.

    Args:
        args: parsed CLI namespace from ``parse_args``.
    """
    # NOTE(review): GPU index 6 is hard-coded — confirm it matches the target host.
    device = torch.device('cuda:6' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Base multimodal embedding model with a hash head of args.hash_dim bits.
    model = GmeQwen2VLWithHash(
        model_path="../gme-Qwen2-VL-2B-Instruct",
        device=device,
        hash_dim=args.hash_dim,
        min_image_tokens=256,
        max_image_tokens=352,
        max_length=768
    )

    # Attach the fine-tuned LoRA adapter to the base model, then fold it into
    # the base weights so inference runs on a single merged model.
    model.gme_base = PeftModel.from_pretrained(
        model.gme_base,
        model_id=args.lora_path,
        device_map={"": device}
    )
    model.gme_base = model.gme_base.merge_and_unload()

    # Load the hash-layer checkpoint; strict=False tolerates keys present in
    # only one of the two state dicts (e.g. the merged base weights).
    if not os.path.exists(args.ckpt_path):
        raise FileNotFoundError(f"模型权重文件不存在: {args.ckpt_path}")
    custom_state_dict = torch.load(args.ckpt_path, map_location=device)
    model.load_state_dict(custom_state_dict, strict=False)
    model.to(device)
    model.eval()
    print(f"已加载模型权重: {args.ckpt_path}")

    # ImageNet mean/std normalization; images resized then center-cropped to 224.
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    test_transform = transforms.Compose([
        transforms.Lambda(lambda img: img.convert("RGB")),
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize
    ])

    # The training split serves as the retrieval database; the test split
    # provides the queries.
    if args.dataset == "isic":
        database_dataset = ISICDataSet(
            data_dir=os.path.join(args.dataset_dir, "ISIC-2017_Training_Data"),
            image_list_file=args.train_image_list,
            transform=test_transform
        )
        test_dataset = ISICDataSet(
            data_dir=os.path.join(args.dataset_dir, "ISIC-2017_Test_v2_Data"),
            image_list_file=args.test_list_file,
            transform=test_transform
        )
    elif args.dataset == "covid":
        database_dataset = ChestXrayDataSet(
            data_dir=os.path.join(args.dataset_dir, "trainandval"),
            image_list_file=args.train_image_list,
            transform=test_transform
        )
        test_dataset = ChestXrayDataSet(
            data_dir=os.path.join(args.dataset_dir, "test"),
            image_list_file=args.test_list_file,
            transform=test_transform
        )
    else:
        raise NotImplementedError(f"不支持的数据集: {args.dataset}")

    # Evaluation loaders: no shuffling so hash codes stay aligned with labels.
    database_loader = DataLoader(
        database_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True
    )
    test_loader = DataLoader(
        test_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True
    )

    print("生成数据库哈希码...")
    database_hash, database_labels = predict_hash_code(model, database_loader, device)
    print("生成测试集哈希码...")
    test_hash, test_labels = predict_hash_code(model, test_loader, device)

    print("计算评估指标...")
    MAP, Recall, mp1, mp5, mp10 = mean_average_precision(
        database_hash, test_hash,
        database_labels, test_labels,
        args
    )

    print(f"测试集数量: {test_hash.shape[0]}")
    print(f"数据库数量: {database_hash.shape[0]}")
    print(f"哈希码维度: {args.hash_dim}")
    print(f"MAP: {MAP:.4f}")
    print(f"mp@1: {mp1:.4f}")
    print(f"mp@5: {mp5:.4f}")
    print(f"mp@10: {mp10:.4f}")
    print(f"Recall@{args.R}: {Recall:.4f}")


def parse_args():
    """Build the CLI for the GME evaluation script and parse ``sys.argv``.

    Returns:
        The parsed ``argparse.Namespace``.
    """
    cli = argparse.ArgumentParser(description="test of GME")
    # Dataset selection.
    cli.add_argument("--dataset", default="isic", help="dataset")
    # Model weights: hash-layer checkpoint and LoRA adapter are mandatory.
    cli.add_argument("--ckpt-path", required=True, help="the weight of hashlayer")
    cli.add_argument("--lora-path", required=True, help="the weight of LoRA")
    cli.add_argument("--hash-dim", type=int, default=64, help="hash_dim")
    # Data locations.
    cli.add_argument("--dataset-dir", default="./data/isic", help="dataset dir")
    cli.add_argument(
        "--train-image-list",
        default="./ISIC-2017_Training_Part3_GroundTruth.csv",
        help="train image list",
    )
    cli.add_argument(
        "--test-list-file",
        default="./ISIC-2017_Test_v2_Part3_GroundTruth_balanced.csv",
        help="test image list",
    )
    # Loader and metric settings.
    cli.add_argument("--batch-size", type=int, default=28, help="batch size")
    cli.add_argument("--workers", type=int, default=4, help="workers")
    cli.add_argument("--R", type=int, default=1000, help="recall@R")
    cli.add_argument("--T", type=float, default=0.0, help="threshold")
    return cli.parse_args()


# Script entry point: parse CLI options and run the evaluation.
if __name__ == "__main__":
    args = parse_args()
    main(args)
