# eval.py
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from data import get_data_loaders  # 根据你的数据集类调整
from trainone import CrossModalModel 
from model import FasterViT, TextFeatureExtractor

def encode_data(model, data_loader, device):
    """Encode every image and caption in ``data_loader`` into the joint space.

    Args:
        model: cross-modal model exposing ``img_encoder`` / ``img_proj`` and
            ``txt_encoder`` / ``txt_proj``.
        data_loader: yields ``(images, txt_emb, img_ids)`` batches; each image
            is assumed to own 5 captions (caption ids ``5*k .. 5*k+4``).
        device: torch device to run the encoders on.

    Returns:
        Tuple ``(img_features, txt_features)`` of CPU tensors, globally sorted
        by image id and caption id respectively, so row ``i`` of the image
        features matches caption rows ``5*i .. 5*i+4`` of the text features.
    """
    model.eval()
    img_features = []
    txt_features = []
    all_img_ids = []
    all_txt_ids = []

    with torch.no_grad():
        for images, txt_emb, img_ids in tqdm(data_loader, desc="Encoding"):
            # DataLoader collation may hand ids back as tensors; normalize to
            # plain ints so the argsort-based reordering below is robust.
            img_ids = [int(i) for i in img_ids]

            # Image branch: backbone features -> global average pool -> projection.
            images = images.to(device)
            # forward_features already runs on `device`; no extra .to() needed.
            img_feats = model.img_encoder.forward_features(images)
            img_feats = F.adaptive_avg_pool2d(img_feats, (1, 1)).flatten(1)
            img_feats = model.img_proj(img_feats)
            img_features.append(img_feats.cpu())

            # Derive caption ids: image k owns captions 5*k .. 5*k+4
            # (captions are stored contiguously per image).
            for img_id in img_ids:
                all_txt_ids.extend(img_id * 5 + i for i in range(5))

            # Text branch: encode, mean-pool over the sequence dim, project.
            txt_emb = txt_emb.to(device)
            txt_feats = model.txt_encoder(txt_emb)
            txt_feats = torch.mean(txt_feats, dim=1)
            txt_feats = model.txt_proj(txt_feats)
            txt_features.append(txt_feats.cpu())

            all_img_ids.extend(img_ids)

    # Restore a global ordering by image id.
    img_features = torch.cat(img_features, dim=0)
    img_features = img_features[np.argsort(all_img_ids)]

    # Restore a global ordering by caption id.
    txt_features = torch.cat(txt_features, dim=0)
    txt_features = txt_features[np.argsort(all_txt_ids)]

    return img_features, txt_features

def compute_similarity(img_features, txt_features):
    """Return the cosine-similarity matrix between all images and texts.

    Both feature sets are L2-normalized along the last dimension first, so
    the resulting matrix of dot products has shape [N_img, N_txt] with
    entries in [-1, 1].
    """
    img_unit = F.normalize(img_features, p=2, dim=-1)
    txt_unit = F.normalize(txt_features, p=2, dim=-1)
    return img_unit @ txt_unit.t()

def evaluate_i2t(sim_matrix, n_captions_per_img=5):
    """Image-to-text retrieval recall.

    Args:
        sim_matrix: [N_img, N_txt] similarity tensor; caption columns
            ``i*n .. i*n + n-1`` are assumed to belong to image ``i``.
        n_captions_per_img: number of captions per image.

    Returns:
        (R@1, R@5, R@10) percentages over images that have at least one
        valid caption index inside the matrix.
    """
    n_img = sim_matrix.shape[0]
    INVALID = 1e20  # sentinel for images whose captions all fall outside the matrix
    ranks = np.full(n_img, INVALID)

    for i in range(n_img):
        # Ground-truth caption indices for image i.
        targets = np.arange(i * n_captions_per_img, (i + 1) * n_captions_per_img)
        scores = sim_matrix[i, :].cpu().numpy()
        sorted_idx = np.argsort(-scores)  # text indices, best match first
        # One vectorized pass instead of a np.where per target: positions in
        # the ranking where any ground-truth caption appears. Out-of-range
        # targets simply never match, preserving the old bounds check.
        hits = np.flatnonzero(np.isin(sorted_idx, targets))
        if hits.size:
            ranks[i] = hits[0] + 1  # 1-based rank of the best-ranked caption

    # Drop images with no retrievable caption.
    valid_ranks = ranks[ranks != INVALID]
    if len(valid_ranks) == 0:
        return 0.0, 0.0, 0.0

    r1 = 100.0 * np.sum(valid_ranks <= 1) / len(valid_ranks)
    r5 = 100.0 * np.sum(valid_ranks <= 5) / len(valid_ranks)
    r10 = 100.0 * np.sum(valid_ranks <= 10) / len(valid_ranks)
    return r1, r5, r10


def evaluate_t2i(sim_matrix, n_captions_per_img=5):
    """Text-to-image retrieval recall.

    Each text query ``i`` has exactly one ground-truth image, namely
    ``i // n_captions_per_img``.

    Args:
        sim_matrix: [N_img, N_txt] similarity tensor.
        n_captions_per_img: number of captions per image.

    Returns:
        (R@1, R@5, R@10) percentages over all text queries.
    """
    n_txt = sim_matrix.shape[1]
    ranks = np.zeros(n_txt)

    for txt_idx in range(n_txt):
        gt_image = txt_idx // n_captions_per_img  # the one matching image
        column = sim_matrix[:, txt_idx].cpu().numpy()
        order = np.argsort(-column)  # image indices, best match first
        ranks[txt_idx] = np.where(order == gt_image)[0][0] + 1

    recalls = tuple(100.0 * np.sum(ranks <= k) / n_txt for k in (1, 5, 10))
    return recalls

def evaluate(model, test_loader, device, n_captions=5):
    """Run the full retrieval evaluation.

    Encodes every sample in ``test_loader``, builds the global similarity
    matrix and computes bidirectional Recall@K plus their sum.

    Args:
        model: trained cross-modal model.
        test_loader: loader over the evaluation split.
        device: torch device.
        n_captions: captions per image (default 5).

    Returns:
        Dict with "i2t" / "t2i" Recall@{1,5,10} sub-dicts and "rsum".
    """
    img_feats, txt_feats = encode_data(model, test_loader, device)

    # Sanity-check the image/caption alignment before scoring.
    print(f"图像特征维度: {img_feats.shape}, 文本特征维度: {txt_feats.shape}")
    # Message now reports the actual n_captions instead of a hard-coded 5.
    assert txt_feats.shape[0] == img_feats.shape[0] * n_captions, \
        f"文本数量 {txt_feats.shape[0]} != 图像数量 {img_feats.shape[0]} × {n_captions}"

    sim_matrix = compute_similarity(img_feats.to(device), txt_feats.to(device))
    print(f"✅ sim_matrix.shape: {sim_matrix.shape}")
    # (removed leftover debug dump of the full similarity matrix)

    # Retrieval metrics in both directions.
    i2t_r1, i2t_r5, i2t_r10 = evaluate_i2t(sim_matrix, n_captions)
    t2i_r1, t2i_r5, t2i_r10 = evaluate_t2i(sim_matrix, n_captions)

    return {
        "i2t": {"R@1": i2t_r1, "R@5": i2t_r5, "R@10": i2t_r10},
        "t2i": {"R@1": t2i_r1, "R@5": t2i_r5, "R@10": t2i_r10},
        "rsum": i2t_r1 + i2t_r5 + i2t_r10 + t2i_r1 + t2i_r5 + t2i_r10,
    }


if __name__ == "__main__":
    # Runtime configuration.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_path = "best_model.pth"

    # Build both encoders with the training-time hyper-parameters.
    vision_backbone = FasterViT(
        dim=96, in_dim=64, depths=[2, 2, 6, 2],
        window_size=[7, 7, 7, 7], ct_size=1, mlp_ratio=4,
        num_heads=[3, 6, 12, 24], resolution=224, num_classes=0,
    )
    text_backbone = TextFeatureExtractor(
        input_dim=768, hidden_dim=512, num_tencoder=4,
        num_heads=8, max_seq_len=64, dropout=0.1,
    )

    # Assemble the cross-modal model and restore the best checkpoint.
    model = CrossModalModel(vision_backbone, text_backbone, 512, 512)
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.to(device)

    # Data loaders; the evaluation below runs on the validation split.
    train_loader, val_loader, test_loader = get_data_loaders()

    metrics = evaluate(model, val_loader, device)

    # Report retrieval metrics.
    print(f"[Image-to-Text] R@1: {metrics['i2t']['R@1']:.1f} R@5: {metrics['i2t']['R@5']:.1f} R@10: {metrics['i2t']['R@10']:.1f}")
    print(f"[Text-to-Image] R@1: {metrics['t2i']['R@1']:.1f} R@5: {metrics['t2i']['R@5']:.1f} R@10: {metrics['t2i']['R@10']:.1f}")
    print(f"RSUM: {metrics['rsum']:.1f}")