import os
import gc
import pickle
from colorama import Fore
from tqdm import tqdm
from PIL import Image
from sklearn.metrics import precision_recall_curve, auc
from sklearn.metrics.pairwise import cosine_similarity
from scipy.sparse import lil_matrix, csr_matrix
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.transforms import Resize, CenterCrop, ToTensor, Normalize, Compose
try:
    from torchvision.transforms import InterpolationMode
    BICUBIC = InterpolationMode.BICUBIC
except ImportError:
    BICUBIC = Image.BICUBIC
from datalist import Market1501, collate_fn, MSMT, DukeMTMC
from models import ImageEncoder

def cosine_similarity_matrix(matrix1, matrix2):
    """
    Compute the pairwise cosine-similarity matrix between the rows of two matrices.

    Pure-NumPy implementation equivalent to sklearn's ``cosine_similarity``:
    rows are L2-normalized, then multiplied; an all-zero row keeps a norm of 1
    so its similarities come out as 0 instead of raising a divide-by-zero.

    :param matrix1: array-like of shape (n1, d)
    :param matrix2: array-like of shape (n2, d)
    :return: ndarray of shape (n1, n2) of cosine similarities
    """
    a = np.asarray(matrix1, dtype=np.float64)
    b = np.asarray(matrix2, dtype=np.float64)
    a_norm = np.linalg.norm(a, axis=1, keepdims=True)
    b_norm = np.linalg.norm(b, axis=1, keepdims=True)
    # Match sklearn behavior: treat zero vectors as unit norm -> similarity 0.
    a_norm[a_norm == 0] = 1.0
    b_norm[b_norm == 0] = 1.0
    return (a / a_norm) @ (b / b_norm).T


def k_reciprocal_neigh(initial_rank, i, k1):
    """
    Return the k-reciprocal neighbors of sample ``i``.

    A gallery index j is a k-reciprocal neighbor of i when j is among the
    top-(k1+1) neighbors of i AND i is among the top-(k1+1) neighbors of j.

    :param initial_rank: precomputed rank matrix (each row: indices sorted by distance)
    :param i: index of the probe sample
    :param k1: neighborhood size
    :return: 1-D array of k-reciprocal neighbor indices
    """
    forward = initial_rank[i, :k1 + 1]
    # For every forward neighbor, look at *its* top-(k1+1) list.
    backward = initial_rank[forward, :k1 + 1]
    # Keep the forward neighbors whose own neighbor list contains i.
    reciprocal_rows = np.where(backward == i)[0]
    return forward[reciprocal_rows]


class Eval:
    """
    Re-ID evaluator: extracts image features with a model, caches them to
    pickle files, and computes Rank-1 / Rank-5 accuracy and mAP over the
    query/gallery split of the test dataset.
    """

    def __init__(self, model, test_dataset, collate_fn):
        """
        Initialize the evaluator.

        :param model: model to evaluate; called as ``model(x=..., label=None, get_image=True)``
        :param test_dataset: test dataset; must expose ``query_data`` and ``gallery_data``
        :param collate_fn: batch collate function forwarded to the DataLoader
        """
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        kwargs = {'num_workers': 1, 'pin_memory': True} if torch.cuda.is_available() else {}
        self.model = model.to(self.device)
        self.test_dataloader = DataLoader(test_dataset, 100, shuffle=False, collate_fn=collate_fn, **kwargs)
        self.model.eval()
        self.image_features = {}    # image path -> feature vector (np.ndarray)
        self.image_person_ids = {}  # image path -> person id

    @torch.no_grad()
    def run(self):
        """
        Extract a feature vector for every test image and cache the results to
        ``./image_features.pkl`` and ``./image_person_ids.pkl``.
        """
        pbar = tqdm(self.test_dataloader, desc='Test:',
                    bar_format='{l_bar}%s{bar}%s{r_bar}' % (Fore.BLUE, Fore.RESET))
        for images, person_ids, camera_ids, video_ids, image_paths in pbar:
            images = images.to(self.device)
            features = self.model(x=images, label=None, get_image=True).cpu().numpy()

            # Key both caches by image path so query/gallery lookups align.
            for feature, image_path, person_id in zip(features, image_paths, person_ids.numpy()):
                self.image_features[image_path] = feature
                self.image_person_ids[image_path] = person_id

        with open('./image_features.pkl', 'wb') as f:
            pickle.dump(self.image_features, f, protocol=pickle.HIGHEST_PROTOCOL)
        with open('./image_person_ids.pkl', 'wb') as f:
            pickle.dump(self.image_person_ids, f, protocol=pickle.HIGHEST_PROTOCOL)

    def evaluate(self, chunk_size=100):
        """
        Compute Rank-1 accuracy, Rank-5 accuracy, and mean average precision
        (mAP) from the feature caches written by :meth:`run`.

        :param chunk_size: number of query features scored against the full
            gallery per chunk (bounds the similarity-matrix memory footprint)
        :return: dict with keys ``'map'``, ``'rank-1'``, ``'rank-5'`` (percentages)
        """
        # Use context managers so the pickle file handles are closed promptly
        # (the originals leaked handles via pickle.load(open(...))).
        with open('./image_features.pkl', 'rb') as f:
            self.image_features = pickle.load(f)
        with open('./image_person_ids.pkl', 'rb') as f:
            self.image_person_ids = pickle.load(f)
        query_dataset = self.test_dataloader.dataset.query_data
        gallery_dataset = self.test_dataloader.dataset.gallery_data
        query_features, query_indexs = [], []
        gallery_features, gallery_indexs = [], []

        # Collect query features and person ids.
        for q_data in query_dataset:
            filename = q_data['image_path']
            query_features.append(self.image_features[filename])
            query_indexs.append(self.image_person_ids[filename])

        # Collect gallery features and person ids.
        for g_data in gallery_dataset:
            filename = g_data['image_path']
            gallery_features.append(self.image_features[filename])
            gallery_indexs.append(self.image_person_ids[filename])

        query_features = np.array(query_features)
        gallery_features = np.array(gallery_features)

        print("Query count:", query_features.shape[0])
        print("Gallery count:", gallery_features.shape[0])

        num_query_chunks = int(np.ceil(query_features.shape[0] / chunk_size))
        rank1_count, rank5_count = 0, 0
        aps = []

        for i in tqdm(range(num_query_chunks)):
            start_idx = i * chunk_size
            end_idx = min((i + 1) * chunk_size, query_features.shape[0])
            query_chunk = query_features[start_idx:end_idx]
            query_chunk_indexs = query_indexs[start_idx:end_idx]

            similarity_matrix_chunk = cosine_similarity(query_chunk, gallery_features)

            for j in range(similarity_matrix_chunk.shape[0]):
                query_pid = query_chunk_indexs[j]
                # Dense ndarray rows flatten directly; sparse rows need getrow().
                try:
                    sim_scores = similarity_matrix_chunk[j].flatten()
                except AttributeError:
                    sim_scores = similarity_matrix_chunk.getrow(j).toarray().flatten()
                # Rank gallery entries by descending similarity.
                sorted_indices = np.argsort(sim_scores)[::-1]

                if gallery_indexs[sorted_indices[0]] == query_pid:
                    rank1_count += 1

                if query_pid in [gallery_indexs[idx] for idx in sorted_indices[:5]]:
                    rank5_count += 1

                # AP approximated as area under the PR curve.
                # NOTE(review): assumes every query pid has at least one gallery
                # match; an all-negative y_true degenerates the PR curve — verify
                # against the dataset's query/gallery construction.
                y_true = np.array([1 if gallery_indexs[idx] == query_pid else 0 for idx in sorted_indices])
                y_scores = sim_scores[sorted_indices]
                precision, recall, _ = precision_recall_curve(y_true, y_scores)
                aps.append(auc(recall, precision))

        rank1_accuracy = rank1_count / len(query_indexs)
        rank5_accuracy = rank5_count / len(query_indexs)
        mAP = np.mean(aps)

        print(f"Rank-1 Accuracy: {rank1_accuracy * 100:.2f}%")
        print(f"Rank-5 Accuracy: {rank5_accuracy * 100:.2f}%")
        print(f"mAP: {mAP * 100:.2f}%")

        return {
            'map': mAP * 100,
            'rank-1': rank1_accuracy * 100,
            'rank-5': rank5_accuracy * 100
        }

if __name__ == "__main__":


    market1501_dir = '/home/dev/luotianhang/work_item/data_item/reid/Market-1501-v15.09.15'

    dataset_dict = {
        'market1501':Market1501(
            pre_process=None,
            data_dir = market1501_dir,
            extra_annotations=None,
            mode='test',
            color_space='RGB',
        )
        }
    
    model = ImageEncoder(num_class=1)
    model = torch.nn.DataParallel(model)
    for i in range(1,40,1):
        checkpoint_path = f'./weights/clip-reid/stage2/{i}.pth'
        checkpoint = torch.load(checkpoint_path,map_location='cpu')
        model_dict = model.module.image_encoder.state_dict()
        pretrained_dict = checkpoint['model_state_dict']
        model_dict.update(pretrained_dict)
        model.module.image_encoder.load_state_dict(model_dict,strict=True)

        eval_test=Eval(model,dataset_dict['market1501'],None)
        eval_test.run()
        result = eval_test.evaluate()
        print(i)
        for kk,vv in result.items():
            print(kk,":",vv)

    