from torch.utils.data import DataLoader
from torchvision.transforms import Resize, CenterCrop, ToTensor, Normalize, Compose
from PIL import Image
try:
    from torchvision.transforms import InterpolationMode
    BICUBIC = InterpolationMode.BICUBIC
except ImportError:
    BICUBIC = Image.BICUBIC
from datalist import Market1501, collate_fn,MSMT,DukeMTMC
from tqdm import tqdm
import torch
from colorama import Fore
import numpy as np
import pickle
import os
from sklearn.metrics import precision_recall_curve, auc
from sklearn.metrics.pairwise import cosine_similarity
from torchvision import transforms
import importlib
from tqdm import tqdm
def cosine_similarity_matrix(matrix1, matrix2):
    """Return the pairwise cosine-similarity matrix of two sets of row vectors.

    Pure-numpy replacement for sklearn's ``cosine_similarity`` call: the
    original converted its inputs with ``np.array`` (a redundant copy) only
    to hand them straight to sklearn.

    Args:
        matrix1: array-like of shape (n1, d).
        matrix2: array-like of shape (n2, d).

    Returns:
        np.ndarray of shape (n1, n2) where entry (i, j) is the cosine
        similarity between row i of matrix1 and row j of matrix2.  Rows
        with zero norm yield similarity 0, matching sklearn's behaviour
        (its normalizer treats a zero norm as 1).
    """
    a = np.asarray(matrix1, dtype=np.float64)
    b = np.asarray(matrix2, dtype=np.float64)
    a_norm = np.linalg.norm(a, axis=1, keepdims=True)
    b_norm = np.linalg.norm(b, axis=1, keepdims=True)
    # Guard zero-norm rows so they normalize to the zero vector instead of NaN.
    a_norm[a_norm == 0] = 1.0
    b_norm[b_norm == 0] = 1.0
    return (a / a_norm) @ (b / b_norm).T

import torch
from torch.utils.data import DataLoader
import numpy as np
from tqdm import tqdm
from colorama import Fore
import os
import pickle
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import precision_recall_curve, auc
import gc

import torch
from torch.utils.data import DataLoader
import numpy as np
from tqdm import tqdm
from colorama import Fore
import os
import pickle
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import precision_recall_curve, auc
from scipy.sparse import lil_matrix, csr_matrix

def k_reciprocal_neigh(initial_rank, i, k1):
    """Return the k-reciprocal nearest neighbours of sample ``i``.

    A candidate j (one of i's top-(k1+1) neighbours, including i itself)
    is kept only when i also appears among j's own top-(k1+1) neighbours,
    i.e. the neighbour relation is mutual.

    Args:
        initial_rank: (N, >=k1+1) matrix of argsorted neighbour indices,
            one row per sample (each row is a permutation, so no duplicates).
        i: index of the probe sample.
        k1: neighbourhood size.

    Returns:
        np.ndarray of candidate indices whose neighbourhood contains ``i``.
    """
    candidates = initial_rank[i, :k1 + 1]
    # Boolean per candidate: does i occur in that candidate's own top list?
    mutual = (initial_rank[candidates, :k1 + 1] == i).any(axis=1)
    return candidates[mutual]

def re_ranking_chunk(q_g_dist_chunk, q_q_dist_chunk, g_g_dist_chunk, k1=20, k2=6, lambda_value=0.3):
    """Re-rank one block of query/gallery distances with k-reciprocal encoding.

    (Original docstring: "对分块数据进行重排序" — re-rank the chunked data.)

    Builds an (N, N) distance matrix over queries + gallery, encodes each
    sample's k-reciprocal neighbourhood into a sparse weight vector V, then
    blends the Jaccard distance derived from V with the original distance.

    Args:
        q_g_dist_chunk: (num_q, num_g) query-to-gallery distances.
        q_q_dist_chunk: (num_q, num_q) query-to-query distances.
        g_g_dist_chunk: (num_g, num_g) gallery-to-gallery distances.
        k1: neighbourhood size for the k-reciprocal sets.
        k2: truncation applied to the expanded neighbour set and its weights.
        lambda_value: weight of the original distance in the final blend.

    Returns:
        (num_q, num_g) blended distance matrix, or None if any step raised.
        NOTE(review): when lambda_value == 0 the early return yields the
        (num_q, N) normalized distances — a different shape and semantics
        than the main path; callers here always use the default 0.3.
    """
    try:
        # Stack the four blocks into one (N, N) matrix, square it, and
        # normalize each column by its maximum (then transpose).
        original_dist = np.concatenate(
            [np.concatenate([q_q_dist_chunk, q_g_dist_chunk], axis=1),
             np.concatenate([q_g_dist_chunk.T, g_g_dist_chunk], axis=1)],
            axis=0)
        original_dist = np.power(original_dist, 2).astype(np.float32)
        original_dist = np.transpose(1. * original_dist / np.max(original_dist, axis=0))
        V = np.zeros_like(original_dist).astype(np.float32)
        num_q, num_g = q_g_dist_chunk.shape
        N = num_q + num_g
        # Row i of initial_rank lists all samples sorted by distance to i.
        initial_rank = np.argsort(original_dist).astype(np.int32)

        for i in range(N):
            # k-reciprocal neighbours of i, expanded with each candidate's
            # own (k1/2)-reciprocal set when it overlaps i's set by > 2/3.
            k_reciprocal_index = k_reciprocal_neigh(initial_rank, i, k1)
            k_reciprocal_expansion_index = k_reciprocal_index
            for j in range(len(k_reciprocal_index)):
                candidate = k_reciprocal_index[j]
                candidate_k_reciprocal_index = k_reciprocal_neigh(initial_rank, candidate, int(np.around(k1 / 2)))
                if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2. / 3 * len(
                        candidate_k_reciprocal_index):
                    k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index)
            k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
            # Keep only the k2 closest of the expanded set and weight them by
            # a softmax-like exp(-dist) normalized to sum to 1.
            dist = original_dist[i, k_reciprocal_expansion_index]
            k_reciprocal_expansion_index = k_reciprocal_expansion_index[np.argsort(dist)]
            k_reciprocal_expansion_index = k_reciprocal_expansion_index[:k2]
            weight = np.exp(-dist[np.argsort(dist)[:k2]])
            V[i, k_reciprocal_expansion_index] = 1. * weight / np.sum(weight)
        # Only query rows are needed from here on.
        original_dist = original_dist[:num_q, ]
        if lambda_value == 0:
            return original_dist
        # invIndex[i]: rows whose encoding assigns nonzero weight to sample i.
        invIndex = []
        for i in range(N):
            invIndex.append(np.where(V[:, i] != 0)[0])

        jaccard_dist = np.zeros_like(original_dist, dtype=np.float32)

        for i in range(num_q):
            # Accumulate the element-wise min of V[i] with every overlapping
            # row, giving the fuzzy-intersection term of the Jaccard distance.
            temp_min = np.zeros(shape=[1, N], dtype=np.float32)
            indNonZero = np.where(V[i, :] != 0)[0]
            indImages = []
            indImages = [invIndex[ind] for ind in indNonZero]
            for j in range(len(indNonZero)):
                temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]],
                                                                                  V[indImages[j], indNonZero[j]])
            jaccard_dist[i] = 1 - temp_min / (2. - temp_min)

        # Blend Jaccard and original distances, then keep gallery columns.
        final_dist = jaccard_dist * (1 - lambda_value) + original_dist * lambda_value
        final_dist = final_dist[:, num_q:]
        return final_dist
    except Exception as e:
        print(f"重排序分块过程中出现错误: {e}")
        return None

class Eval:
    """Single-model ReID evaluator.

    ``run`` extracts a feature vector for every test image and pickles the
    caches to disk; ``evaluate`` reloads them and computes Rank-1 / Rank-5 /
    mAP, optionally with chunked k-reciprocal re-ranking.
    """

    def __init__(self, model, test_dataset, collate_fn=collate_fn):
        # Prefer the GPU when available; pinned memory only helps with CUDA.
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        kwargs = {'num_workers': 1, 'pin_memory': True} if torch.cuda.is_available() else {}
        self.model = model.to(self.device)
        self.test_dataloader = DataLoader(test_dataset, 16, shuffle=False, collate_fn=collate_fn, **kwargs)
        self.model.eval()
        # filename (basename) -> feature vector / person id, filled by run().
        self.image_features = {}
        self.image_person_ids = {}

    @torch.no_grad()
    def run(self):
        """Extract features for the whole test set and cache them to
        ./image_features.pkl and ./image_person_ids.pkl."""
        try:
            pbar = tqdm(self.test_dataloader, desc=f'Test:',
                        bar_format='{l_bar}%s{bar}%s{r_bar}' % (Fore.BLUE, Fore.RESET))
            for images, person_ids, camera_ids, video_ids, filenames in pbar:
                images = images.to(self.device)
                features = self.model(images).cpu().numpy()
                for feature, filename, person_id in zip(features, filenames, person_ids.numpy()):
                    filename = os.path.basename(filename)
                    # Image basenames are assumed unique across the dataset.
                    assert filename not in self.image_features
                    self.image_features[filename] = feature
                    self.image_person_ids[filename] = person_id
            with open('./image_features.pkl', 'wb') as f:
                pickle.dump(self.image_features, f, protocol=pickle.HIGHEST_PROTOCOL)
            with open('./image_person_ids.pkl', 'wb') as f:
                pickle.dump(self.image_person_ids, f, protocol=pickle.HIGHEST_PROTOCOL)
        except Exception as e:
            print(f"运行过程中出现错误: {e}")

    def evaluate(self, chunk_size=100, use_rerank=True, rerank_chunk_size=1000):
        """Compute Rank-1, Rank-5 and mAP from the cached features.

        Args:
            chunk_size: number of queries scored per batch (bounds memory).
            use_rerank: apply chunked k-reciprocal re-ranking when True.
            rerank_chunk_size: gallery chunk size used during re-ranking.

        Returns:
            dict with 'map', 'rank-1', 'rank-5' percentages, or None when the
            feature cache is missing or any step fails.
        """
        try:
            # Fix: the original used pickle.load(open(...)), leaking the file
            # handles; context managers close them deterministically.
            with open('./image_features.pkl', 'rb') as f:
                self.image_features = pickle.load(f)
            with open('./image_person_ids.pkl', 'rb') as f:
                self.image_person_ids = pickle.load(f)
            query_dataset = self.test_dataloader.dataset.query_data
            gallery_dataset = self.test_dataloader.dataset.gallery_data
            query_features, query_indexs = [], []
            gallery_features, gallery_indexs = [], []
            # Look each image's cached feature/pid up by basename.
            for q_data in query_dataset:
                filename = os.path.basename(q_data['image_path'])
                query_features.append(self.image_features[filename])
                query_indexs.append(self.image_person_ids[filename])

            for g_data in gallery_dataset:
                filename = os.path.basename(g_data['image_path'])
                gallery_features.append(self.image_features[filename])
                gallery_indexs.append(self.image_person_ids[filename])

            query_features = np.array(query_features)
            gallery_features = np.array(gallery_features)

            print("Query count:", query_features.shape[0])
            print("Gallery count:", gallery_features.shape[0])

            num_query_chunks = int(np.ceil(query_features.shape[0] / chunk_size))
            rank1_count, rank5_count = 0, 0
            aps = []

            for i in tqdm(range(num_query_chunks)):
                start_idx = i * chunk_size
                end_idx = min((i + 1) * chunk_size, query_features.shape[0])
                query_chunk = query_features[start_idx:end_idx]
                query_chunk_indexs = query_indexs[start_idx:end_idx]

                similarity_matrix_chunk = cosine_similarity(query_chunk, gallery_features)

                if use_rerank:
                    # Re-rank against the gallery in chunks.
                    # NOTE(review): the jaccard statistics are computed per
                    # gallery chunk, not globally, so this only approximates
                    # full k-reciprocal re-ranking.
                    num_rerank_chunks = int(np.ceil(gallery_features.shape[0] / rerank_chunk_size))
                    reranked_similarity_matrix_chunk = lil_matrix(similarity_matrix_chunk.shape)
                    for j in tqdm(range(num_rerank_chunks)):
                        g_start_idx = j * rerank_chunk_size
                        g_end_idx = min((j + 1) * rerank_chunk_size, gallery_features.shape[0])
                        g_chunk = gallery_features[g_start_idx:g_end_idx]
                        q_g_dist_chunk = 1 - cosine_similarity(query_chunk, g_chunk)
                        q_q_dist_chunk = 1 - cosine_similarity(query_chunk, query_chunk)
                        g_g_dist_chunk = 1 - cosine_similarity(g_chunk, g_chunk)
                        reranked_dist_chunk = re_ranking_chunk(q_g_dist_chunk, q_q_dist_chunk, g_g_dist_chunk)
                        if reranked_dist_chunk is None:
                            # Best-effort: a failed chunk keeps its zeros.
                            continue
                        reranked_similarity_matrix_chunk[:, g_start_idx:g_end_idx] = 1 - reranked_dist_chunk
                    similarity_matrix_chunk = reranked_similarity_matrix_chunk.tocsr()

                for j in range(similarity_matrix_chunk.shape[0]):
                    query_pid = query_chunk_indexs[j]
                    # Fix: the original called .getrow() unconditionally, which
                    # raises AttributeError when use_rerank=False (plain
                    # ndarray) and silently aborted evaluation via the broad
                    # except below.
                    if isinstance(similarity_matrix_chunk, np.ndarray):
                        sim_scores = similarity_matrix_chunk[j]
                    else:
                        sim_scores = similarity_matrix_chunk.getrow(j).toarray().flatten()
                    sorted_indices = np.argsort(sim_scores)[::-1]

                    if gallery_indexs[sorted_indices[0]] == query_pid:
                        rank1_count += 1

                    if query_pid in [gallery_indexs[idx] for idx in sorted_indices[:5]]:
                        rank5_count += 1

                    # NOTE(review): AP here is the PR-curve AUC over all
                    # gallery items, not the standard ReID mAP protocol
                    # (which excludes same-camera matches) — confirm intent.
                    y_true = np.array([1 if gallery_indexs[idx] == query_pid else 0 for idx in sorted_indices])
                    y_scores = sim_scores[sorted_indices]
                    precision, recall, _ = precision_recall_curve(y_true, y_scores)
                    aps.append(auc(recall, precision))

            rank1_accuracy = rank1_count / len(query_indexs)
            rank5_accuracy = rank5_count / len(query_indexs)
            mAP = np.mean(aps)

            print(f"Rank-1 Accuracy: {rank1_accuracy * 100:.2f}%")
            print(f"Rank-5 Accuracy: {rank5_accuracy * 100:.2f}%")
            print(f"mAP: {mAP * 100:.2f}%")

            return {
                'map': mAP * 100,
                'rank-1': rank1_accuracy * 100,
                'rank-5': rank5_accuracy * 100
            }
        except FileNotFoundError:
            print("未找到特征文件，请先运行 run 方法。")
        except Exception as e:
            print(f"评估过程中出现错误: {e}")
        return None
        
        
        
        
class Eval2:
    """Two-model ReID evaluator.

    ``run`` extracts features with both models, concatenates them per image
    and caches the result to disk; ``evaluate`` reloads the cache and
    computes Rank-1 / Rank-5 / mAP from plain cosine similarity.
    """

    def __init__(self, model1, model2, test_dataset):
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        kwargs = {'num_workers': 1, 'pin_memory': True} if torch.cuda.is_available() else {}
        self.model1 = model1.to(self.device)
        self.model2 = model2.to(self.device)
        # Batch size 6 — presumably to fit two forward passes in memory;
        # TODO confirm.
        self.test_dataloader = DataLoader(test_dataset, 6, shuffle=False, collate_fn=collate_fn, **kwargs)
        self.model1.eval()
        self.model2.eval()
        # filename (basename) -> concatenated feature / person id.
        self.image_features_combined = {}
        self.image_person_ids = {}

    @torch.no_grad()
    def run(self):
        """Extract and cache concatenated features for the whole test set."""
        pbar = tqdm(self.test_dataloader, desc=f'Test:', bar_format='{l_bar}%s{bar}%s{r_bar}' % (Fore.BLUE, Fore.RESET))
        for images, person_ids, camera_ids, video_ids, filenames in pbar:
            images = images.to(self.device)
            features1 = self.model1(images).cpu().numpy()
            features2 = self.model2(images).cpu().numpy()
            # Concatenate the two models' feature vectors per image.
            combined_features = np.concatenate((features1, features2), axis=1)
            for feature, filename, person_id in zip(combined_features, filenames, person_ids.numpy()):
                filename = os.path.basename(filename)
                # Image basenames are assumed unique across the dataset.
                assert filename not in self.image_features_combined
                self.image_features_combined[filename] = feature
                self.image_person_ids[filename] = person_id
        with open('./image_features_combined.pkl', 'wb') as f:
            pickle.dump(self.image_features_combined, f, protocol=pickle.HIGHEST_PROTOCOL)
        with open('./image_person_ids.pkl', 'wb') as f:
            pickle.dump(self.image_person_ids, f, protocol=pickle.HIGHEST_PROTOCOL)

    def evaluate(self):
        """Load the cached features and compute Rank-1, Rank-5 and mAP.

        Returns:
            dict with 'map', 'rank-1', 'rank-5' percentages.
        """
        # Fix: the original used pickle.load(open(...)), leaking the file
        # handles; context managers close them deterministically.
        with open('./image_features_combined.pkl', 'rb') as f:
            self.image_features_combined = pickle.load(f)
        with open('./image_person_ids.pkl', 'rb') as f:
            self.image_person_ids = pickle.load(f)
        query_dataset = self.test_dataloader.dataset.query_data
        gallery_dataset = self.test_dataloader.dataset.gallery_data
        query_features, query_indexs = [], []
        gallery_features, gallery_indexs = [], []

        # Look each image's cached feature/pid up by basename.
        for q_data in query_dataset:
            filename = os.path.basename(q_data['image_path'])
            query_features.append(self.image_features_combined[filename])
            query_indexs.append(self.image_person_ids[filename])

        for g_data in gallery_dataset:
            filename = os.path.basename(g_data['image_path'])
            gallery_features.append(self.image_features_combined[filename])
            gallery_indexs.append(self.image_person_ids[filename])

        query_features = np.array(query_features)
        gallery_features = np.array(gallery_features)

        print("Query count:", query_features.shape[0])
        print("Gallery count:", gallery_features.shape[0])

        similarity_matrix = cosine_similarity_matrix(query_features, gallery_features)
        print("Similarity matrix shape:", similarity_matrix.shape)

        rank1_count, rank5_count = 0, 0
        aps = []

        for i in tqdm(range(similarity_matrix.shape[0])):
            query_pid = query_indexs[i]
            sim_scores = similarity_matrix[i]
            sorted_indices = np.argsort(sim_scores)[::-1]

            if gallery_indexs[sorted_indices[0]] == query_pid:
                rank1_count += 1

            if query_pid in [gallery_indexs[idx] for idx in sorted_indices[:5]]:
                rank5_count += 1

            # NOTE(review): AP here is the PR-curve AUC over all gallery
            # items, not the standard ReID mAP protocol — confirm intent.
            y_true = np.array([1 if gallery_indexs[idx] == query_pid else 0 for idx in sorted_indices])
            y_scores = sim_scores[sorted_indices]
            precision, recall, _ = precision_recall_curve(y_true, y_scores)
            aps.append(auc(recall, precision))

        rank1_accuracy = rank1_count / len(query_indexs)
        rank5_accuracy = rank5_count / len(query_indexs)
        mAP = np.mean(aps)

        print(f"Rank-1 Accuracy: {rank1_accuracy * 100:.2f}%")
        print(f"Rank-5 Accuracy: {rank5_accuracy * 100:.2f}%")
        print(f"mAP: {mAP * 100:.2f}%")

        return {
            'map': mAP * 100,
            'rank-1': rank1_accuracy * 100,
            'rank-5': rank5_accuracy * 100
        }
    



if __name__ == "__main__":

    # Dataset roots (Windows paths on the author's machine).
    duke_dir = r'F:\floatingAI\reid\DukeMTMC-reID\dukemtmc-reid'
    msmt_dir = r'F:\floatingAI\reid\msmt17\msmt17'
    market_dir = r'F:\floatingAI\reid\Market-1501-v15.09.15'

    # CLIP-style preprocessing; the mean/std values match OpenAI CLIP's
    # normalization constants.
    pre_process = Compose([
        Resize((256, 128), interpolation=BICUBIC),
        CenterCrop((256, 128)),
        ToTensor(),
        Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
    ])

    dataset_dict = {
        'market1501': Market1501(
            pre_process=pre_process,
            data_dir=market_dir,
            extra_annotations=None,
            mode='test',
            color_space='RGB'
        ),
        'dukemtmc': DukeMTMC(
            pre_process=pre_process,
            data_dir=duke_dir,
            extra_annotations=None,
            mode='test',
            color_space='RGB'
        ),
        'msmt': MSMT(
            pre_process=pre_process,
            data_dir=msmt_dir,
            # Fix: keyword was misspelled 'extra_annotatinos', which would
            # raise TypeError unless MSMT's signature carries the same typo —
            # TODO confirm against datalist.MSMT.
            extra_annotations=None,
            mode='test',
            color_space='RGB'
        )
    }

    def group_process(model, data_dict):
        """Evaluate ``model`` on every dataset in ``data_dict`` with Eval."""
        for name, dataset in data_dict.items():
            evaluator = Eval(model, dataset)
            evaluator.run()
            result = evaluator.evaluate()
            print(name)
            # Robustness fix: Eval.evaluate() returns None on failure; the
            # original crashed on result.items() in that case.
            if result is None:
                continue
            for metric, value in result.items():
                print(metric, ":", value)

    def group_process2(model1, model2, data_dict):
        """Evaluate the concatenated features of two models with Eval2."""
        for name, dataset in data_dict.items():
            evaluator = Eval2(model1, model2, dataset)
            evaluator.run()
            result = evaluator.evaluate()
            print(name)
            if result is None:
                continue
            for metric, value in result.items():
                print(metric, ":", value)

    # Build a pretrained OSNet backbone for feature extraction.
    import torchreid
    model = torchreid.models.build_model(
        name='osnet_ain_x0_25',
        num_classes=751,
        loss='softmax',
        pretrained=True
    )
    model.eval()

    # OSNet expects ImageNet normalization, so swap the preprocessing on the
    # already-constructed dataset objects (the dict values are mutated in
    # place, so no copy of the dict is needed).
    pre_process = transforms.Compose([
        transforms.Resize((256, 128)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    for dataset in dataset_dict.values():
        dataset.pre_process = pre_process

    group_process(model, dataset_dict)