import torch
import numpy as np
import os
from utils.reranking import re_ranking,re_ranking_numpy
from scipy.spatial.distance import cdist
import pickle
from collections import defaultdict
from tqdm import tqdm
from utils.ficfac_torch import run_fic

def euclidean_distance(qf, gf):
    """Pairwise *squared* Euclidean distance between two feature sets.

    Args:
        qf: query features, torch tensor of shape (m, d).
        gf: gallery features, torch tensor of shape (n, d).

    Returns:
        numpy array of shape (m, n) with squared distances
        ||q_i||^2 + ||g_j||^2 - 2 * <q_i, g_j>.
    """
    # Squared norms of each row, kept as column vectors for broadcasting.
    q_sq = torch.pow(qf, 2).sum(dim=1, keepdim=True)   # (m, 1)
    g_sq = torch.pow(gf, 2).sum(dim=1, keepdim=True)   # (n, 1)
    # Expand the identity ||q - g||^2 = ||q||^2 + ||g||^2 - 2 q.g
    dist = q_sq + g_sq.t() - 2.0 * qf.mm(gf.t())
    return dist.cpu().numpy()

def gaussian_kernel(distance_matrix, sigma):
    """Convert a distance matrix into Gaussian (RBF) similarities.

    Args:
        distance_matrix: numpy array of non-negative distances.
        sigma: kernel bandwidth; larger values flatten the response.

    Returns:
        numpy array of the same shape, exp(-d / (2 * sigma^2)); a zero
        distance maps to 1.0 and similarity decays toward 0.
    """
    denom = 2 * sigma ** 2
    return np.exp(-distance_matrix / denom)

def cosine_similarity(qf, gf):
    epsilon = 0.00001
    dist_mat = qf.mm(gf.t())
    qf_norm = torch.norm(qf, p=2, dim=1, keepdim=True)  # mx1
    gf_norm = torch.norm(gf, p=2, dim=1, keepdim=True)  # nx1
    qg_normdot = qf_norm.mm(gf_norm.t())

    dist_mat = dist_mat.mul(1 / qg_normdot).cpu().numpy()
    dist_mat = np.clip(dist_mat, -1 + epsilon, 1 - epsilon)
    dist_mat = np.arccos(dist_mat)
    return dist_mat


def eval_func(distmat, q_pids, g_pids, q_camids, g_camids, max_rank=50):
    """Evaluation with market1501 metric.
        Key: for each query identity, its gallery images from the same camera view are discarded.

    Args:
    - distmat: distance matrix between query and gallery images, shape (num_query, num_gallery)
    - q_pids: identity ids of the query images
    - g_pids: identity ids of the gallery images
    - q_camids: camera ids of the query images
    - g_camids: camera ids of the gallery images
    - max_rank: maximum rank considered when computing the CMC curve

    Returns:
    - all_cmc: cumulative matching characteristic curve (rank-k accuracy), a standard ReID metric
    - mAP: mean average precision over all valid queries
    """
    num_q, num_g = distmat.shape
    # distmat g
    #    q    1 3 2 4
    #         4 1 2 3

    # Clamp max_rank when the gallery is smaller than it
    if num_g < max_rank:
        max_rank = num_g
        print("Note: number of gallery samples is quite small, got {}".format(num_g))

    # Sort each row of the distance matrix (ascending): best matches first
    indices = np.argsort(distmat, axis=1)
    #  0 2 1 3
    #  1 2 3 0

    # Binary matrix: matches[q, k] == 1 iff the k-th ranked gallery item shares the query's pid
    matches = (g_pids[indices] == q_pids[:, np.newaxis]).astype(np.int32)

    # Compute the CMC curve and AP for each query
    all_cmc = []
    all_AP = []
    num_valid_q = 0.  # number of valid queries (those with at least one gallery match)
    for q_idx in range(num_q):
        # Identity and camera of the current query
        q_pid = q_pids[q_idx]
        q_camid = q_camids[q_idx]

        # Discard gallery samples with the same pid AND same camera as the query
        order = indices[q_idx]  # ranked gallery indices for this query
        remove = (g_pids[order] == q_pid) & (g_camids[order] == q_camid)# marks gallery images sharing both id and camera with the query; these are excluded from matching
        keep = np.invert(remove)# inverse of `remove`: the gallery entries that stay eligible

        # Compute the CMC curve for this query
        # binary vector, positions with value 1 are correct matches
        orig_cmc = matches[q_idx][keep]
        if not np.any(orig_cmc):
            # true when the query identity does not appear in the (filtered) gallery
            continue

        cmc = orig_cmc.cumsum()
        cmc[cmc > 1] = 1

        all_cmc.append(cmc[:max_rank])
        num_valid_q += 1.

        # Compute average precision
        # reference: https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision
        num_rel = orig_cmc.sum()
        tmp_cmc = orig_cmc.cumsum()
        #tmp_cmc = [x / (i + 1.) for i, x in enumerate(tmp_cmc)]
        y = np.arange(1, tmp_cmc.shape[0] + 1) * 1.0
        tmp_cmc = tmp_cmc / y
        tmp_cmc = np.asarray(tmp_cmc) * orig_cmc
        AP = tmp_cmc.sum() / num_rel
        all_AP.append(AP)

    assert num_valid_q > 0, "Error: all query identities do not appear in gallery"

    # Stack the per-query CMC curves into one array
    all_cmc = np.asarray(all_cmc).astype(np.float32)

    # Average over valid queries to get the final CMC curve and mAP
    all_cmc = all_cmc.sum(0) / num_valid_q
    mAP = np.mean(all_AP)

    return all_cmc, mAP

# NOTE: the `compute` method only builds dist_mat; cmc and mAP themselves are produced by eval_func
class R1_mAP_eval():
    """Accumulates features batch by batch and computes the query/gallery
    distance matrix plus Rank-1 CMC and mAP for ReID evaluation.

    Usage: call ``reset()``, then ``update()`` once per batch, then
    ``compute()``. The CMC/mAP numbers themselves come from ``eval_func``;
    this class is responsible for assembling ``distmat``.
    """

    def __init__(self, num_query, max_rank=50, feat_norm=True, reranking=False, dataset='aic', reranking_track=False, gaus=False, gaus_sigma=0.8):
        super(R1_mAP_eval, self).__init__()
        self.num_query = num_query            # first num_query accumulated rows are queries, rest are gallery
        self.max_rank = max_rank              # max rank considered for the CMC curve
        self.feat_norm = feat_norm            # L2-normalize features before matching
        self.reranking = reranking            # use plain k-reciprocal re-ranking
        self.dataset = dataset                # dataset name; 'aic*' datasets skip CMC/mAP (no gallery labels)
        self.reranking_track = reranking_track  # use track-level re-ranking instead
        self.gaus = gaus                      # weight track members with a Gaussian kernel instead of inverse distance
        self.gaus_sigma = gaus_sigma          # bandwidth of that Gaussian kernel

    def reset(self):
        """Clear all accumulated state before a new evaluation pass."""
        self.feats = []
        self.pids = []
        self.camids = []
        self.tids = []

    def update(self, output):  # called once for each batch
        """Append one batch of (features, pids, camids, trackids)."""
        feat, pid, camid, trackid = output
        self.feats.append(feat)
        self.pids.extend(np.asarray(pid))
        self.camids.extend(np.asarray(camid))
        self.tids.extend(np.asarray(trackid))
        # Deduplicated track ids seen so far (order not guaranteed).
        self.unique_tids = list(set(self.tids))

    def track_ranking(self, qf, gf, gallery_tids, unique_tids, fic=False):
        """Track-level re-ranking.

        Collapses all gallery features belonging to one track into a single
        weighted feature, re-ranks queries against those track features, then
        broadcasts each track's distance back to its member images.

        Args:
            qf: query features (torch tensor, (m, d)).
            gf: gallery features (torch tensor, (n, d)).
            gallery_tids: per-gallery-image track id, length n.
            unique_tids: unused; kept for interface compatibility.
            fic: unused; kept for interface compatibility.

        Returns:
            (dist, origin_track_dist): the (m, n) image-level distance matrix
            and the raw (m, num_tracks) track-level distance matrix.
        """
        # Squared-euclidean distances between every query and gallery feature.
        origin_dist = euclidean_distance(qf, gf)
        # Gaussian-similarity view of the same matrix (used when self.gaus).
        gaus_dist = gaussian_kernel(origin_dist, sigma=self.gaus_sigma)

        m, n = qf.shape[0], gf.shape[0]
        gallery_tids = np.asarray(gallery_tids)

        dist = np.zeros((m, n))
        gf_tids = sorted(list(set(gallery_tids)))
        track_gf = []

        # Log the weighting mode once (not once per track).
        if self.gaus:
            print('==>> use gaus kernel compute track')
        else:
            print('==>> use euclidean distance compute track')

        # Build one aggregated feature per track.
        for tid in gf_tids:
            if self.gaus:
                temp_gaus = gaus_dist[:, gallery_tids == tid]
                gaus_max = np.max(temp_gaus, axis=1)
                # Queries that are confidently close to this track
                # (similarity > 0.6); fall back to the single best query.
                index = np.where(gaus_max > 0.6)[0]
                if len(index) < 1:
                    index = np.where(gaus_max == np.max(gaus_max))[0]
                # Higher similarity -> larger weight, normalized to sum to 1.
                weight = temp_gaus[index, :].mean(axis=0)
                weight = weight / np.sum(weight)
            else:
                temp_dist = origin_dist[:, gallery_tids == tid]
                # Minimum distance from each query to any member of this track.
                temp_min = np.min(temp_dist, axis=1)
                # Queries confidently close to this track (distance < 0.6);
                # fall back to the single closest query.
                index = np.where(temp_min < 0.6)[0]
                if len(index) < 1:
                    index = np.where(temp_min == np.min(temp_min))[0]
                # Smaller distance -> larger weight, normalized to sum to 1.
                weight = temp_dist[index, :].mean(axis=0)
                weight = 1.0 / (weight + 0.01)
                weight = weight / np.sum(weight)
            weight = torch.tensor(weight).cuda().unsqueeze(0)
            # Weighted average of the track's member features.
            track_gf.append(torch.mm(weight, gf[gallery_tids == tid, :]))

        # (num_tracks, d) tensor of aggregated track features.
        track_gf = torch.cat(track_gf)

        # Re-rank queries against track-level features.
        origin_track_dist = re_ranking(qf, track_gf, k1=7, k2=2, lambda_value=0.6)

        # NOTE: AIC-specific camera/view priors are disabled for VeRi training:
        # track_dist = origin_track_dist - 0.1*cam_dist - 0.05*view_dist
        # (cam_dist/view_dist were loaded from track_cam_rk.npy / track_view_rk.npy)
        track_dist = origin_track_dist

        # Broadcast each track's distance back to its individual gallery images.
        for i, tid in enumerate(gf_tids):
            dist[:, gallery_tids == tid] = track_dist[:, i:(i + 1)]

        return dist, origin_track_dist

    def compute(self, fic=False, fac=False, rm_camera=False, save_dir='./', crop_test=False, la=0.18):
        """Assemble the final distance matrix and evaluate it.

        Args:
            fic: if True, also match camera-bias-removed features (run_fic)
                and fuse the two distance matrices.
            fac: unused; kept for interface compatibility.
            rm_camera: if True, add large penalties to same-camera pairs.
            save_dir: unused here; kept for interface compatibility.
            crop_test: if True, fuse crop/original feature pairs by summation.
            la: lambda passed through to run_fic.

        Returns:
            (cmc, mAP, distmat, pids, camids, qf, gf)
        """
        origin_track_dist = 0
        feats = torch.cat(self.feats, dim=0)

        # With crop-test augmentation every image contributed two consecutive
        # rows (original, crop); merge each pair by summing the features.
        if crop_test:
            feats = feats[::2] + feats[1::2]
            self.pids = self.pids[::2]
            self.camids = self.camids[::2]
            self.tids = self.tids[::2]
            self.num_query = int(self.num_query / 2)

        # Optional L2 normalization along the channel dimension.
        if self.feat_norm:
            print("The test feature is normalized")
            feats = torch.nn.functional.normalize(feats, dim=1, p=2)  # along channel

        # Gallery part: features, pids, camera ids, track ids.
        gf = feats[self.num_query:]
        g_pids = np.asarray(self.pids[self.num_query:])
        g_camids = np.asarray(self.camids[self.num_query:])
        gallery_tids = np.asarray(self.tids[self.num_query:])
        # Query part: features, pids, camera ids.
        qf = feats[:self.num_query]
        q_pids = np.asarray(self.pids[:self.num_query])
        q_camids = np.asarray(self.camids[:self.num_query])

        # FIC removes per-camera bias from the features.
        if fic:
            qf1, gf1 = run_fic(qf, gf, q_camids, g_camids, la=la)

        # Choose the distance computation strategy.
        if self.reranking_track:
            print('=> Enter track reranking')
            distmat, origin_track_dist = self.track_ranking(qf, gf, gallery_tids, self.unique_tids)
            if fic:
                distmat1, origin_track_dist1 = self.track_ranking(qf1, gf1, gallery_tids, self.unique_tids, fic=True)
                distmat = (distmat + distmat1) / 2.0
                origin_track_dist = 0.5 * origin_track_dist + 0.5 * origin_track_dist1
        elif self.reranking:
            print('=> Enter reranking')
            distmat = re_ranking(qf, gf, k1=50, k2=15, lambda_value=0.3)
            if fic:
                distmat += re_ranking(qf1, gf1, k1=50, k2=15, lambda_value=0.3)
        else:
            print('=> Computing DistMat with euclidean distance')
            distmat = euclidean_distance(qf, gf)
            if fic:
                distmat += euclidean_distance(qf1, gf1)

        # Push same-camera (and cross-domain, camid >= 40) pairs far away so
        # they never rank first.
        if rm_camera:
            cam_matches = (g_camids == q_camids[:, np.newaxis]).astype(np.int32)
            distmat = distmat + 10.0 * cam_matches
            cam_matches = ((g_camids >= 40).astype(np.int32) != (q_camids[:, np.newaxis] >= 40).astype(np.int32)).astype(np.int32)
            distmat = distmat + 10.0 * cam_matches

        # AIC-style datasets have no gallery labels, so CMC/mAP are dummies.
        if self.dataset in ['aic', 'aic_sim', 'aic_sim_spgan']:
            cmc = [0.0 for i in range(100)]
            mAP = 0.0
            print('No evalution!!!!!!!!!!!!!!!!!!!')
        else:
            cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)

        return cmc, mAP, distmat, self.pids, self.camids, qf, gf
