import numpy as np
import scipy.io as sio 
import scipy.sparse as sp
from utils.bar_show import progress_bar

import torch
import torch.nn as nn
import torch.nn.functional as F


def calc_hammingDist(B1, B2):
    """Pairwise Hamming distance between rows of two ±1 binary code matrices.

    B1: (m, q) or (q,) tensor of ±1 codes (a 1-D input is promoted to one row).
    B2: (n, q) tensor of ±1 codes.
    Returns an (m, n) tensor of Hamming distances.
    """
    code_len = B2.shape[1]
    if B1.dim() < 2:
        B1 = B1.unsqueeze(0)
    # For ±1 codes, inner product = q - 2 * (#differing bits), hence this identity.
    return 0.5 * (code_len - B1.mm(B2.t()))

def l2_norm(input):
    """Normalize each row of `input` to unit L2 norm.

    A small epsilon (1e-12) is added to the squared norms so that an
    all-zero row does not divide by zero.
    """
    original_shape = input.size()
    squared_norms = torch.pow(input, 2).sum(dim=1) + 1e-12
    denom = torch.sqrt(squared_norms).view(-1, 1).expand_as(input)
    return (input / denom).view(original_shape)


def mAP(cateTrainTest, IX, num_return_NN=None):
    """Mean average precision over ranked retrieval results.

    Args:
        cateTrainTest: (numTest, numTrain) relevance matrix; nonzero entry
            [q, t] means train item t is relevant to test query q.
        IX: (numTest, numTrain) ranking — row q lists train indices sorted
            by estimated relevance to query q. Accepts np.ndarray or np.matrix.
        num_return_NN: how many top-ranked items to score; defaults to all.

    Returns:
        (mean AP, (numTest, 1) per-query AP, (numTest, 1) per-query hit counts).
    """
    numTest, numTrain = IX.shape

    num_return_NN = numTrain if not num_return_NN else num_return_NN

    apall = np.zeros((numTest, 1))
    yescnt_all = np.zeros((numTest, 1))
    for qid in range(numTest):
        # Flatten the ranking row so indexing works for both np.ndarray and
        # np.matrix inputs. The original `query[0, rid]` required a 2-D row
        # (np.matrix) and raised IndexError on a plain 1-D ndarray row.
        ranked = np.asarray(IX[qid, :]).ravel()
        x, p = 0, 0.0

        for rid in range(num_return_NN):
            # ranked[rid] is the rid-th retrieved item; check whether its
            # label matches query qid's label.
            if cateTrainTest[qid, ranked[rid]]:
                x += 1
                p += x / (rid * 1.0 + 1.0)
        yescnt_all[qid] = x
        # p > 0 iff x > 0, so this matches the original zero-hit handling.
        apall[qid] = p / (x * 1.0) if x else 0.0

    return np.mean(apall), apall, yescnt_all

def calc_map_k(qB, rB, query_L, retrieval_L, k=None):
    """Mean average precision at k for Hamming-ranked retrieval.

    Args:
        qB: (num_query, bits) query codes; sign is taken internally.
        rB: (num_retrieval, bits) database codes; sign is taken internally.
        query_L: (num_query, num_labels) query label matrix.
        retrieval_L: (num_retrieval, num_labels) database label matrix.
        k: evaluate precision over the top-k positions; defaults to the
            whole retrieval set.

    Returns:
        (mAP tensor, None, None) — the two trailing placeholders keep the
        original 3-tuple unpacking working; previously they were leftover
        sort indices (and a NameError if no query had a relevant match).
    """
    num_query = query_L.shape[0]
    qB = torch.sign(qB)
    rB = torch.sign(rB)
    if k is None:
        k = retrieval_L.shape[0]
    # Cap at 1000 queries for speed (original behavior). Using min() also
    # fixes the IndexError the hard-coded range(1000) caused whenever fewer
    # than 1000 queries were supplied.
    n_eval = min(1000, num_query)
    if n_eval == 0:
        return torch.tensor(0.0), None, None
    map_score = 0.0
    for i in range(n_eval):
        q_L = query_L[i]
        if len(q_L.shape) < 2:
            q_L = q_L.unsqueeze(0)      # [1, num_labels]
        # Ground truth: database items sharing at least one label with the query.
        gnd = (q_L.mm(retrieval_L.transpose(0, 1)) > 0).squeeze().type(torch.float32)
        tsum = torch.sum(gnd)
        if tsum == 0:
            continue  # no relevant item for this query; skip it
        hamm = calc_hammingDist(qB[i, :], rB)
        _, ind = torch.sort(hamm)
        ind.squeeze_()
        gnd = gnd[ind]
        total = min(k, int(tsum))
        count = torch.arange(1, total + 1).type(torch.float32)
        tindex = torch.nonzero(gnd)[:total].squeeze().type(torch.float32) + 1.0
        # Keep count on the same device as tindex (the original
        # `if tindex.is_cuda: count = count` was a no-op and broke CUDA runs).
        count = count.to(tindex.device)
        map_score = map_score + torch.mean(count / tindex)
    map_score = map_score / n_eval
    return map_score, None, None

def topK(cateTrainTest, HammingRank, k=500):
    """Mean precision and recall over the top-k ranked retrieval results.

    Args:
        cateTrainTest: (numTrain, numTest) relevance matrix; column q marks
            which train items are relevant to query q.
        HammingRank: (numTrain, numTest) ranking — column q lists train
            indices sorted by Hamming distance to query q.
        k: cutoff rank.

    Returns:
        (mean precision@k, mean recall@k) over all queries.
    """
    num_queries = cateTrainTest.shape[1]

    precision = np.zeros((num_queries, 1))
    recall = np.zeros((num_queries, 1))

    top_indices = HammingRank[:k, :]

    for q in range(num_queries):
        hits = cateTrainTest[top_indices[:, q], q]
        num_hits = np.sum(hits)
        num_relevant = np.sum(cateTrainTest[:, q])

        precision[q] = num_hits / float(k)
        recall[q] = num_hits / float(num_relevant)

    return precision.mean(), recall.mean()