import numpy as np
import matplotlib.pyplot as plt
from scipy.special import comb
import random
from scipy.linalg import hadamard, eig
import copy
import gc
import os
import time
from tqdm import tqdm
import json
import scipy.sparse.linalg as linalg
from scipy.sparse import csc_matrix
import copy
import time
import torch
from utils import run_kmeans,compute_features
import pdb
import h5py
import numpy as np
import scipy.io as sio
from scipy.optimize import linear_sum_assignment
from sklearn.metrics import confusion_matrix
# For ±1 codes of length `bit`, Hamming distance d and inner product <a, b> are related by:
#   <a, b> = bit - 2 * d,  equivalently  d = (bit - <a, b>) / 2

def get_margin(bit, n_class):
    """Compute Hamming-distance margins for n_class centers in {-1, 1}^bit.

    Sphere-packing style counting over Hamming balls: `right` is the average
    number of distinct codes available per class, and the margins are the
    radii/diameters at which the cumulative binomial counts cross that budget.

    Args:
        bit: hash code length L.
        n_class: number of classes to place.

    Returns:
        (d_max, d_min): the two Hamming-distance margins (ints).
    """
    L = bit
    # Average number of distinct binary codes available per class.
    right = (2 ** L) / n_class

    d_min = 0
    d_max = 0

    # d_min: the diameter at which the count of codes within radius
    # (dim-1)//2 is still within budget while radius dim//2 exceeds it.
    # The cumulative sums are monotone so the crossing point is unique;
    # break at the first match (the original scanned the whole range).
    for dim in range(2 * L + 4):
        sum_1 = sum(comb(L, i) for i in range((dim - 1) // 2 + 1))
        sum_2 = sum(comb(L, i) for i in range(dim // 2 + 1))
        if sum_1 <= right and sum_2 > right:
            d_min = dim
            break

    # d_max: smallest dim whose cumulative count of codes within radius
    # dim-1 meets the budget while radius dim-2 falls short.
    for dim in range(2 * L + 4):
        sum_1 = sum(comb(L, j) for j in range(dim))
        sum_2 = sum(comb(L, j) for j in range(dim - 1))
        if sum_1 >= right and sum_2 < right:
            d_max = dim
            break

    # The original also computed alpha_neg / alpha_pos but never used them;
    # they are removed here.
    return d_max, d_min


def CSQ_init(n_class, bit):
    """Initialize hash centers from a Hadamard matrix (CSQ scheme).

    Rows of the stacked (2*bit x bit) matrix [H; -H] are mutually far apart
    in Hamming distance, so the first n_class rows make good hash centers.
    When n_class exceeds 2*bit, the extra centers are drawn as random
    balanced ±1 rows and re-drawn up to 10 times until the pairwise Hamming
    distances are acceptable.

    Args:
        n_class: number of centers needed.
        bit: code length; must be a power of 2 (requirement of `hadamard`).

    Returns:
        ndarray of shape (n_class, bit) with ±1 entries.
    """
    h_k = hadamard(bit)
    h_2k = np.concatenate((h_k, -h_k), 0)
    hash_center = h_2k[:n_class]

    if h_2k.shape[0] < n_class:
        hash_center = np.resize(hash_center, (n_class, bit))
        for k in range(10):
            # Fill the rows beyond the Hadamard supply with random balanced
            # ±1 codes (exactly half the bits flipped to -1).
            for index in range(h_2k.shape[0], n_class):
                ones = np.ones(bit)
                ones[random.sample(list(range(bit)), bit // 2)] = -1
                hash_center[index] = ones
            c = []
            for i in range(n_class):
                # BUG FIX: start at i + 1. The original used range(i, ...),
                # which included the zero self-distance, so c.min() was
                # always 0 and the acceptance test below could never pass.
                for j in range(i + 1, n_class):
                    c.append(sum(hash_center[i] != hash_center[j]))
            c = np.array(c)
            # Accept when every pair is far apart and the average spread
            # is at least bit / 2.
            if c.min() > bit / 4 and c.mean() >= bit / 2:
                break
    return hash_center


def init_hash(n_class, bit):
    """Draw n_class random hash centers of length `bit` with ±1 entries."""
    # Uniform noise in (-1, 1), binarized by sign.
    noise = 2 * np.random.random((n_class, bit)) - 1
    return np.sign(noise)



def cal_Cx(x, H):
    """Return H @ x: the inner product of each row of H with vector x."""
    return H @ x



def cal_M(H):
    """Return the Gram matrix of H's columns averaged over rows: H^T H / m."""
    num_rows = H.shape[0]
    return (H.T @ H) / num_rows



def cal_b(H):
    """Return the column-wise mean of H, computed as (1^T H) / m."""
    m = H.shape[0]
    ones_row = np.ones(m, dtype=np.float64)
    return (ones_row @ H) / m



def cal_one_hamm(b, H):
    """Hamming distances from ±1 code b to every row of H.

    Uses the identity d = (bit - <x, y>) / 2 for ±1 codes.
    Returns (mean + min, min) of those distances.
    """
    dists = 0.5 * (b.shape[0] - H @ b)
    smallest = dists.min()
    return dists.mean() + smallest, smallest



def cal_hamm(H):
    """Pairwise Hamming-distance statistics over the rows of H.

    Considers every unordered pair (i, j) with i < j.

    Returns:
        (sum, mean, min, var, max) of the pairwise Hamming distances.
    """
    n = H.shape[0]
    pair_dists = np.array([
        np.sum(H[i] != H[j])
        for i in range(n)
        for j in range(i + 1, n)
    ])
    return (pair_dists.sum(), pair_dists.mean(), pair_dists.min(),
            pair_dists.var(), pair_dists.max())

def cos_simi(vec1, vec2):
    """Cosine similarity between two vectors: <v1, v2> / (||v1|| * ||v2||)."""
    denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)
    return np.dot(vec1, vec2) / denom

def eval_metrics(H, W):
    """Score hash centers H against semantic weights W (higher is better).

    The score is the normalized total pairwise Hamming distance of H minus a
    penalty for the gap between hash-space and semantic-space cosine
    similarities over all pairs. Prints the normalized Hamming term.
    """
    total_hamming = cal_hamm(H)[0]

    K = H.shape[0]
    consistency = 0
    for a in range(K):
        for b in range(a + 1, K):
            consistency += cos_simi(H[a], H[b]) - cos_simi(W[a], W[b])

    print(total_hamming / (K * K))
    # pdb.set_trace()
    return total_hamming / (K * K) - consistency


def in_range(z1, z2, z3, bit):
    """Check the ADMM auxiliary variables against their feasible sets.

    Constraints:
      - z1 must lie in the box [-1, 1] elementwise;
      - z3 must be non-negative elementwise;
      - z2 must lie on the sphere of radius sqrt(bit): ||z2||^2 == bit
        within an absolute tolerance of 1e-3.

    Returns:
        True iff all three constraints hold.
    """
    for item in z1:
        # BUG FIX: the original tested `item < -1 and item > 1`, which is
        # always False, so box violations were never detected. The box
        # constraint is violated when item lies OUTSIDE [-1, 1].
        if item < -1 or item > 1:
            return False
    for item in z3:
        if item < 0:
            return False
    sq_norm = sum(item ** 2 for item in z2)
    if abs(sq_norm - bit) > 0.001:
        return False
    return True



def get_min(b, H):
    """Return the minimum Hamming distance from code b to any row of H."""
    dists = np.array([np.sum(b != row) for row in H])
    return dists.min()



def Lp_box_one(b, H, d_max, n_class, bit, rho, gamma, error, W_ex, Wi):
    """Update a single hash center `b` with Lp-box ADMM.

    Optimizes b toward {-1, 1}^bit so that it keeps a Hamming margin of
    d_max from the other centers H while staying consistent with the
    semantic affinities derived from W_ex and Wi.

    Args:
        b: current hash center, shape (bit,).
        H: the remaining hash centers, shape (n_class - 1, bit).
        d_max: target minimum Hamming distance to the other centers.
        n_class: total number of classes.
        bit: hash code length.
        rho: initial ADMM penalty parameter (grown each outer iteration).
        gamma: dual step-size multiplier.
        error: unused here; kept for interface compatibility.
        W_ex: semantic weight matrix excluding the current class.
        Wi: semantic weight vector of the current class.

    Returns:
        (best_B, H): best binarized center found, and H cast to float64.
    """
    b = b.astype(np.float64)
    H = H.astype(np.float64)

    # Inner-product threshold equivalent to Hamming distance d_max
    # (for ±1 codes: <x, y> = bit - 2 * d_Hamming).
    d = bit - 2 * d_max

    # Semantic affinity of this class to the others, centered and rescaled
    # to the range [-bit, bit].
    Wei_ = np.dot(W_ex, Wi)
    Wei_mean = np.mean(Wei_)
    Wei_ -= Wei_mean
    Wei = -bit + (Wei_ - min(Wei_)) / (max(Wei_) - min(Wei_)) * (bit + bit)
    Wei = Wei.astype(np.float64)

    # NOTE(review): M is computed but never used below; C is the column
    # mean of H, shape (bit,). The "n x n" comments are kept from the
    # original but M is actually (bit, bit).
    M = cal_M(H)  # n x n
    C = cal_b(H)  # n x 1
    out_iter = 10000
    in_iter = 10
    upper_rho = 1e9
    learning_fact = 1.07  # multiplicative growth of rho per outer iteration
    count = 0
    best_eval, best_min = cal_one_hamm(np.sign(b), H)
    # best_B starts as the raw (continuous) b; it is replaced by np.sign(b)
    # only once an improvement is found after rho saturates.
    best_B = b

    # ADMM auxiliary variables: z1 (box [-1, 1]), z2 (sphere ||z||^2 = bit),
    # z3 (non-negative slack for the inner-product constraint H b <= d).
    z1 = b.copy()
    z2 = b.copy()
    z3 = d - cal_Cx(np.sign(b), H)
    # Dual variables for the three constraints.
    y1 = np.random.rand(bit)
    y2 = np.random.rand(bit)
    y3 = np.random.rand(n_class - 1)

    z1 = z1.astype(np.float64)
    z2 = z2.astype(np.float64)
    z3 = z3.astype(np.float64)
    y1 = y1.astype(np.float64)
    y2 = y2.astype(np.float64)
    y3 = y3.astype(np.float64)
    alpha = 1.0  # weight of the semantic-consistency term

    for e in range(out_iter):
        for ei in range(in_iter):

            # b-update: closed-form solution of the quadratic subproblem.
            left = ((rho + rho) * np.eye(bit, dtype=np.float64) + (rho + 2 * alpha) * np.dot(H.T, H))
            left = left.astype(np.float64)
            right = (rho * z1 + rho * z2 + rho * np.dot(H.T, (d - z3)) - y1 - y2 - np.dot(H.T,
                                                                                          y3) - C + 2 * alpha * np.dot(
                H.T, Wei))
            right = right.astype(np.float64)
            b = np.dot(np.linalg.inv(left), right)

            # z-updates (pre-projection values).
            z1 = b + 1 / rho * y1

            z2 = b + 1 / rho * y2

            z3 = d - np.dot(H, b) - 1 / rho * y3

            if in_range(z1, z2, z3, bit):
                # All constraints already satisfied: dual step, then stop
                # the inner loop early.
                y1 = y1 + gamma * rho * (b - z1)
                y2 = y2 + gamma * rho * (b - z2)
                y3 = y3 + gamma * rho * (np.dot(H, b) + z3 - d)
                break
            else:
                # Project z1 onto the box [-1, 1].
                z1[z1 > 1] = 1
                z1[z1 < -1] = -1

                # Project z2 onto the sphere of radius sqrt(bit).
                norm_x = np.linalg.norm(z2)
                z2 = np.sqrt(bit) * z2 / norm_x

                # Project z3 onto the non-negative orthant.
                z3[z3 < 0] = 0

                # Dual ascent steps.
                y1 = y1 + gamma * rho * (b - z1)
                y2 = y2 + gamma * rho * (b - z2)
                y3 = y3 + gamma * rho * (np.dot(H, b) + z3 - d)

        rho = min(learning_fact * rho, upper_rho)
        if rho == upper_rho:
            # Once rho saturates, track the best binarized solution over the
            # next 100 outer iterations, then stop.
            count += 1
            # NOTE(review): `eval` shadows the builtin; kept as-is.
            eval, mini = cal_one_hamm(np.sign(b), H)
            if eval > best_eval:
                best_eval = eval
                best_min = mini
                best_B = np.sign(b)
        if count == 100:
            # best_B = np.sign(b)
            break

    # best_B = np.sign(b)
    return best_B, H



def Lp_box(B, best_B, n_class, d_max, bit, rho, gamma, error, best_st, W):
    """Optimize the full hash-center matrix B one row at a time.

    Sweeps over all classes up to 20 times, re-solving each center with
    Lp_box_one while holding the others fixed, and keeps the matrix with the
    best eval_metrics score. Stops early after 5 consecutive sweeps without
    improvement. Note that B is updated in place.

    Args:
        B: initial hash centers, shape (n_class, bit); mutated in place.
        best_B: current best centers (returned unchanged if never improved).
        best_st: score of best_B under eval_metrics.
        W: semantic weight matrix, one row per class.
        (remaining args are forwarded to Lp_box_one)

    Returns:
        The best hash-center matrix found.
    """
    count = 0
    for oo in range(20):
        for i in range(n_class):
            # H: all hash centers except the i-th (shape (n_class - 1, bit)).
            H = np.vstack((B[:i], B[i + 1:]))  # m-1 x n
            # W_ex: semantic weights of every class except the i-th.
            W_ex = np.vstack((W[:i], W[i + 1:]))
            # Wi: semantic weight vector of class i.
            Wi = W[i]
            # Re-solve center i against the remaining centers and weights.
            B[i], _ = Lp_box_one(B[i], H, d_max, n_class, bit, rho, gamma, error, W_ex, Wi)

        eval_st = eval_metrics(B,W)

        # pdb.set_trace()

        print(eval_st)

        # Keep the best-scoring matrix; count sweeps without improvement.
        if eval_st > best_st:
            best_st = eval_st
            best_B = B.copy()
            count = 0
        else:
            count += 1
        if count >= 5:
            break

    return best_B
    


def getBestHash(cfg, args, train_loader, bit, device, initWithCSQ=False, rho = 5e-5, gamma = (1 + 5 ** 0.5) / 2, error = 1e-5):
    """Load precomputed hash centers and cluster assignments from disk.

    Reads 'best_B.h5' and 'im2cluster.h5' (written by getBestHash1), moves
    both to `device`, and packages them in the cluster_result dict layout
    expected by the training loop.

    Returns:
        (best_B, cluster_result) where cluster_result has keys
        'im2cluster' and 'centroids', each a single-element list.
    """
    # Load the optimized hash centers from the HDF5 cache.
    with h5py.File('best_B.h5', 'r') as f:
        best_B = f['matrix'][:]

    # Load the image-to-cluster assignment from the HDF5 cache.
    with h5py.File('im2cluster.h5', 'r') as f:
        im2cluster = f['matrix'][:]

    best_B = torch.tensor(best_B).to(device)
    im2cluster = torch.tensor(im2cluster).to(device)

    cluster_result = {
        'im2cluster': [im2cluster],
        'centroids': [best_B],
    }

    return best_B, cluster_result

def getBestHash1(cfg, args, train_loader, bit, device, initWithCSQ=False, rho = 5e-5, gamma = (1 + 5 ** 0.5) / 2, error = 1e-5):
    """Compute optimized hash centers from scratch and cache them to disk.

    Pipeline: extract features, run k-means to get cluster assignments and
    semantic centroids W, initialize hash centers (CSQ), optimize them with
    Lp_box, then save W, best_B and im2cluster to HDF5 files.

    Returns:
        (best_B, cluster_result): the optimized centers as a tensor on
        `device`, and the dict with 'im2cluster' and 'centroids' lists.
    """
    # Reduced features, shape (B, C').
    features = compute_features(train_loader, cfg,device) 

    # features = features.numpy()
    # W: k-means centroids in embedding space (semantic centers).
    im2cluster, W = run_kmeans(features,cfg,args.gpu,device)  #run kmeans clustering on master node
    W_numpy = W.cpu().numpy()
    with h5py.File('W.h5', 'w') as f:
            f.create_dataset('matrix', data=W_numpy)

    # placeholder for clustering result
    cluster_result = {'im2cluster':[],'centroids':[]}   

    cluster_result['im2cluster'].append(im2cluster)

    W = W.cpu().numpy()  # move W from GPU to CPU and convert to numpy
    n_class = cfg.nclusters

    # Force CSQ (Hadamard) initialization of the hash centers.
    initWithCSQ = True

    d_max, d_min = get_margin(bit, n_class)
    # NOTE(review): the computed d_max is immediately overridden with 0,
    # which disables the Hamming margin — confirm this is intended.
    d_max = 0
    print(f"d_max is {d_max}, d_min is {d_min}")

    # parameter initialization
    rho = 5e-5
    gamma = (1 + 5 ** 0.5) / 2
    error = 1e-5
    # hash centers initialization
    random.seed(80)
    np.random.seed(80)

    d = bit - 2 * d_max
    if initWithCSQ:
        B = CSQ_init(n_class, bit)  # initialize with CSQ
    else:
        B = init_hash(n_class, bit)  # random initialization
        # B = np.ones((n_class, bit),np.int32)  # random initialization
    
    # metric initialization
    # Hamming-distance statistics of the initial hash centers.
    best_st, best_mean, best_min, best_var, best_max = cal_hamm(B)

    best_st = eval_metrics(B,W)

    best_B = copy.deepcopy(B)

    print(
        f"best_st is {best_st}, best_min is {str(best_min)}, best_mean is {best_mean}, best_var is {best_var}, best_max is {str(best_max)}")
    # Reset the baseline score so the first Lp_box sweep always improves it.
    best_st = -999999
    print(f"eval st, eval min, eval mean, eval var, eval max")
    begin = time.time()
    time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(begin))
    print(time_string)

    # Optimize the hash centers.
    best_B = Lp_box(B, best_B, n_class, d_max, bit, rho, gamma, error, best_st, W)

    end = time.time()
    time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(end))

    print(time_string)

    ev_st, ev_mean, ev_min, ev_var, ev_max = cal_hamm(best_B)
    print(
        f"ev_st is {ev_st}, ev_min is {str(ev_min)}, ev_mean is {ev_mean}, ev_var is {ev_var}, ev_max is {str(ev_max)}")
    

    best_B = torch.tensor(best_B).to(device)
    cluster_result['centroids'].append(best_B)
    best_B_numpy = best_B.cpu().numpy()
    im2cluster_numpy = im2cluster.cpu().numpy()
    # Cache the results to HDF5 for later reuse.
    with h5py.File('best_B.h5', 'w') as f:
        f.create_dataset('matrix', data=best_B_numpy)

    with h5py.File('im2cluster.h5', 'w') as f:
        f.create_dataset('matrix', data=im2cluster_numpy)

    return best_B, cluster_result


def cluster_accuracy(y_true, y_pred):
    """Best-match clustering accuracy via the Hungarian algorithm.

    Builds the contingency (confusion) matrix between true and predicted
    labels, finds the label permutation that maximizes the matched count,
    and returns the resulting accuracy. Prints the accuracy as a side
    effect (matching the original behavior).

    The contingency matrix is computed with numpy over the union of labels,
    equivalent to sklearn.metrics.confusion_matrix — this removes the
    sklearn dependency from the function.

    Args:
        y_true: ground-truth labels, 1-D array-like.
        y_pred: predicted cluster labels, 1-D array-like of the same length.

    Returns:
        Accuracy in [0, 1] as a float.
    """
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)

    # Step 1: contingency matrix over the union of observed labels.
    labels = np.unique(np.concatenate((y_true, y_pred)))
    index = {label: k for k, label in enumerate(labels)}
    cm = np.zeros((labels.size, labels.size), dtype=np.int64)
    for t, p in zip(y_true, y_pred):
        cm[index[t], index[p]] += 1

    # Step 2: Hungarian algorithm; negate to maximize the matched count.
    row_ind, col_ind = linear_sum_assignment(-cm)

    # Step 3: accuracy under the best label assignment.
    matched_count = cm[row_ind, col_ind].sum()
    accuracy = matched_count / len(y_true)
    print(accuracy)
    return accuracy


if __name__ == '__main__':

    # Standalone driver: optimize hash centers for each bit length with a
    # random stand-in for the semantic weight matrix.
    for bit in [8]:
        # load the semantic categories saved in weight folder
        # Semantic class-weight matrix (random stand-in for the saved file).
        # W = np.load("weight/ResNet_car_ims_class_head_0.005.npy")
        W = np.random.rand(4, 5)
        n_class = 4
        # Whether to initialize hash centers with the CSQ (Hadamard) scheme.
        initWithCSQ = True

        if bit == 48:
            initWithCSQ = False

        d_max, d_min = get_margin(bit, n_class)
        # NOTE(review): the computed d_max is immediately overridden with 0,
        # which disables the Hamming margin — confirm this is intended.
        d_max = 0
        print(f"d_max is {d_max}, d_min is {d_min}")

        # parameter initialization
        rho = 5e-5
        gamma = (1 + 5 ** 0.5) / 2
        error = 1e-5
        # hash centers initialization
        random.seed(80)
        np.random.seed(80)

        d = bit - 2 * d_max
        if initWithCSQ:
            B = CSQ_init(n_class, bit)  # initialize with CSQ
        else:
            B = init_hash(n_class, bit)  # random initialization
        
        # metric initialization
        # Hamming-distance statistics of the initial hash centers.
        best_st, best_mean, best_min, best_var, best_max = cal_hamm(B)

        best_B = copy.deepcopy(B)
        count = 0
        error_index = {}
        print(
            f"best_st is {best_st}, best_min is {str(best_min)}, best_mean is {best_mean}, best_var is {best_var}, best_max is {str(best_max)}")
        # Reset the baseline score before optimization.
        best_st = 0
        print(f"eval st, eval min, eval mean, eval var, eval max")
        begin = time.time()
        time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(begin))
        print(time_string)

        # Optimize the hash centers.
        best_B = Lp_box(B, best_B, n_class, d_max, bit, rho, gamma, error, best_st, W)


        end = time.time()
        time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(end))

        print(time_string)

        ev_st, ev_mean, ev_min, ev_var, ev_max = cal_hamm(best_B)
        print(
            f"ev_st is {ev_st}, ev_min is {str(ev_min)}, ev_mean is {ev_mean}, ev_var is {ev_var}, ev_max is {str(ev_max)}")
        # Message means "found an optimized solution" (kept verbatim).
        if (ev_min >= d_max):
            print("!!!!找到一个优化的解!!!!")
