import pickle
import torch
import time
import numpy as np
from sklearn.cluster import KMeans
from concurrent.futures import ThreadPoolExecutor, as_completed


class KMEANS:
    """Minimal k-means clustering on torch tensors.

    Repeatedly assigns each sample to its nearest centroid (squared
    Euclidean distance) and recomputes centroids as cluster means, until the
    total change in the distance matrix falls below 1e-3 or ``max_iter``
    iterations have run.
    """

    def __init__(self, n_clusters=20, max_iter=None, verbose=True, device=None):
        # n_clusters: number of centroids to fit.
        # max_iter:   optional hard cap on iterations (None = run until converged).
        # verbose:    print per-iteration diagnostics when True.
        # device:     torch device the tensors live on (None = default device).
        self.n_clusters = n_clusters
        self.centers = None
        self.verbose = verbose
        self.device = device
        self.max_iter = max_iter
        self.started = False          # becomes True after the first assignment pass
        self.count = 0                # iteration counter
        self.variation = torch.Tensor([float("Inf")]).to(device)

    def nearest_center(self, x):
        """Assign every sample in ``x`` (shape [N, D]) to its nearest center.

        Sets ``self.labels`` (shape [N]) and ``self.dists`` (shape
        [N, n_clusters], squared Euclidean distances), and updates
        ``self.variation`` with the change in total distance since the
        previous assignment pass.
        """
        # Broadcasted pairwise squared distances: (N, 1, D) - (1, K, D) -> (N, K).
        # Replaces the original per-sample Python loop with repeated torch.cat,
        # which was accidentally quadratic in N.
        diff = x.unsqueeze(1) - self.centers.unsqueeze(0)
        dists = torch.sum(diff * diff, dim=2)
        self.labels = torch.argmin(dists, dim=1)
        if self.started:
            self.variation = torch.sum(self.dists - dists)
        self.dists = dists
        self.started = True

    def update_center(self, x):
        """Recompute each center as the mean of the samples assigned to it.

        An empty cluster keeps its previous center instead of producing a
        NaN mean (torch.mean over zero rows is NaN).
        """
        centers = torch.empty((0, x.shape[1])).to(self.device)
        for i in range(self.n_clusters):
            cluster_samples = x[self.labels == i]
            if cluster_samples.shape[0] == 0:
                # Empty cluster: retain the old centroid to avoid NaN poisoning.
                new_center = self.centers[i].unsqueeze(0)
            else:
                new_center = torch.mean(cluster_samples, 0).unsqueeze(0)
            centers = torch.cat([centers, new_center], 0)
        self.centers = centers

    def representative_sample(self):
        """Record, per center, the index of the sample closest to it."""
        self.representative_samples = torch.argmin(self.dists, 0)

    def fit(self, x):
        """Cluster ``x`` (shape [N, D]) and return ``(labels, centers)``."""
        # Initialize centroids from randomly chosen rows of x (with replacement).
        init_row = torch.randint(0, len(x), (self.n_clusters,)).to(self.device)
        self.centers = x[init_row]

        while True:
            self.nearest_center(x)
            self.update_center(x)
            if self.verbose:
                print(self.variation, torch.argmin(self.dists, 0))
            # Converged: total distance change is negligible. The original
            # only checked convergence when max_iter was set, which made
            # max_iter=None loop forever.
            if torch.abs(self.variation) < 1e-3:
                break
            if self.max_iter is not None and self.count == self.max_iter:
                break
            self.count += 1
            if self.verbose:
                print(self.count)
        self.representative_sample()
        return self.labels, self.centers

def cluster(input, n_clusters, max_iter):
    """Run KMEANS over ``input`` and return ``(elapsed_seconds, labels, centers)``.

    input:      [N, D] float tensor of features to cluster.
    n_clusters: number of centroids.
    max_iter:   iteration cap passed through to KMEANS.

    Falls back to CPU when CUDA is unavailable; the original hard-coded
    ``cuda:0`` and crashed on machines without a GPU.
    """
    start = time.time()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    k = KMEANS(n_clusters=n_clusters, max_iter=max_iter, device=device)
    label_, centers_ = k.fit(input)
    end = time.time()
    return (end - start), label_, centers_

def choose_device(cuda=False):
    """Return the torch device to run on: ``cuda:0`` when *cuda* is truthy, else CPU."""
    return torch.device("cuda:0") if cuda else torch.device("cpu")

def ReadFromFile(data_features, device=None):
    """Collect per-image descriptors into a single feature tensor.

    data_features: sequence of dicts, each with 'descriptor_np_list' (list of
        numpy vectors; only index 0 is used) and 'filename' (list; only index
        0 is used).
    device: optional torch device for the feature tensor; defaults to CPU.
        The original read a module-level ``device`` global that is never
        defined in this file, raising NameError at runtime.

    Returns (filenames, features) where features is an [N, D] float tensor.
    """
    if device is None:
        device = torch.device("cpu")

    dim = data_features[0]['descriptor_np_list'][0].shape[0]
    features_for_clustering = torch.empty((0, dim)).to(device)

    # images_descrip: image-name list aligned row-for-row with the features.
    images_descrip = []
    num = 0
    for feature in data_features:
        descrip = torch.from_numpy(feature['descriptor_np_list'][0]).unsqueeze(0).float().to(device)
        features_for_clustering = torch.cat((features_for_clustering, descrip), 0)
        images_descrip.append(feature['filename'][0])
        num += 1
        print('load:', num)
    return images_descrip, features_for_clustering

def cluster_kmeas(n_clusters, max_iter, images_descrip, features_for_clustering):
    """Cluster the feature matrix and return ``[centers, groups]``.

    centers: nested Python lists of the centroid coordinates.
    groups:  dict mapping cluster id -> list of image names in that cluster.
    """
    _, labels, centers = cluster(features_for_clustering, n_clusters, max_iter)

    # Convert the centroid tensor into plain nested Python lists.
    plain_centers = [
        [centers[row][col].item() for col in range(centers.shape[1])]
        for row in range(centers.shape[0])
    ]

    # Bucket image names by their assigned cluster id.
    grouped = {cluster_id: [] for cluster_id in range(n_clusters)}
    for name, label in zip(images_descrip, labels):
        grouped[int(label.item())].append(name)

    return [plain_centers, grouped]


def cluster_level_0(max_iter, n_clusters, data_features):
    """Run the level-0 (top-level) clustering and pickle the result.

    Returns the path of the written pickle file.
    """
    writh_file_path = r'/home/ubuntu/data/Image_Retrieval/dataset/cluster_level_0.pk'
    files, fefc = ReadFromFile(data_features)
    cluster_list = cluster_kmeas(n_clusters=n_clusters, max_iter=max_iter,
                                 images_descrip=files, features_for_clustering=fefc)
    # with-statement ensures the pickle file is flushed and closed; the
    # original opened it and never closed the handle.
    with open(writh_file_path, 'wb') as f:
        pickle.dump(cluster_list, f)
    return writh_file_path

def cluster_level_1(max_iter, n_clusters, writh_file_path, data_features, device=None):
    """Second-level clustering: re-cluster each level-0 cluster in parallel.

    Loads the level-0 pickle at ``writh_file_path`` (a list
    ``[centers, {cluster_id: [filenames]}]``), runs k-means within each
    level-0 cluster on a thread pool, and pickles the combined result.

    device: torch device for the feature tensors; defaults to CPU. The
        original read a module-level ``device`` global that is never defined
        in this file, raising NameError at runtime.
    """
    if device is None:
        device = torch.device("cpu")

    # filename -> [1, D] feature tensor lookup.
    fefc = {}
    for entry in data_features:
        feature_config = torch.from_numpy(entry['descriptor_np_list'][0]).unsqueeze(0).float().to(device)
        fefc[entry['filename'][0]] = feature_config
    writh_file_path_1 = r'/home/ubuntu/data/Image_Retrieval/dataset/cluster_level_1.pk'

    dim = data_features[0]['descriptor_np_list'][0].shape[0]

    def cluster_thread(files, i, fefc):
        # Cluster the members of level-0 cluster ``i``; returns [i, cluster_list].
        cluster_list_level = [i]
        features_for_clustering = torch.empty((0, dim)).to(device)
        for name in files:
            features_for_clustering = torch.cat((features_for_clustering, fefc[name]), 0)
        cluster_list = cluster_kmeas(n_clusters=n_clusters, max_iter=max_iter,
                                     images_descrip=files,
                                     features_for_clustering=features_for_clustering)
        cluster_list_level.append(cluster_list)
        return cluster_list_level

    with open(writh_file_path, 'rb') as file:
        b = pickle.load(file)

    # First element of the output echoes the level-0 centers.
    cluster_list_level = [b[0]]

    # with-statement shuts the pool down cleanly; the original never did.
    with ThreadPoolExecutor(max_workers=32) as pool:
        futures = {
            pool.submit(cluster_thread, b[-1][i], i, fefc): i
            for i in range(len(b[0]))
        }
        # Results are appended in completion order (same as the original).
        for future in as_completed(futures):
            cluster_list_level.append(future.result())

    # Close the output file deterministically; the original leaked the handle.
    with open(writh_file_path_1, 'wb') as f:
        pickle.dump(cluster_list_level, f)


if __name__ == "__main__":
    # writh_file_path_1 : level-1 cluster pickle
    # writh_file_path_2 : saved search model
    # The two-level clustering pipeline (cluster_level_0 / cluster_level_1)
    # is currently disabled; this entry point only loads and prints the
    # saved search model for inspection.
    writh_file_path_1 = r'/home/ubuntu/data/Image_Retrieval/dataset/cluster_level_1.pk'
    writh_file_path_2 = r'/home/ubuntu/data/web/app/model/search_model.pkl'
    # NOTE(review): pickle.load executes arbitrary code embedded in the file
    # — only load pickles from trusted sources.
    # with-statement closes the file handle; the original leaked it.
    with open(writh_file_path_2, 'rb') as f:
        data_features = pickle.load(f)
    print(data_features)
