import numpy as np
from cv2 import cv2
import re
import matplotlib.pyplot as plt
from utils2 import *
import random
from sklearn.cluster import KMeans
from sklearn import decomposition
import matplotlib
import matplotlib.image as gImage
from sklearn.manifold import TSNE
from matplotlib.ticker import FuncFormatter
import scipy.stats
import time
import random
from sklearn.metrics import confusion_matrix
import copy
import pickle
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn.cluster import AgglomerativeClustering
from sklearn_extra.cluster import KMedoids
from sklearn import metrics
from sklearn.metrics import pairwise_distances



# ======================Data loading======================
feature_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/round2_method2_exp2.npy'
anchorImg_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/anchorImgs_method2_exp2'
referenceLabel_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/anchorReferenceLabel_method2_exp2.txt' # no reference label by default


# Frame-level feature matrix: one row per frame.
feat = np.load(feature_path)
frame_count = np.shape(feat)[0]

# For each anchor, collect its positive/negative sample indices
# (sampleNum samples per anchor, taken every sample_interval frames).
sampleNum = 16
anchor_pos_list, anchor_neg_list = getAnchorPosNegIdx3(anchorImg_path, sampleNum = sampleNum, sample_interval = 2)
anchor_idx = [anchor_pos_list[i][0] for i in range(len(anchor_pos_list))]
training_idx = []
for i in anchor_pos_list:
    training_idx += i[1]

# Reference labels: one integer per anchor; all zeros when no label file
# is configured.
if referenceLabel_path != '':
    # Context manager so the handle is closed (the original left the
    # file open for the rest of the script).
    with open(referenceLabel_path, 'r') as f:
        lines = f.readlines()
    # NOTE(review): int(i[0]) reads only the FIRST character of each
    # line, so labels must be single-digit — confirm against the file.
    rf_label_anchor = [int(i[0]) for i in lines]
else:
    rf_label_anchor = [0 for i in anchor_idx]

# Expand anchor-level labels to sample level (one copy per drawn sample).
rf_label_training = [rf_label_anchor[i] for i in range(len(rf_label_anchor)) for j in range(sampleNum)]
rf_label_anchor = np.array(rf_label_anchor)
rf_label_training = np.array(rf_label_training)

training_feat = feat[training_idx]
# ======================end of data loading======================




# =======================PCA dimensionality reduction======================
cmap = 'rainbow'

# Project features to 2-D for visualization. A single fit_transform
# replaces the original fit() followed by fit_transform(), which fitted
# the identical PCA twice for the same result.
pca = decomposition.PCA(n_components=2)
feat_pca = pca.fit_transform(training_feat)

fig = plt.figure()
ax = fig.add_subplot(121)
plt.scatter(feat_pca[:,0], feat_pca[:,1], c = rf_label_training ,cmap = cmap, alpha=0.2)
cbar = plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
cbar.set_ticks([0,0.5,1])
cbar.set_ticklabels(['SR','TR','AT'])
plt.title('training data PCA')
plt.axis('equal')
# Optionally annotate points with their frame index; the probability
# threshold is 0.0, i.e. annotation is currently disabled.
for i in range(np.shape(feat_pca)[0]):
    if np.random.rand(1)[0] < 0.0:
        ax.text(feat_pca[i,0],feat_pca[i,1], str( training_idx[i] ), fontsize=10, alpha = 0.1)
print('pca explained_variance_ratio_ : ',pca.explained_variance_ratio_)

# --- Synthetic rotated-arc data (disabled experiment) ---
# Points on a short arc of the unit circle plus rotated copies; only
# takes effect if the "training_feat = data" line below is un-commented.
width = 0.1
# np.random.seed(1)
x = np.random.uniform(-width, width, size = 100)
y = np.sqrt(1-np.power(x,2))
origin = np.array([x,y]).transpose(1,0)
data = origin

for theta in [180, 45]:
# for theta in [90,270,180, 45, 225,135,315]:
    theta = theta * np.pi / 180
    rotate_matrix = [[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]]
    rotate_matrix = np.array(rotate_matrix)
    rotated = np.matmul(origin, rotate_matrix)
    data = np.concatenate((data, rotated))
# training_feat = data


# Average dot product between AT (label 3) and SR (label 1) features;
# presumably these are L2-normalized so this is cosine similarity —
# TODO confirm against the feature extractor.
at_feat = training_feat[np.array(rf_label_training) == 3]
sr_feat = training_feat[np.array(rf_label_training) == 1]
print(np.average(np.matmul(at_feat, sr_feat.transpose(1,0))))

# exit(0)

# =======================Clustering======================

# Infomap clustering result: each non-comment line of the .clu file is
# "node_id cluster_label flow"; node ids are frame indices mapped back
# to positions in training_idx.
with open('C:/Users/A/Desktop/fuck/out/round2graph.clu','r') as f:
    lines = f.readlines()
infomap_label = list(np.zeros(np.shape(training_feat)[0], dtype=int))

for line in lines:
    # Skip blank lines and '#' comments. (The original tested
    # len(line), which never catches a line containing only '\n' and
    # would then crash on the 3-way unpack below.)
    if not line.strip():
        continue
    if line[0] == '#':
        continue
    idx, label, flow = line.split()
    idx = int(idx)
    label = int(label)
    infomap_label[training_idx.index(idx)] = label

# Internal cluster-quality metrics for the infomap partition.
s_score = metrics.silhouette_score(training_feat, infomap_label, metric='cosine')
db_socre = metrics.davies_bouldin_score(training_feat, infomap_label)

# Scatter the PCA projection colored by infomap cluster label.
ax = fig.add_subplot(122)
plt.scatter(feat_pca[:,0], feat_pca[:,1], c = infomap_label ,cmap = cmap, alpha=0.5)
cbar = plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
cbar.set_ticks(list(np.linspace(0,1,len(set(infomap_label)))))
cbar.set_ticklabels(list(range(0,len(set(infomap_label)))))
plt.title('infomap cluster. n_cluster %d s_score %.3f db_score %.3f'%(len(set(infomap_label)), s_score, db_socre ))
plt.axis('equal')
# Point annotation disabled (threshold 0.0).
for i in range(np.shape(feat_pca)[0]):
    if np.random.rand(1)[0] < 0.0:
        ax.text(feat_pca[i,0],feat_pca[i,1], str( training_idx[i] ), fontsize=10, alpha = 0.1)


print('s_score',s_score)
print('db_socre',db_socre)

plt.show()

exit(0)

# Export a weighted similarity graph for infomap: one directed edge
# "src dst weight" per ordered pair with weight = exp(cosine sim),
# keeping only edges whose weight is at least 2.
# (Unreachable: follows the exit(0) above.)
s = 0
sim_matrix = np.exp(np.matmul(training_feat, training_feat.transpose(1,0)))
n_samples = np.shape(training_feat)[0]
with open('C:/Users/A/Desktop/fuck/round2graph.txt','w') as f:
    for i in range(n_samples):
        for j in range(n_samples):
            if j == i:
                continue
            if sim_matrix[i][j] < 2:
                continue
            f.write('%d %d %.5f\n'%(training_idx[i], training_idx[j], sim_matrix[i][j]))
            s += sim_matrix[i][j]
        # Progress indicator every 100 rows.
        if i % 100 == 1:
            print(i)
print(s)



exit(0) #######################################################


# DBSCAN clustering (unreachable: follows the exit(0) above). Sweep eps
# — currently pinned to a single value — and track the eps that gives
# the best silhouette score and the one that gives the best
# Davies-Bouldin score.
eps_s_score = np.inf
eps_db_score = np.inf
max_s_score = -np.inf
min_db_score = np.inf
cluster_s_score = 0
cluster_db_score = 0
# for eps in np.linspace(0.001,0.2,400):
for eps in [0.07281954887218045]:
    cluster_model = DBSCAN(eps=eps, min_samples=2, metric= 'cosine').fit(training_feat)
    # silhouette_score is undefined for a single cluster; abort the sweep.
    if len(set(cluster_model.labels_)) == 1:
        print('only one cluster!')
        break

    s_score = metrics.silhouette_score(training_feat, cluster_model.labels_, metric='cosine')
    db_socre = metrics.davies_bouldin_score(training_feat, cluster_model.labels_)

    # Best-so-far bookkeeping; cluster counts exclude the noise label (-1).
    if s_score > max_s_score:
        max_s_score = s_score
        eps_s_score = eps
        cluster_s_score = len(set(cluster_model.labels_)) - (1 if -1 in cluster_model.labels_ else 0)
    if min_db_score > db_socre:
        min_db_score = db_socre
        eps_db_score = eps
        cluster_db_score = len(set(cluster_model.labels_)) - (1 if -1 in cluster_model.labels_ else 0)

    # Row positions of DBSCAN noise points (label -1).
    noise_idx = np.argwhere(np.array(cluster_model.labels_) == -1)

    n_clusters = len(set(cluster_model.labels_)) - (1 if -1 in cluster_model.labels_ else 0)
    n_noise = list(cluster_model.labels_).count(-1)

    # Scatter the PCA projection colored by cluster label, with noise
    # points overlaid in black.
    # fig = plt.figure()
    ax = fig.add_subplot(122)
    plt.scatter(feat_pca[:,0], feat_pca[:,1], c = cluster_model.labels_ ,cmap = cmap, alpha=0.5)
    if noise_idx.size != 0:
        plt.scatter(feat_pca[noise_idx,0], feat_pca[noise_idx,1], c = 'black', zorder = 5, alpha=0.1)
    cbar = plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
    # Colorbar ticks: include a -1 tick only when noise is present.
    if n_noise != 0:
        cbar.set_ticks(list(np.linspace(0,1,n_clusters + 1)))
        cbar.set_ticklabels(list(range(-1,n_clusters)))
    else:
        cbar.set_ticks(list(np.linspace(0,1,n_clusters)))
        cbar.set_ticklabels(list(range(0,n_clusters)))
    # Annotate roughly 20% of the points with their frame index.
    for i in range(np.shape(feat_pca)[0]):
        if np.random.rand(1)[0] < 0.2:
            ax.text(feat_pca[i,0],feat_pca[i,1], str( training_idx[i] ), fontsize=10, alpha = 0.1)


    plt.title('training data PCA')
    plt.axis('equal')



    print("eps %f, n_cluster %d, n_noise %d, s_score %.3f, db_score %.3f"%(eps, n_clusters, n_noise, s_score, db_socre))
    plt.title("eps %f, n_cluster %d, n_noise %d, s_score %.3f, db_score %.3f"%(eps, n_clusters, n_noise, s_score, db_socre))
    plt.show()
    plt.close()

# Per-cluster statistics for the DBSCAN result: size and mean pairwise
# cosine similarity; also pool the features of "small" clusters
# (10 < size < 33) for a cross-cluster similarity check.
label = list(set(cluster_model.labels_))

small_class_feat = np.array([])
for i in label:
    n = list(cluster_model.labels_).count(i)
    print("label = %d, number = %d"%(i, n))

    training_idx = np.array(training_idx)
    # if n < 33:
    #     print(training_idx[np.array(cluster_model.labels_) == i])
    cur_feat = training_feat[np.array(cluster_model.labels_) == i]
    if n < 33 and n > 10:
        if small_class_feat.size == 0:
            small_class_feat = cur_feat
        else:
            small_class_feat = np.concatenate((small_class_feat, cur_feat))

    # Mean off-diagonal cosine similarity within the cluster. Guard the
    # n == 1 case (e.g. a single noise point) to avoid dividing by
    # zero — the original lacked the guard that the agglomerative loop
    # later in this file already has.
    cur_feat_trans = cur_feat.transpose(1,0)
    res = np.matmul(cur_feat, cur_feat_trans)
    res = np.sum(res) - np.trace(res)
    if n != 1:
        res = res / (n*(n-1))
    print('cluster average cosine similarity : ',res)


print("eps_s_score", eps_s_score)
print("eps_db_score",eps_db_score)
print('cluster_s_score',cluster_s_score)
print('cluster_db_score',cluster_db_score)

# Only meaningful when at least one small cluster was found; an empty
# 1-D array cannot be transposed with axes (1, 0).
if small_class_feat.size != 0:
    print(np.average(np.matmul(small_class_feat, small_class_feat.transpose(1,0))))

exit(0) #######################################################

# Agglomerative (single-linkage, cosine) clustering at a fixed cluster
# count, with the same silhouette / Davies-Bouldin reporting as the
# DBSCAN section above (unreachable: follows the exit(0) above).
max_s_score = -np.inf
min_db_score = np.inf
cluster_s_score = 0
cluster_db_score = 0
# for eps in np.linspace(0.001,0.2,400):
n_clusters = 9
cluster_model = AgglomerativeClustering(n_clusters=n_clusters, linkage='single', affinity= 'cosine').fit(training_feat)

s_score = metrics.silhouette_score(training_feat, cluster_model.labels_, metric='cosine')
db_socre = metrics.davies_bouldin_score(training_feat, cluster_model.labels_)

# Best-so-far bookkeeping (vestigial: only one configuration is run).
if s_score > max_s_score:
    max_s_score = s_score
    cluster_s_score = len(set(cluster_model.labels_))
if min_db_score > db_socre:
    min_db_score = db_socre
    cluster_db_score = len(set(cluster_model.labels_))


# Scatter the PCA projection colored by cluster label.
# fig = plt.figure()
ax = fig.add_subplot(122)
plt.scatter(feat_pca[:,0], feat_pca[:,1], c = cluster_model.labels_ ,cmap = cmap, alpha=0.5)
cbar = plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
cbar.set_ticks(list(np.linspace(0,1,n_clusters)))
cbar.set_ticklabels(list(range(0,n_clusters)))
plt.axis('equal')
# Point annotation disabled (threshold 0.0).
for i in range(np.shape(feat_pca)[0]):
    if np.random.rand(1)[0] < 0.0:
        ax.text(feat_pca[i,0],feat_pca[i,1], str( training_idx[i] ), fontsize=10, alpha = 0.1)


print(" n_cluster %d,  s_score %.3f, db_score %.3f"%( n_clusters, s_score, db_socre))
plt.title("n_cluster %d,  s_score %.3f, db_score %.3f"%(n_clusters, s_score, db_socre))
plt.show()
plt.close()

# Per-cluster size and mean off-diagonal cosine similarity.
label = list(set(cluster_model.labels_))
for i in label:
    n = list(cluster_model.labels_).count(i)
    print("label = %d, number = %d"%(i, n))

    training_idx = np.array(training_idx)
    # if n < 50:
    #     print(training_idx[np.array(cluster_model.labels_) == i])
    cur_feat = training_feat[np.array(cluster_model.labels_) == i]
    cur_feat_trans = cur_feat.transpose(1,0)
    res = np.matmul(cur_feat, cur_feat_trans)
    res = np.sum(res) - np.trace(res)
    # Guard singleton clusters against division by zero.
    if n != 1:
        res = res / (n*(n-1))
    print('cluster average cosine similarity : ',res)

print('cluster_s_score',cluster_s_score)
print('cluster_db_score',cluster_db_score)

exit(0) #######################################################




# Sweep KMeans over 2..20 clusters, reporting silhouette (cosine) and
# Davies-Bouldin scores for each cluster count.
# (Unreachable: follows the exit(0) above.)
for n_clusters in range(2, 21):
    cluster_model = KMeans(n_clusters=n_clusters).fit(training_feat)
    centers = cluster_model.cluster_centers_

    labels = cluster_model.labels_
    s_score = metrics.silhouette_score(training_feat, labels, metric='cosine')
    db_socre = metrics.davies_bouldin_score(training_feat, labels)

    print("n_cluster %d, s_score %.3f, db_score %.3f"%(n_clusters, s_score, db_socre))



exit(0) #######################################################




# Sweep KMedoids (cosine metric) over 2..20 clusters, reporting
# silhouette and Davies-Bouldin scores for each cluster count.
# (Unreachable: follows the exit(0) above.)
for n_clusters in range(2, 21):
    cluster_model = KMedoids(n_clusters=n_clusters, metric='cosine').fit(training_feat)
    centers = cluster_model.cluster_centers_

    labels = cluster_model.labels_
    s_score = metrics.silhouette_score(training_feat, labels, metric='cosine')
    db_socre = metrics.davies_bouldin_score(training_feat, labels)

    print("n_cluster %d, s_score %.3f, db_score %.3f"%(n_clusters, s_score, db_socre))

exit(0) #######################################################





# For each KMeans cluster count in 2..20, compute the average assignment
# entropy: softmax over inverse-euclidean distances from each sample to
# every center, then the entropy of that distribution (helpers come from
# utils2). Also plots the first two feature dimensions with the centers.
# (Unreachable: follows the exit(0) above.)
for n_clusters in range(2, 21):
    cluster_model = KMeans(n_clusters=n_clusters).fit(training_feat)
    centers = cluster_model.cluster_centers_

    # Scatter plot over the unit circle's bounding box with axis lines.
    plt.figure()
    for center in centers:
        plt.scatter(center[0], center[1], c = 'black', zorder = 4)
    plt.scatter(training_feat[:,0], training_feat[:,1], c = cluster_model.labels_, cmap='rainbow', zorder = 3)
    plt.xlim((-1.2,1.2))
    plt.ylim((-1.2,1.2))
    plt.plot([-1,1],[0,0], c = 'black')
    plt.plot([0,0],[-1,1], c = 'black')
    plt.axis('equal')

    # Per-sample entropy of the soft assignment distribution.
    entropy_list = [
        my_entropy(my_softmax([inverse_euclidean_distance(sample, center) for center in centers]))
        for sample in training_feat
    ]
    print("n_cluster %d, average entropy %.3f"%(n_clusters, np.average(entropy_list)))

    plt.title("n_cluster %d, average entropy %.3f"%(n_clusters, np.average(entropy_list)))
    plt.show()