import numpy as np
from cv2 import cv2
import re
import matplotlib.pyplot as plt
from utils2 import *
import random
from sklearn.cluster import KMeans
from sklearn import decomposition
import matplotlib
import matplotlib.image as gImage
from sklearn.manifold import TSNE
from matplotlib.ticker import FuncFormatter
import scipy.stats
import time
import random
from sklearn.metrics import confusion_matrix
import copy
import pickle
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn.cluster import AgglomerativeClustering
from sklearn_extra.cluster import KMedoids
from sklearn import metrics
from sklearn.metrics import pairwise_distances

# -------- Input paths and feature loading --------
# Features (N x D array), anchor image directory, optional ground-truth
# labels, and synchronized GNSS positions for one driving round.
feature_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/round2_method2.npy'
anchorImg_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/anchorImgs_method2'
referenceLabel_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/anchorReferenceLabel.txt' # no reference label by default
position_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/round2_sync_position.npy' # longitude / latitude

# Deliberately blank out the reference label path (disables ground-truth
# label loading below; the line above is kept only for convenience).
referenceLabel_path = ''

# Bird's-eye-view background image the trajectory is drawn over.
pkuBirdViewImg = 'D:\\Research\\2020ContrastiveLearningForSceneLabel\\Data\\pkuBirdView.png'

feat = np.load(feature_path)
# Number of frames = number of feature rows.
frame_count = np.shape(feat)[0]

# Build anchor / training index lists from the anchor image directory.
anchor_pos_list, anchor_neg_list = getAnchorPosNegIdx3(anchorImg_path, sampleNum = 16, sample_interval = 2)
# Each entry's first element is the anchor's frame index.
anchor_idx = [entry[0] for entry in anchor_pos_list]
# Each entry's second element is the list of positive-sample frame indices;
# concatenate them all into one flat training index list.
training_idx = []
for entry in anchor_pos_list:
    training_idx.extend(entry[1])


# -------- Reference (ground-truth) labels --------
# If a reference label file is given, propagate each anchor's label to every
# frame between that anchor and the next one; otherwise fall back to a single
# dummy class (all zeros).
if referenceLabel_path != '':
    # Context manager so the file handle is always closed (the original
    # opened the file and never closed it).
    with open(referenceLabel_path, 'r') as f:
        lines = f.readlines()
    # int(line) instead of int(line[0]): reads multi-digit labels correctly
    # and is identical for single-digit lines (int() ignores the newline).
    # Assumes one integer label per line — TODO confirm file format.
    rf_label_anchor = [int(line) for line in lines]
    rf_label_allFrame = list(range(frame_count))  # placeholders, all overwritten below
    for i, idx in enumerate(anchor_idx):
        if i == len(anchor_idx) - 1:
            break
        # Frames between anchor i and anchor i+1 inherit anchor i's label.
        for j in range(anchor_idx[i], anchor_idx[i+1]):
            rf_label_allFrame[j] = rf_label_anchor[i]

    # Frames after the last anchor get the last anchor's label.
    for i in range(anchor_idx[-1], frame_count):
        rf_label_allFrame[i] = rf_label_anchor[-1]
    # Frames before the FIRST anchor also get the LAST anchor's label
    # (reasonable if the trajectory loops back to its start).
    # NOTE(review): if the route does not loop, rf_label_anchor[0] would be
    # the more natural choice here — confirm.
    for i in range(0, anchor_idx[0]):
        rf_label_allFrame[i] = rf_label_anchor[-1]

else:
    # No reference labels available: single dummy class 0 everywhere.
    rf_label_anchor = [0 for _ in anchor_idx]
    rf_label_allFrame = [0 for _ in range(frame_count)]

# Per-training-sample labels, looked up from the per-frame labels.
rf_label_training = [rf_label_allFrame[idx] for idx in training_idx]

rf_label_anchor = np.array(rf_label_anchor)
rf_label_training = np.array(rf_label_training)
rf_label_allFrame = np.array(rf_label_allFrame)

# Feature rows of the training samples only.
training_feat = feat[training_idx]
# ====================== end of data loading ======================

# ======================= PCA dimensionality reduction ======================
cmap = 'rainbow'

pca = decomposition.PCA(n_components=2)
# fit_transform both fits the model and projects the data in one pass;
# the original called fit() and then fit_transform(), fitting twice.
training_feat_pca = pca.fit_transform(training_feat)

# training_feat_pca = TSNE().fit_transform(training_feat)

# Left subplot: 2-D PCA projection colored by the reference labels.
fig = plt.figure()
ax = fig.add_subplot(121)
plt.scatter(training_feat_pca[:,0], training_feat_pca[:,1], c = rf_label_training ,cmap = cmap, alpha=0.2)
cbar = plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
cbar.set_ticks([0,0.5,1])
cbar.set_ticklabels(['SR','TR','AT'])
plt.title('training data PCA')
plt.axis('equal')
# Debug toggle: the threshold is 0.0 so no frame-index annotations are
# drawn; raise it to label a random subset of points.
for i in range(np.shape(training_feat_pca)[0]):
    if np.random.rand(1)[0] < 0.0:
        ax.text(training_feat_pca[i,0],training_feat_pca[i,1], str( training_idx[i] ), fontsize=10, alpha = 0.1)
print('pca explained_variance_ratio_ : ',pca.explained_variance_ratio_)


# ======================= DBSCAN clustering ======================
# Sweep eps (currently pinned to a single hand-picked value) and track the
# eps that maximizes the silhouette score and the one that minimizes the
# Davies-Bouldin score.
eps_s_score = np.inf
eps_db_score = np.inf
max_s_score = -np.inf
min_db_score = np.inf
cluster_s_score = 0
cluster_db_score = 0
# for eps in np.linspace(0.001,0.2,400):
for eps in [0.23]:#0.079802
    cluster_model = DBSCAN(eps=eps, min_samples=2, metric= 'cosine').fit(training_feat)
    if len(set(cluster_model.labels_)) == 1:
        # Both quality metrics are undefined with a single cluster.
        print('only one cluster!')
        break

    s_score = metrics.silhouette_score(training_feat, cluster_model.labels_, metric='cosine')
    # Fixed variable name: was misspelled "db_socre".
    db_score = metrics.davies_bouldin_score(training_feat, cluster_model.labels_)

    if s_score > max_s_score:
        max_s_score = s_score
        eps_s_score = eps
        cluster_s_score = len(set(cluster_model.labels_)) - (1 if -1 in cluster_model.labels_ else 0)
    if min_db_score > db_score:
        min_db_score = db_score
        eps_db_score = eps
        cluster_db_score = len(set(cluster_model.labels_)) - (1 if -1 in cluster_model.labels_ else 0)

    # Row indices (within the training set) of points marked as noise.
    training_noise_idx = np.argwhere(np.array(cluster_model.labels_) == -1)

    # Cluster count excludes the noise label (-1).
    n_clusters = len(set(cluster_model.labels_)) - (1 if -1 in cluster_model.labels_ else 0)
    n_noise = list(cluster_model.labels_).count(-1)

    # Right subplot: same PCA projection, colored by the DBSCAN labels.
    # fig = plt.figure()
    ax = fig.add_subplot(122)
    plt.scatter(training_feat_pca[:,0], training_feat_pca[:,1], c = cluster_model.labels_ ,cmap = cmap, alpha=0.5, zorder = 1)
    # if training_noise_idx.size != 0:
    #     plt.scatter(training_feat_pca[training_noise_idx,0], training_feat_pca[training_noise_idx,1], c = 'black', zorder = 1, alpha=1)
    cbar = plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
    # Colorbar ticks include an extra slot for the -1 noise label if present.
    if n_noise != 0:
        cbar.set_ticks(list(np.linspace(0,1,n_clusters + 1)))
        cbar.set_ticklabels(list(range(-1,n_clusters)))
    else:
        cbar.set_ticks(list(np.linspace(0,1,n_clusters)))
        cbar.set_ticklabels(list(range(0,n_clusters)))
    # Debug toggle: threshold 0.0 disables the frame-index annotations.
    for i in range(np.shape(training_feat_pca)[0]):
        if np.random.rand(1)[0] < 0.0:
            ax.text(training_feat_pca[i,0],training_feat_pca[i,1], str( training_idx[i] ), fontsize=10, alpha = 0.1)


    plt.title('training data PCA')
    plt.axis('equal')



    print("eps %f, n_cluster %d, n_noise %d, s_score %.3f, db_score %.3f"%(eps, n_clusters, n_noise, s_score, db_score))
    plt.title("eps %f, n_cluster %d, n_noise %d, s_score %.3f, db_score %.3f"%(eps, n_clusters, n_noise, s_score, db_score))
    # plt.show()
    # plt.close()

# -------- Per-cluster statistics --------
label = list(set(cluster_model.labels_))
labels_list = list(cluster_model.labels_)    # hoisted: was rebuilt every iteration
labels_arr = np.array(cluster_model.labels_) # hoisted: was rebuilt every iteration
training_idx = np.array(training_idx)        # hoisted: was re-converted every iteration

# Features of all "small" clusters (10 < n < 33), stacked row-wise.
small_class_feat = np.array([])
for i in label:
    n = labels_list.count(i)
    print("label = %d, number = %d"%(i, n))

    # if n < 33:
    #     print(training_idx[labels_arr == i])
    cur_feat = training_feat[labels_arr == i]
    if n < 33 and n > 10:
        if small_class_feat.size == 0:
            small_class_feat = cur_feat
        else:
            small_class_feat = np.concatenate((small_class_feat, cur_feat))

    if n < 2:
        # A singleton "cluster" (possible for the -1 noise label) has no
        # off-diagonal pairs; the original divided by n*(n-1) == 0 here and
        # printed nan with a RuntimeWarning.
        continue
    # Average pairwise similarity within the cluster via the Gram matrix,
    # dropping the self-similarity diagonal. Dot product equals cosine
    # similarity only if features are L2-normalized upstream — TODO confirm.
    res = np.matmul(cur_feat, cur_feat.transpose(1,0))
    res = np.sum(res) - np.trace(res)
    res = res / (n*(n-1))
    print('cluster average cosine similarity : ',res)


print("eps_s_score", eps_s_score)
print("eps_db_score",eps_db_score)
print('cluster_s_score',cluster_s_score)
print('cluster_db_score',cluster_db_score)

# Guard: with no small cluster, small_class_feat is still a 1-D empty array
# and transpose(1,0) would raise in the original.
if small_class_feat.size != 0:
    print(np.average(np.matmul(small_class_feat, small_class_feat.transpose(1,0))))

# exit(0) #######################################################

# ===== Draw the clustering result of the training data on the trajectory =====
GNSS = np.load(position_path)
rotate = 0 # in degrees
shiftX = 625
shiftY = 620
dx = 0.777
dy = 0.777 # all of the parameters above were tuned by hand
alpha = 0.5

# Map GNSS coordinates onto the bird's-eye-view image: flip, scale, shift.
GNSS[:,0] = -GNSS[:,0]
GNSS[:,1] *= dx
GNSS[:,0] *= dy
GNSS[:,1] += shiftX
GNSS[:,0] += shiftY


# Background: the (cropped, semi-transparent) bird's-eye-view image.
fig = plt.figure()
ax = fig.add_subplot(111)
img = gImage.imread(pkuBirdViewImg)
img = img[0:1087,:,:]
img[:,:,3] = alpha
ax.imshow(img, zorder = 0)

# Relabel clusters: keep -1 for noise, give each large cluster (>100 members)
# its own consecutive label, merge all small clusters into one shared label.
processed_cluster_label = np.ones_like(cluster_model.labels_, dtype=int) * (-666)  # -666 = "not yet assigned" sentinel
next_label = 0
labels = list(set(cluster_model.labels_))
for i in labels:
    if i == -1:
        processed_cluster_label[np.array(cluster_model.labels_) == i] = i
    elif list(cluster_model.labels_).count(i) > 100: # every large cluster gets its own label
        processed_cluster_label[np.array(cluster_model.labels_) == i] = next_label
        next_label += 1
    else:
        processed_cluster_label[np.array(cluster_model.labels_) == i] = -2
processed_cluster_label[processed_cluster_label == -2] = np.max(processed_cluster_label) + 1 # all small clusters share one label
if -666 in processed_cluster_label:
    print('Process label error! Program exit.')
    exit(0)


# NOTE(review): this discards the relabeling computed above and plots the
# raw DBSCAN labels instead — presumably a debug toggle; confirm intent.
processed_cluster_label = cluster_model.labels_

ax.scatter(GNSS[training_idx,1], GNSS[training_idx,0], s=15, c = processed_cluster_label, cmap=cmap, zorder = 1) # vehicle trajectory
ax.scatter(GNSS[training_idx[training_noise_idx],1], GNSS[training_idx[training_noise_idx],0], s=15, c = processed_cluster_label[training_noise_idx], cmap=cmap, zorder = 2) # vehicle trajectory (noise points drawn on top)
plt.axis('equal')
cbar = plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))

# Debug toggle: threshold 0.0 disables random frame-index annotations.
for i in range(len(training_idx)):
    if np.random.rand(1)[0] < 0.0:
        ax.text(GNSS[training_idx[i],1],GNSS[training_idx[i],0], str( training_idx[i] ), fontsize=10, alpha = 0.5)

# Annotate every noise point with its frame index.
for i in range(np.shape(training_noise_idx)[0]):
    ax.text(GNSS[training_idx[training_noise_idx[i]],1],GNSS[training_idx[training_noise_idx[i]],0], str( training_idx[training_noise_idx[i]] ), fontsize=10, alpha = 0.5)

# for i in range(len(anchor_idx)):
#     ax.text(GNSS[anchor_idx[i],1],GNSS[anchor_idx[i],0], str( anchor_idx[i] ), fontsize=10, alpha = 0.5)

# Full driving path as a line underneath the scatter.
ax.plot(GNSS[:, 1],GNSS[:, 0], zorder = 0)

# Colorbar ticks: one extra slot for the -1 noise label when present.
process_n_cluster = len(set(processed_cluster_label)) - (1 if -1 in cluster_model.labels_ else 0)
if n_noise != 0:
    cbar.set_ticks(list(np.linspace(0,1,process_n_cluster + 1)))
    cbar.set_ticklabels(list(range(-1,process_n_cluster)))
else:
    cbar.set_ticks(list(np.linspace(0,1,process_n_cluster)))
    cbar.set_ticklabels(list(range(0,process_n_cluster)))      

plt.scatter(GNSS[anchor_idx,1], GNSS[anchor_idx,0], s = 1, c='k', zorder = 3) # all anchors
plt.title('DBSCAN cluster res on training data')


# Check whether each anchor's positive samples were clustered together.
# Build a frame-index -> training-row lookup once up front; the original
# rebuilt list(training_idx) and ran a linear .index() search for every
# single sample (O(n) per lookup).
first_pos = {}
for row, frame in enumerate(training_idx):
    first_pos.setdefault(int(frame), row)  # keep the FIRST occurrence, matching list.index

anchor_num = 0
abnormal_cluster_num = 0
for entry in anchor_pos_list:
    anchor_num += 1
    # entry[1] holds the positive-sample frame indices for this anchor.
    rows = [first_pos[int(i)] for i in entry[1]]
    labels = cluster_model.labels_[rows]
    if len(set(labels)) != 1:
        # Positives of this anchor were split across clusters — report them.
        abnormal_cluster_num += 1
        print(training_idx[rows])

print('anchor_num %d, abnormal_cluster_num %d'%(anchor_num, abnormal_cluster_num))



# Banner around the (currently disabled) classification report.
banner = '<================Training set classification report================>'
print(banner)
# print(classification_report(rf_label_training[processed_cluster_label != -1], processed_cluster_label[processed_cluster_label != -1] + 1,digits=3))
print(banner)

plt.show()