import numpy as np
from cv2 import cv2  # NOTE(review): modern OpenCV wheels expose plain `import cv2`; kept as-is
import re
import matplotlib.pyplot as plt
from numpy.core.defchararray import count
from utils2 import *  # wildcard kept BEFORE the explicit imports below so they shadow utils2 names, as before
import random
from sklearn.cluster import KMeans
from sklearn import decomposition
import matplotlib
import matplotlib.image as gImage
from sklearn.manifold import TSNE
from matplotlib.ticker import FuncFormatter
import scipy.stats
import time
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report  # used below; was previously reachable only via utils2's wildcard
import copy
import pickle
from sklearn.cluster import DBSCAN
from sklearn.cluster import AgglomerativeClustering
from sklearn_extra.cluster import KMedoids
from sklearn import metrics
from sklearn.metrics import pairwise_distances



# --- Input data paths (hand-edited per experiment round) ---
feature_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/supplementSampleByHumanNpyFiles/320pairWithRound1Supplement_moreDataAug_grayOpticalFlow/round2.npy'
# feature_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/80pair.npy'
anchorImg_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/anchorImgs_method2_exp2'
anchor_referenceLabel_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/anchorReferenceLabel_method2_exp2.txt' # no reference label by default (empty string disables)
position_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/round2_sync_position.npy' # per-frame GNSS position (latitude/longitude)
allImage_referenceLabel_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/allImageReferenceLabel.txt'

pkuBirdViewImg = 'D:\\Research\\2020ContrastiveLearningForSceneLabel\\Data\\pkuBirdView.png'
anchor_training_index_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/anchorImgs_method2_exp2/round2_method2_exp2_training_index.txt'


# Per-frame feature matrix, shape (frame_count, feature_dim).
# NOTE(review): the dot products below are treated as cosine similarity, which
# assumes the features are L2-normalized -- TODO confirm upstream.
feat = np.load(feature_path)
frame_count = np.shape(feat)[0]

sampleNum = 16  # number of training samples stored per anchor
# anchor_pos_list, anchor_neg_list = getAnchorPosNegIdx3(anchorImg_path, sampleNum = sampleNum, sample_interval = 2)
# Each list entry pairs an anchor frame index with a list of sampled frame indices.
anchor_pos_list, anchor_neg_list = getAnchorPosNegIdx4(anchor_training_index_path)

anchor_idx = [pair[0] for pair in anchor_pos_list]
training_idx = []
for pair in anchor_pos_list:
    training_idx += pair[1]

if anchor_referenceLabel_path != '':
    # Use a context manager so the file handle is always closed.
    with open(anchor_referenceLabel_path, 'r') as f:
        lines = f.readlines()
    # First character of each line is the label digit (assumes single-digit labels).
    rf_label_anchor = [int(line[0]) for line in lines]
else:
    # No reference labels available: default every anchor to label 0.
    rf_label_anchor = [0 for _ in anchor_idx]

# Expand anchor labels to the training samples: each anchor contributes
# sampleNum consecutive samples, in anchor-major order.
rf_label_training = [lab for lab in rf_label_anchor for _ in range(sampleNum)]
rf_label_anchor = np.array(rf_label_anchor)
rf_label_training = np.array(rf_label_training)

# Per-frame reference labels; each line of the file is "<frame_id> <label>".
with open(allImage_referenceLabel_path, 'r') as f:
    lines = f.readlines()
rf_label_allFrame = [int(line.split()[1]) for line in lines]

# Slice out the features used for training and the anchor features.
training_feat = feat[training_idx]
anchor_feat = feat[anchor_idx]

# Frame indices of anchors whose reference label equals 3.
print(np.array(anchor_idx)[np.array(rf_label_anchor) == 3])

def _avg_pair_similarity(feat_arr, pair_list, keep_mask=None):
    """Mean dot-product similarity between each anchor and its paired frames.

    feat_arr:  (n_frames, dim) feature matrix.
    pair_list: iterable whose entries expose [0]=anchor index, [1]=list of
               partner frame indices.
    keep_mask: optional boolean sequence; entry i False skips pair_list[i].
    """
    sims = []
    for k, pair in enumerate(pair_list):
        if keep_mask is not None and not keep_mask[k]:
            continue
        anchor, partners = pair[0], pair[1]
        for j in partners:
            sims.append(np.sum(feat_arr[anchor] * feat_arr[j]))
    return np.average(sims)


print('Anchor-Positive similarity: ', _avg_pair_similarity(feat, anchor_pos_list))
print('Anchor-Negative similarity: ', _avg_pair_similarity(feat, anchor_neg_list))

# Restrict to anchors whose reference label is 3 ("alerting" -- presumed; confirm).
alerting_mask = rf_label_anchor == 3
print('Alerting anchor-positive similarity: ', _avg_pair_similarity(feat, anchor_pos_list, alerting_mask))
print('Alerting anchor-negative similarity: ', _avg_pair_similarity(feat, anchor_neg_list, alerting_mask))
# exit(0)
# ====================== End of data loading ======================

# ======================= PCA dimensionality reduction ======================
cmap = 'rainbow'

# 2-D PCA fitted on the training features only; all later projections reuse it.
pca = decomposition.PCA(n_components=2)
pca.fit(training_feat)
training_feat_pca = pca.transform(training_feat)
anchor_feat_pca = pca.transform(anchor_feat)

# training_feat_pca = TSNE().fit_transform(training_feat)

# Scatter the reduced features; colour encodes the reference label.
fig = plt.figure()
ax = fig.add_subplot(121)
plt.scatter(training_feat_pca[:,0], training_feat_pca[:,1], c = rf_label_training ,cmap = cmap, alpha=0.5)
cbar = plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
cbar.set_ticks([0,0.5,1])
cbar.set_ticklabels(['SR','TR','AT']) # presumably straight-road / turn / alerting -- confirm
plt.title('training data PCA')
plt.axis('equal')

# Annotate a random subset of features with their frame index.
# NOTE(review): the 0.0 threshold makes this always false (annotation disabled);
# raise it to label a fraction of the points.
for i in range(np.shape(training_feat_pca)[0]):
    if np.random.rand(1)[0] < 0.0:
        ax.text(training_feat_pca[i,0],training_feat_pca[i,1], str( training_idx[i] ), fontsize=10, alpha = 0.1)

# '+' markers show the anchor features
plt.scatter(anchor_feat_pca[:,0], anchor_feat_pca[:,1], marker = '+')
# Write each anchor's frame index (plus its ordinal) next to its marker
for i in range(np.shape(anchor_feat_pca)[0]):
    ax.text(anchor_feat_pca[i,0],anchor_feat_pca[i,1], str( anchor_idx[i] ) + ', %d'%i, fontsize=10, alpha = 0.5)

# Highlight one hand-picked anchor in black.
sample = 2
plt.scatter(anchor_feat_pca[sample,0], anchor_feat_pca[sample,1], c = 'black')


# Ad-hoc similarity probe between two specific frames.
print(np.sum(feat[764] * feat[30182]))

print('pca explained_variance_ratio_ : ',pca.explained_variance_ratio_)

# plt.figure()
# all_feat_pca = pca.transform(feat)
# plt.scatter(all_feat_pca[:,0], all_feat_pca[:,1], c = rf_label_allFrame, cmap = cmap, alpha= 0.5)


# ======================= Clustering on the training features ======================

max_s_score = -np.inf     # best silhouette score seen so far
min_db_score = np.inf     # best (lowest) Davies-Bouldin score seen so far
cluster_s_score = 0       # cluster count achieving max_s_score
cluster_db_score = 0      # cluster count achieving min_db_score

valid_rate_list = []
# Leftover scaffolding from a cluster-count sweep; currently a single pass with
# n_clusters pinned to 3.
for n_clusters in range(1):
    n_clusters = 3
    # NOTE(review): an AgglomerativeClustering fit used to run here and was
    # immediately overwritten by KMeans; the dead fit has been removed.
    cluster_model = KMeans(n_clusters=n_clusters).fit(training_feat)

    # Internal clustering-quality metrics (no ground truth required).
    s_score = metrics.silhouette_score(training_feat, cluster_model.labels_, metric='cosine')
    db_score = metrics.davies_bouldin_score(training_feat, cluster_model.labels_)

    if s_score > max_s_score:
        max_s_score = s_score
        cluster_s_score = len(set(cluster_model.labels_))
    if min_db_score > db_score:
        min_db_score = db_score
        cluster_db_score = len(set(cluster_model.labels_))

    # Label of each anchor = cluster label of its first training sample
    # (training samples are stored anchor-major, sampleNum per anchor).
    anchor_label = [cluster_model.labels_[i * sampleNum] for i in range(len(anchor_idx))]

    # Count neighbouring anchors that fall into different clusters.
    valid_num = 0.0
    for i in range(len(anchor_label)):
        if anchor_label[i] != anchor_label[(i + 1) % len(anchor_label)]:
            valid_num += 1
        if anchor_label[i] != anchor_label[(i - 1)]:
            valid_num += 1

    # (Disabled diagnostics removed: checking whether each anchor's positive
    # samples land in one cluster, and accumulating valid_rate per n_clusters.)

# Draw the clustering result in PCA space (second subplot of the first figure).
ax = fig.add_subplot(122)
pca_ax = ax  # kept for later: the class prototype features are overlaid on this axis
plt.scatter(training_feat_pca[:,0], training_feat_pca[:,1], c = cluster_model.labels_ ,cmap = cmap, alpha=0.5)
cbar = plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
cbar.set_ticks(list(np.linspace(0,1,n_clusters)))
cbar.set_ticklabels(list(range(0,n_clusters)))
plt.axis('equal')
# Annotate a random subset with frame indices (0.0 threshold = disabled).
for i in range(np.shape(training_feat_pca)[0]):
    if np.random.rand(1)[0] < 0.0:
        ax.text(training_feat_pca[i,0],training_feat_pca[i,1], str( training_idx[i] ), fontsize=10, alpha = 0.1)


print(" n_cluster %d,  s_score %.3f, db_score %.3f"%( n_clusters, s_score, db_score))
plt.title("n_cluster %d,  s_score %.3f, db_score %.3f"%(n_clusters, s_score, db_score))


# Side-by-side: reference labels on the training set (PCA space) vs. on all frames.
fig = plt.figure()
ax = fig.add_subplot(121)
plt.scatter(training_feat_pca[:,0], training_feat_pca[:,1], c = rf_label_training ,cmap = cmap, alpha=0.5)
cbar = plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
cbar.set_ticks([0,0.5,1])
cbar.set_ticklabels(['SR','TR','AT'])
plt.title('training data PCA')
plt.axis('equal')

# Project ALL frames with the PCA fitted on the training features only.
feat_pca = pca.transform(feat)
ax = fig.add_subplot(122)
plt.scatter(feat_pca[:,0], feat_pca[:,1], c = rf_label_allFrame ,cmap = cmap, alpha=0.5)

plt.show()
# exit(0)
# plt.close()


# ====== Per-cluster statistics: size and mean intra-cluster cosine similarity ======
label = list(set(cluster_model.labels_))
labels_arr = np.array(cluster_model.labels_)  # hoisted loop invariant
training_idx = np.array(training_idx)         # enables fancy indexing later in the script
for i in label:
    member_mask = labels_arr == i
    n = int(np.sum(member_mask))  # faster than list(...).count(i) inside the loop
    print("label = %d, number = %d"%(i, n))

    # if n < 50:
    #     print(training_idx[member_mask])
    cur_feat = training_feat[member_mask]
    # Pairwise dot products; subtract the trace to exclude self-similarity.
    res = np.matmul(cur_feat, cur_feat.transpose(1,0))
    res = np.sum(res) - np.trace(res)
    if n != 1:
        res = res / (n*(n-1))  # average over ordered pairs
    print('cluster average cosine similarity : ',res)

print('cluster_s_score',cluster_s_score)
print('cluster_db_score',cluster_db_score)

# exit(0) #######################################################



# Merge every cluster smaller than a size threshold into one shared "small" class.
processed_training_cluster_label = np.ones_like(cluster_model.labels_, dtype=int) * (-666)  # -666 = "unassigned" sentinel
next_label = 1
labels = list(set(cluster_model.labels_))
for i in labels:
    if i == -1:
        # DBSCAN-style noise label is propagated unchanged.
        processed_training_cluster_label[np.array(cluster_model.labels_) == i] = i
    elif list(cluster_model.labels_).count(i) > 100: # every large cluster keeps its own label
        processed_training_cluster_label[np.array(cluster_model.labels_) == i] = next_label
        next_label += 1
    else:
        # Temporarily mark small clusters with -2; resolved below.
        processed_training_cluster_label[np.array(cluster_model.labels_) == i] = -2
processed_training_cluster_label[processed_training_cluster_label == -2] = np.max(processed_training_cluster_label) + 1 # all small clusters share one label
if -666 in processed_training_cluster_label:
    print('Process label error! Program exit.')
    exit(0)


# NOTE(review): this reassignment DISCARDS the small-cluster merge computed
# above -- downstream code only ever sees the raw KMeans labels shifted to 1..n.
processed_training_cluster_label = cluster_model.labels_ + 1

# ===== Draw the training-set clustering result on the vehicle trajectory =====
GNSS = np.load(position_path)
rotate = 0 # rotation in degrees (currently unused)
shiftX = 625
shiftY = 620
dx = 0.777
dy = 0.777 # all of the parameters above were tuned by hand
alpha = 0.5
# Map GNSS coordinates onto bird's-eye-image pixel coordinates:
# flip one axis, scale, then translate.
GNSS[:,0] = -GNSS[:,0]
GNSS[:,1] *= dx
GNSS[:,0] *= dy
GNSS[:,1] += shiftX
GNSS[:,0] += shiftY

# Training-data clustering result drawn on the satellite image
# Background: bird's-eye-view image (alpha channel lowered for contrast)
fig = plt.figure()
ax = fig.add_subplot(121)
img = gImage.imread(pkuBirdViewImg)
img = img[0:1087,:,:]
img[:,:,3] = alpha
ax.imshow(img, zorder = 0)

ax.scatter(GNSS[training_idx,1], GNSS[training_idx,0], s=15, c = processed_training_cluster_label, cmap=cmap, zorder = 1) # plot all training samples
plt.axis('equal')
cbar = plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))

# Annotate a random subset of training samples with their frame index
# (the 0.0 threshold currently disables this).
for i in range(len(training_idx)):
    if np.random.rand(1)[0] < 0.0:
        ax.text(GNSS[training_idx[i],1],GNSS[training_idx[i],0], str( training_idx[i] ), fontsize=10, alpha = 0.5) # indices of some training samples

plt.scatter(GNSS[anchor_idx,1], GNSS[anchor_idx,0], s = 1, c='k', zorder = 3) # plot all anchors
# for i in range(len(anchor_idx)):
#     ax.text(GNSS[anchor_idx[i],1],GNSS[anchor_idx[i],0], str( anchor_idx[i] ), fontsize=10, alpha = 0.5) # draw anchor indices on the trajectory

ax.plot(GNSS[:, 1],GNSS[:, 0], zorder = 0) # vehicle driving trajectory

process_n_cluster = len(set(processed_training_cluster_label))

cbar.set_ticks(list(np.linspace(0,1,process_n_cluster)))
cbar.set_ticklabels(list(range(0,process_n_cluster)))
ax.set_title('clustering res on training data')

# Training-data reference labels drawn on the satellite image (right subplot)
ax = fig.add_subplot(122)
img = gImage.imread(pkuBirdViewImg)
img = img[0:1087,:,:]
img[:,:,3] = alpha
ax.imshow(img, zorder = 0)

ax.scatter(GNSS[training_idx,1], GNSS[training_idx,0], s=15, c = rf_label_training, cmap=cmap, zorder = 1) # plot all training samples
plt.axis('equal')
cbar = plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))

# Annotate a random subset of training samples with their frame index
# (the 0.0 threshold currently disables this).
for i in range(len(training_idx)):
    if np.random.rand(1)[0] < 0.0:
        ax.text(GNSS[training_idx[i],1],GNSS[training_idx[i],0], str( training_idx[i] ), fontsize=10, alpha = 0.5) # indices of some training samples

plt.scatter(GNSS[anchor_idx,1], GNSS[anchor_idx,0], s = 1, c='k', zorder = 3) # plot all anchors
# for i in range(len(anchor_idx)):
#     ax.text(GNSS[anchor_idx[i],1],GNSS[anchor_idx[i],0], str( anchor_idx[i] ), fontsize=10, alpha = 0.5) # draw anchor indices on the trajectory

ax.plot(GNSS[:, 1],GNSS[:, 0], zorder = 0) # vehicle driving trajectory

process_n_cluster = len(set(processed_training_cluster_label))

cbar.set_ticks(list(np.linspace(0,1,process_n_cluster)))
cbar.set_ticklabels(list(range(0,process_n_cluster)))
ax.set_title('Reference labels')



# (Disabled) relabeling step that ensured cluster label 1 means "straight road".


# ====== Classify every frame against per-class prototype features ======
# Prototype = mean training feature of each processed cluster label
# (1/2/3 presumed to be straight-road / turn / alerting -- TODO confirm).
straight_avg_feat = np.average(training_feat[processed_training_cluster_label == 1], axis=0)
turn_avg_feat = np.average(training_feat[processed_training_cluster_label == 2], axis=0)
alter_avg_feat = np.average(training_feat[processed_training_cluster_label == 3], axis=0)

# Overlay the three prototypes on the earlier PCA axis.
prototype_feats = np.array([straight_avg_feat, turn_avg_feat, alter_avg_feat])
prototype_pca = pca.transform(prototype_feats)
pca_ax.scatter(prototype_pca[:,0], prototype_pca[:,1], c = 'black')

# Vectorized nearest-prototype assignment (dot-product similarity). argmax
# keeps the original first-maximum tie-breaking order; labels are 1..3.
classify_label = np.argmax(np.matmul(feat, prototype_feats.T), axis=1) + 1

supervised_labels_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/round2_supervised_lables.npy'
supervised_labels = np.load(supervised_labels_path)
# classify_label = supervised_labels + 1

classify_label = np.array(classify_label)

# Alternative (disabled): label each frame like its nearest training sample.
# for i in range(np.shape(feat)[0]):
#     cur_feat = feat[i]
#     sim = np.sum(cur_feat * training_feat, axis = 1)
#     idx = np.argmax(sim)
#     classify_label.append(processed_training_cluster_label[idx])

# for i in range(np.shape(feat)[0]):
#     cur_feat = feat[i]
#     sim = np.sum(cur_feat * training_feat, axis = 1)
#     idx = np.argmax(sim)
#     classify_label.append(processed_training_cluster_label[idx])


# Draw the classification result and the reference labels in one figure
# Left subplot: classification result
fig = plt.figure()
ax = fig.add_subplot(121)
img = gImage.imread(pkuBirdViewImg)
img = img[0:1087,:,:]
img[:,:,3] = alpha
ax.imshow(img, zorder = 0)
ax.scatter(GNSS[:, 1],GNSS[:, 0], c = classify_label, cmap = cmap, s = 5)
ax.set_title('Classifying results')
cbar = plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
cbar.set_ticks([0,0.5,1])
cbar.set_ticklabels(['SR','TR','AT'])
plt.scatter(GNSS[anchor_idx,1], GNSS[anchor_idx,0], s = 1, c='k', zorder = 3) # plot all anchors


# Right subplot: reference labels
ax = fig.add_subplot(122)
img = gImage.imread(pkuBirdViewImg)
img = img[0:1087,:,:]
img[:,:,3] = alpha
ax.imshow(img, zorder = 0)
ax.scatter(GNSS[:, 1],GNSS[:, 0], c = rf_label_allFrame, cmap = cmap, s = 5)
ax.set_title('Reference labels')
cbar = plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
cbar.set_ticks([0,0.5,1])
cbar.set_ticklabels(['SR','TR','AT'])

# NOTE(review): classification_report is not imported explicitly in this file;
# it is presumably re-exported by `from utils2 import *` (sklearn.metrics) -- confirm.
print('<================Training set classification report================>')
print(classification_report(rf_label_training, classify_label[training_idx], digits=3))
print('<================Training set classification report================>')

print('<================All frames classification report================>')
print(classification_report(rf_label_allFrame, classify_label, digits=3))
print('<================All frames classification report================>')

plt.show()