# 投稿IROS2021后基本明确了要做校园场景的Alerting traffic识别，需要处理不同数据，于是在原来的代码上做相应修改，规范化处理数据。

# Standard library
import copy
import os
import random
import re
import time

# Third-party / project imports.
# NOTE: `from utils2 import *` is kept *before* the explicit sklearn imports so
# that any star-import name collisions resolve the same way as before.
import numpy as np
from cv2 import cv2
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as gImage
from matplotlib.ticker import FuncFormatter
import scipy.stats
from utils2 import *
from sklearn.cluster import KMeans
from sklearn import decomposition
from sklearn.manifold import TSNE
from sklearn.metrics import confusion_matrix, classification_report
import random  # duplicate kept from the original file; harmless no-op re-import

# Optional per-anchor ground-truth label file; an empty string means
# "no reference labels available" (see the labelling fallback below).
referenceLabel_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/anchorReferenceLabel.txt' # default: no reference label

# Round-1 data paths.
# NOTE(review): these four assignments are immediately overwritten by the
# round-2 block below -- the blocks act as a manual comment/uncomment switch
# for selecting a recording round, so only the round-2 paths take effect.
feature_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round1/round1.npy'
anchorImg_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round1/anchorImgs'
position_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round1/round1_sync_position.npy' # latitude/longitude
video_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round1/round1.avi'

# Round-2 data paths (the ones actually used).
feature_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/round2.npy'
anchorImg_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/anchorImgs'
position_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/round2_sync_position.npy' # latitude/longitude
video_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/round2.avi'

# Round-3 data paths (disabled).
# feature_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round3/round3.npy'
# anchorImg_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round3/anchorImgs'
# position_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round3/round3_sync_position.npy' # latitude/longitude
# video_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round3/round3.avi'

# Satellite / bird's-eye image of the campus used as a plot background.
pkuBirdViewImg = 'D:\\Research\\2020ContrastiveLearningForSceneLabel\\Data\\pkuBirdView.png'

# ====================== Data loading (no algorithm involved) ======================
# Load the per-frame feature matrix (shape: frames x feature_dim -- TODO confirm).
feat = np.load(feature_path)
frame_count = np.shape(feat)[0]

# feat_cmp = np.load(feature_path_cmp)

# Load the anchors and the ids of their positive/negative samples
# (getAnchorPosNegIdx2 comes from utils2; each entry is (anchor_id, sample_ids)).
anchor_pos_list, anchor_neg_list = getAnchorPosNegIdx2(anchorImg_path, sampleNum=16)
anchor_idx = [anchor_pos_list[i][0] for i in range(len(anchor_pos_list))]
anchors_feat = feat[anchor_idx]

# Load the video file (frame count / dimensions are only used for display).
video = cv2.VideoCapture(video_path)
count = video.get(cv2.CAP_PROP_FRAME_COUNT)
print('video total count = %d'%count)
frameNum = count
video_width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
video_height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)

# Load the per-frame GNSS positions (column 0 / column 1 are used as
# latitude / longitude respectively in the plots below).
GNSS = np.load(position_path)

# Build ground-truth labels: one label per anchor from the reference file,
# propagated to every frame between consecutive anchors. Falls back to
# all-zero labels when no reference file is configured.
if referenceLabel_path != '':
    # Use a context manager so the file handle is always closed
    # (the original opened it and never closed it).
    with open(referenceLabel_path, 'r') as f:
        lines = f.readlines()
    # Only the first character of each line is parsed, so labels are
    # assumed to be single-digit integers -- TODO confirm file format.
    gt_label_anchor = [int(i[0]) for i in lines]
    gt_label_allFrame = list(range(frame_count))
    # Each anchor's label covers all frames up to (excluding) the next anchor.
    for i, idx in enumerate(anchor_idx):
        if i == len(anchor_idx) - 1:
            break
        for j in range(anchor_idx[i], anchor_idx[i+1]):
            gt_label_allFrame[j] = gt_label_anchor[i]

    # Tail frames (after the last anchor) take the last anchor's label.
    for i in range(anchor_idx[-1], frame_count):
        gt_label_allFrame[i] = gt_label_anchor[-1]
    # Head frames (before the first anchor) ALSO take the last anchor's
    # label -- presumably because the route is a closed loop.
    # NOTE(review): confirm this is intended rather than gt_label_anchor[0].
    for i in range(0, anchor_idx[0]):
        gt_label_allFrame[i] = gt_label_anchor[-1]

else:
    # No reference labels available: everything is class 0.
    gt_label_anchor = [0 for i in anchor_idx]
    gt_label_allFrame = [0 for i in range(frame_count)]

gt_label_anchor = np.array(gt_label_anchor)
gt_label_allFrame = np.array(gt_label_allFrame)
# ====================== t-SNE dimensionality reduction ======================
cmap = 'rainbow'
# Project the anchor features to 2-D for visual inspection.
lower_dim_anchor_feat = TSNE().fit_transform(anchors_feat)
fig = plt.figure()
fig.add_subplot(131)
plt.scatter(lower_dim_anchor_feat[:,0], lower_dim_anchor_feat[:,1], c=gt_label_anchor, cmap=cmap)
plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
# Tag each point with its anchor frame index (faint alpha to reduce clutter).
for i in range(len(lower_dim_anchor_feat)):
    plt.annotate(anchor_idx[i], xy=(lower_dim_anchor_feat[i,0], lower_dim_anchor_feat[i,1]), xytext=(lower_dim_anchor_feat[i,0], lower_dim_anchor_feat[i,1]), alpha=0.1)
plt.title('anchor t-SNE')

# ====================== PCA dimensionality reduction ======================
# fit_transform() both fits and transforms, so the separate pca.fit() call
# the original made first was redundant (the model was fitted twice).
pca = decomposition.PCA(n_components=2)
anchors_feat_pca = pca.fit_transform(anchors_feat)
fig.add_subplot(132)
plt.scatter(anchors_feat_pca[:,0], anchors_feat_pca[:,1], c = gt_label_anchor, cmap=cmap)
plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
plt.title('anchor PCA')

# Separate PCA model fitted on every frame's feature.
pca = decomposition.PCA(n_components=2)
feat_pca = pca.fit_transform(feat)
fig.add_subplot(133)
plt.scatter(feat_pca[:,0], feat_pca[:,1], c = gt_label_allFrame ,cmap = cmap, alpha=0.2)
plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
plt.title('allFrame PCA')

# Variance explained by the two components of the all-frame PCA.
print('pca explained_variance_ratio_ : ',pca.explained_variance_ratio_)


# ============ Similarity of one query frame to every video frame ============
sampleId = 10  # frame index used as the query sample

# ===== Read and display that video frame =====
video.set(cv2.CAP_PROP_POS_FRAMES, sampleId)
ret, frame = video.read()  # NOTE(review): `ret` is not checked; the read may fail
resize_ratio = 0.7
frame = cv2.resize(frame,(int(video_width*resize_ratio),int(video_height*resize_ratio)))
cv2.imshow('frame %d'%sampleId, frame)

# ===== Scatter plot: this frame's similarity to every frame =====
# Dot product of each frame feature with the query feature; presumably the
# features are L2-normalised so these are cosine similarities -- TODO confirm.
sample_feat = feat[sampleId]
sample_feat = sample_feat[:,np.newaxis]  # (D,) -> (D,1) column vector
dises = np.matmul(feat, sample_feat)     # (N,1) similarity per frame

fig = plt.figure()
fig.add_subplot(121)
a = plt.scatter(list(range(len(dises))), dises[:,0], s = 1, zorder = 0)
plt.title('similarity of frame id %d to other frames'%sampleId)
plt.scatter(anchor_idx, dises[anchor_idx], c='k', s=15, zorder = 1)  # anchors in black
plt.scatter(sampleId,dises[sampleId],c='r', zorder = 2)              # the query itself in red
plt.ylim((-1, 1.2))

# ===== Line plot of the same similarities =====
fig.add_subplot(122)
a = plt.plot(dises, zorder = 0)
plt.title('similarity of frame id %d to other frames'%sampleId)
plt.scatter(anchor_idx, dises[anchor_idx], c='k', s=15, zorder = 1)
plt.scatter(sampleId,dises[sampleId],c='r', zorder = 2)
plt.ylim((-1, 1.2))

# ===== Plot the similarities along the driving trajectory =====
# (debug colour-range overrides kept for reference)
# dises[0,0]=1
# dises[1,0]=-1
min_dises = min(dises)
max_dises = max(dises)
cmap = 'bwr'
fig, ax = plt.subplots()
# Vehicle trajectory, coloured by similarity to the query frame.
plt.scatter(GNSS[:, 1], GNSS[:, 0], s=1, c=dises[:, 0], cmap=cmap, zorder=1)
plt.axis('equal')
cbar = plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
# Relabel the unit-range colourbar ticks with the actual min/mid/max values.
min_dises = '%.3f' % min(dises)
max_dises = '%.3f' % max(dises)
mid_dises = '%.3f' % ((min(dises) + max(dises)) / 2)
cbar.set_ticks([0, 0.5, 1])
cbar.set_ticklabels((min_dises, mid_dises, max_dises))
plt.scatter(GNSS[sampleId, 1], GNSS[sampleId, 0], s=20, c='yellow')  # the query point
plt.scatter(GNSS[anchor_idx, 1], GNSS[anchor_idx, 0], s=2, c='k')    # all anchors
# Label every anchor with its frame index (faint to avoid clutter).
for anchor_id in anchor_idx:
    ax.text(GNSS[anchor_id, 1], GNSS[anchor_id, 0], str(anchor_id),
            fontsize=10, alpha=0.1)
plt.title('frame %d similarity to other frames' % sampleId)

# ===== Plot the similarities along the trajectory, projected onto the satellite image =====
rotate = 0 # in degrees (unused below)
shiftX = 625
shiftY = 620
dx = 0.777
dy = 0.777 # all of the parameters above were hand-tuned
alpha = 0.5

# Map lat/lon to satellite-image pixel coordinates.
# WARNING: GNSS is mutated IN PLACE here, so every plot after this point
# works in image coordinates, not raw lat/lon.
GNSS[:,0] = -GNSS[:,0]
GNSS[:,1] *= dx
GNSS[:,0] *= dy
GNSS[:,1] += shiftX
GNSS[:,0] += shiftY

cmap = 'bwr'

# Recompute the query frame's similarity to all frames.
sample_feat = feat[sampleId]
sample_feat = sample_feat[:,np.newaxis]
dises = np.matmul(feat, sample_feat)
# dises[0,0]=1
# dises[1,0] = -1
fig = plt.figure()
ax = fig.add_subplot(111)
img = gImage.imread(pkuBirdViewImg)
img = img[0:1087,:,:]   # crop to the mapped area
img[:,:,3] = alpha      # make the background translucent (requires RGBA png)
ax.imshow(img, zorder = 0)
ax.scatter(GNSS[:,1], GNSS[:,0], s=1, c=dises[:,0], cmap=cmap, zorder = 1) # vehicle trajectory
plt.axis('equal')
cbar = plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
# Relabel the unit-range colourbar ticks with actual min/mid/max similarity.
min_dises = '%.3f'%min(dises)
max_dises = '%.3f'%max(dises)
mid_dises = '%.3f'%((min(dises)+max(dises))/2)
cbar.set_ticks([0,0.5,1])
cbar.set_ticklabels((min_dises, mid_dises,max_dises))
plt.scatter(GNSS[sampleId,1], GNSS[sampleId,0], s = 20, c='yellow') # the query point
plt.scatter(GNSS[anchor_idx,1], GNSS[anchor_idx,0], s = 2, c='k') # all anchors
for i in anchor_idx:
    pos_x = GNSS[i,1]
    pos_y = GNSS[i,0]
    ax.text(pos_x, pos_y, str(i), fontsize=10, alpha = 0.1)
plt.title('frame %d similarity to other frames'%sampleId)

# Cross-round comparison: take one frame from another recording's features
# and histogram its similarity to the current round's features.
sampleId = 27674
# NOTE(review): the file is named round3.npy but lives in the round2
# directory -- confirm this path is intentional.
feat2 = np.load('E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round2/round3.npy')
sample_feat = feat2[sampleId]
sample_feat = sample_feat[:,np.newaxis]
dises = np.matmul(feat, sample_feat)

# Normalised histogram of the similarities over [-1, 1].
plt.figure()
plt.hist(dises, bins=100,weights = np.zeros_like(dises) + 1 / len(dises), range=(-1,1))
plt.title('frame %d'%(sampleId))
plt.ylim(0,0.08)
plt.xlim(-1,1)
plt.show()
# Early exit for this experiment -- everything below this line is unreachable
# unless this exit is removed.
exit(0)

# # ==========相邻帧的相似度画在轨迹上===========
# interver_frame = 10
# neighbor_sim = np.zeros(frame_count)
# for i in range(interver_frame - 1, frame_count):
#     tmp1 = feat[i]
#     tmp2 = feat[i-interver_frame]
#     fuck = tmp1*tmp2
#     neighbor_sim[i] = np.sum(tmp1*tmp2)

# fig = plt.figure()
# ax = fig.add_subplot(111)
# img = gImage.imread(pkuBirdViewImg)
# img = img[0:1087,:,:]
# img[:,:,3] = alpha
# ax.imshow(img, zorder = 0)
# ax.scatter(GNSS[:,1], GNSS[:,0], s=1, c=neighbor_sim, cmap=cmap, zorder = 1) # 车辆行驶轨迹
# plt.axis('equal')
# plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
# plt.scatter(GNSS[anchor_idx,1], GNSS[anchor_idx,0], s = 2, c='k') # 所有锚点
# for i in anchor_idx:
#     pos_x = GNSS[i,1]
#     pos_y = GNSS[i,0]
#     ax.text(pos_x, pos_y, str(i), fontsize=10, alpha = 0.1)
# plt.title('neighbor %d frame similarity'%interver_frame)

# plt.figure()
# a = plt.plot(list(range(len(neighbor_sim))), neighbor_sim,  zorder = 0)
# plt.title('neighbor %d frame similarity'%interver_frame)
# plt.scatter(anchor_idx, neighbor_sim[anchor_idx], c='k', s=15, zorder = 1)

# ===== Histogram of this frame's similarity to all frames =====
# (normalised frequencies on the left, raw counts on the right)
fig = plt.figure()
fig.add_subplot(121)
dises = dises[:, 0]  # flatten (N,1) -> (N,) for histogramming
uniform_weights = np.zeros_like(dises) + 1 / len(dises)
plt.hist(dises, bins=100, weights=uniform_weights, range=(-1, 1))
plt.title('frame %d similarity to all frames histogram' % sampleId)
plt.ylim(0, 0.08)
plt.xlim(-1, 1)

# ===== Same data, unnormalised counts =====
fig.add_subplot(122)
plt.hist(dises, bins=100)
plt.title('frame %d similarity to all frames histogram' % sampleId)
plt.ylim(0, 1900)
plt.xlim(-1, 1)

# dises1 = dises[gt_label_allFrame != 2]
# # =====这帧图像与所有非弯道相似度的直方图=====
# fig = plt.figure()
# fig.add_subplot(121)
# plt.hist(dises1, bins=100,weights = np.zeros_like(dises1) + 1 / len(dises1), range=(-1,1))
# plt.title('frame %d similarity to all not turn road histogram'%sampleId)
# plt.ylim(0,0.08)
# plt.xlim(-1,1)

# # =====这帧图像与所有非弯道相似度的直方图=====
# fig.add_subplot(122)
# plt.hist(dises1, bins=100)
# plt.title('frame %d similarity to all not turn road histogram'%sampleId)
# plt.ylim(0,1900)
# plt.xlim(-1,1)

# dises2 = dises[gt_label_allFrame == 3]
# # =====这帧图像与所有动态交通相似度的直方图=====
# fig = plt.figure()
# fig.add_subplot(121)
# plt.hist(dises2, bins=100,weights = np.zeros_like(dises2) + 1 / len(dises2), range=(-1,1))
# plt.title('frame %d similarity to all dynamic histogram'%sampleId)
# plt.ylim(0,0.08)
# plt.xlim(-1,1)

# # =====这帧图像与所有动态交通相似度的直方图=====
# fig.add_subplot(122)
# plt.hist(dises2, bins=100)
# plt.title('frame %d similarity to all dynamic histogram'%sampleId)
# plt.ylim(0,1900)
# plt.xlim(-1,1)


# # ======================可视化指定区域的特征======================
# roundLierIdx = list(range(0,1795)) + list(range(19802,19861)) + list(range(31697,34941))
# roundLierIdx = list(range(31705, 34168))
# dormIdx = list(range(24782,29554))
# natureIdx = list(range(6815, 7328)) + list(range(9043, 10009)) #+ list(range(13636, 13868)) + list(range(15209, 15316)) + list(range(18741, 18879))
# # roundLierIdx = list(range(0, 1729))
# # dormIdx = list(range(26329, 27700))
# # natureIdx = list(range(18733, 18871))

# candidateAreaIdx = roundLierIdx

# lierFeat = feat[candidateAreaIdx,:]
# dises = np.matmul(lierFeat, sample_feat)
# dises = dises[:,0]

# dises[0] = 1
# dises[1] = -1

# # =====这帧图像与某区域图像相似度的直方图=====
# plt.figure()
# plt.hist(dises, bins=100,weights = np.zeros_like(dises) + 1 / len(dises), range=(-1,1))
# # plt.hist(dises, bins = 100)
# plt.ylim(0,0.08)
# plt.xlim(-1,1)
# plt.title('frame %d similarity to area frames histogram'%sampleId)


# # =====这帧图像与某区域图像相似度的离散成n类画在卫星图上=====
# fig = plt.figure()
# ax = fig.add_subplot(111)
# img = gImage.imread(pkuBirdViewImg)
# img = img[0:1087,:,:]
# img[:,:,3] = alpha
# ax.imshow(img, zorder = 0)

# idx = np.array(candidateAreaIdx)[dises > 0.7]
# ax.scatter(GNSS[idx,1], GNSS[idx,0], s=1, c='r') # 车辆行驶轨迹

# idx = np.array(candidateAreaIdx)[dises < 0.4]
# ax.scatter(GNSS[idx,1], GNSS[idx,0], s=1, c='b') # 车辆行驶轨迹

# idx = np.array(candidateAreaIdx)[np.logical_and(dises > 0.4 , dises < 0.7)]
# ax.scatter(GNSS[idx,1], GNSS[idx,0], s=1, c='g') # 车辆行驶轨迹


# plt.axis('equal')
# plt.scatter(GNSS[sampleId,1], GNSS[sampleId,0], s = 20, c='yellow') # 样本点
# plt.scatter(GNSS[anchor_idx,1], GNSS[anchor_idx,0], s = 2, c='k') # 所有锚点
# for i in anchor_idx:
#     pos_x = GNSS[i,1]
#     pos_y = GNSS[i,0]
#     ax.text(pos_x, pos_y, str(i), fontsize=10, alpha = 0.1)
# plt.title('frame %d similarity to area frames--discretization'%sampleId)


# # =====这帧图像与某区域图像相似度画在卫星图上=====
# fig = plt.figure()
# ax = fig.add_subplot(111)
# img = gImage.imread(pkuBirdViewImg)
# img = img[0:1087,:,:]
# img[:,:,3] = alpha
# ax.imshow(img, zorder = 0)
# ax.scatter(GNSS[candidateAreaIdx,1], GNSS[candidateAreaIdx,0], s=1, c=dises, cmap=cmap, zorder = 1) # 车辆行驶轨迹
# plt.axis('equal')
# plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
# plt.scatter(GNSS[sampleId,1], GNSS[sampleId,0], s = 20, c='yellow') # 样本点
# plt.scatter(GNSS[anchor_idx,1], GNSS[anchor_idx,0], s = 2, c='k') # 所有锚点
# for i in anchor_idx:
#     pos_x = GNSS[i,1]
#     pos_y = GNSS[i,0]
#     ax.text(pos_x, pos_y, str(i), fontsize=10, alpha = 0.1)
# plt.title('frame %d similarity to area frames'%sampleId)

# # =====把区域的轨迹画在卫星图上=====
# fig = plt.figure()
# ax = fig.add_subplot(111)
# img = gImage.imread(pkuBirdViewImg)
# img = img[0:1087,:,:]
# img[:,:,3] = alpha
# ax.imshow(img, zorder = 0)
# ax.scatter(GNSS[roundLierIdx,1], GNSS[roundLierIdx,0], s=1, c='b')
# ax.scatter(GNSS[dormIdx,1], GNSS[dormIdx,0], s=1, c='r')
# ax.scatter(GNSS[natureIdx,1], GNSS[natureIdx,0], s=1, c='g')
# plt.axis('equal')
# plt.title('area traj')

# # =====计算区域内、区域间的相似度=====
# tmpFeat = feat[roundLierIdx+dormIdx+natureIdx, :]

# roundLierFeat = tmpFeat[0:len(roundLierIdx),:]
# dormFeat = tmpFeat[len(roundLierIdx):(len(roundLierIdx + dormIdx)),:]
# natureFeat = tmpFeat[len(roundLierIdx + dormIdx):len(roundLierIdx + dormIdx+natureIdx),:]

# roudLierFeat_trans = roundLierFeat.transpose(1,0)
# dormFeat_trans = dormFeat.transpose(1,0)
# natureFeat_trans = natureFeat.transpose(1,0)

# roundLier_matrix = np.matmul(roundLierFeat, roudLierFeat_trans)
# dorm_matrix = np.matmul(dormFeat, dormFeat_trans)
# nature_matrix = np.matmul(natureFeat, natureFeat_trans)

# sr = np.shape(roundLier_matrix)[0]
# roundLier_sim = (np.sum(roundLier_matrix) - roundLier_matrix.trace()) / (sr*(sr-1))
# sd = np.shape(dorm_matrix)[0]
# dorm_sim = (np.sum(dorm_matrix) - dorm_matrix.trace()) / (sd*(sd-1))
# sn = np.shape(nature_matrix)[0]
# nature_sim = (np.sum(nature_matrix) - nature_matrix.trace()) / (sn*(sn-1))

# r_d = np.average(np.matmul(roundLierFeat, dormFeat_trans))
# r_n = np.average(np.matmul(roundLierFeat, natureFeat_trans))
# d_n = np.average(np.matmul(dormFeat, natureFeat_trans))

# print('inside area similarity', roundLier_sim, nature_sim, dorm_sim)
# print('betewwn area similarity', r_n, r_d, d_n)

# # =====对区域进行聚类=====
# cluster_num = 2
# cmap = 'rainbow'
# kmeans = KMeans(n_clusters=cluster_num).fit(roundLierFeat)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# img = gImage.imread(pkuBirdViewImg)
# img = img[0:1087,:,:]
# img[:,:,3] = alpha
# ax.imshow(img, zorder = 0)
# ax.scatter(GNSS[roundLierIdx,1], GNSS[roundLierIdx,0], s=1, c=kmeans.labels_, cmap=cmap, zorder = 1) # 车辆行驶轨迹
# plt.axis('equal')
# plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
# plt.scatter(GNSS[anchor_idx,1], GNSS[anchor_idx,0], s = 2, c='k') # 所有锚点
# plt.title('area cluster--%d'%cluster_num)

# # ======计算直路、弯路内部相似度与之间的相似度=====
# straight_feat = feat[gt_label_allFrame == 1]
# straight_feat_trans = straight_feat.transpose(1,0)
# turn_feat = feat[gt_label_allFrame == 2]
# turn_feat_trans = turn_feat.transpose(1,0)
# dyna_feat = feat[gt_label_allFrame == 3]
# dyna_feat_trans = dyna_feat.transpose(1,0)

# straight_sim = np.matmul(straight_feat, straight_feat_trans)
# eye_matrix = np.logical_not(np.eye(np.shape(straight_sim)[0], dtype=bool))
# straight_sim = straight_sim[eye_matrix]
# straight_sim_avg = np.average(straight_sim)
# straight_sim_std = np.std(straight_sim)


# turn_sim = np.matmul(turn_feat, turn_feat_trans)
# eye_matrix = np.logical_not(np.eye(np.shape(turn_sim)[0], dtype=bool))
# turn_sim = turn_sim[eye_matrix]
# turn_sim_avg = np.average(turn_sim)
# turn_sim_std = np.std(turn_sim)

# dyna_sim = np.matmul(dyna_feat, dyna_feat_trans)
# eye_matrix = np.logical_not(np.eye(np.shape(dyna_sim)[0], dtype=bool))
# dyna_sim = dyna_sim[eye_matrix]
# dyna_sim_avg = np.average(dyna_sim)
# dyna_sim_std = np.std(dyna_sim)

# straight_turn_sim = np.average(np.matmul(straight_feat, turn_feat_trans))
# straight_dyna_sim = np.average(np.matmul(straight_feat, dyna_feat_trans))
# turn_dyna_sim = np.average(np.matmul(turn_feat, dyna_feat_trans))

# straight_turn_std = np.std(np.matmul(straight_feat, turn_feat_trans))
# straight_dyna_std = np.std(np.matmul(straight_feat, dyna_feat_trans))
# turn_dyna_std = np.std(np.matmul(turn_feat, dyna_feat_trans))

# print('straight_sim_avg %f\n turn_sim_avg %f\n dyna_sim_avg %f\n straight_turn_sim %f\n straight_dyna_sim %f\n turn_dyna_sim %f\n'%(straight_sim_avg, turn_sim_avg, dyna_sim_avg,straight_turn_sim, straight_dyna_sim, turn_dyna_sim))
# print('straight_sim_std %f\n turn_sim_std %f\n dyna_sim_std %f \n straight_turn_std %f\n straight_dyna_std %f \n turn_dyna_std %f\n'%(straight_sim_std, turn_sim_std, dyna_sim_std, straight_turn_std, straight_dyna_std, turn_dyna_std))

# ======== For every frame: build a histogram of its similarity to all other
# frames and use it as a new per-frame feature. Cached on disk next to the
# feature file so the O(N^2) similarity matrix is only computed once. ========

sim_hist_feat_path = feature_path[0:-4] + '_simHistFeat.npy'
if os.path.isfile(sim_hist_feat_path):
    sim_hist_feat = np.load(sim_hist_feat_path)
else:
    # frame_count x frame_count similarity matrix (cosine similarity,
    # assuming L2-normalised features -- TODO confirm). O(N^2) memory.
    all_feat_sim = np.matmul(feat, feat.transpose(1,0))
    sim_hist_feat = []
    bins = 100
    for i in range(frame_count):
        cur_sim = all_feat_sim[i]
        # Drop the self-similarity entry before histogramming.
        # (The original also cleared mask[frame_count:], which was a no-op
        # since the mask has exactly frame_count entries.)
        mask = np.ones(frame_count, dtype=bool)
        mask[i] = False
        cur_sim = cur_sim[mask]
        # Normalised histogram over [-1, 1]: bin weights sum to 1.
        cur_hist = np.histogram(cur_sim, bins=bins, weights = np.zeros_like(cur_sim) + 1 / len(cur_sim) ,range=(-1,1))[0]
        sim_hist_feat.append(cur_hist)

    sim_hist_feat = np.array(sim_hist_feat)
    # Cache for subsequent runs (path already computed above).
    np.save(sim_hist_feat_path, sim_hist_feat)


# straight_sim = all_feat_sim[gt_label_allFrame==1]
# turn_sim = all_feat_sim[gt_label_allFrame==2]
# dyna_sim = all_feat_sim[gt_label_allFrame==3]

# straight_sim = straight_sim.reshape(-1)
# turn_sim = turn_sim.reshape(-1)
# dyna_sim = dyna_sim.reshape(-1)

# plt.figure()
# plt.hist(straight_sim, bins=100,weights = np.zeros_like(straight_sim) + 1 / len(straight_sim), range=(-1,1))
# plt.title('straight avg hist')
# plt.ylim(0,0.03)
# plt.xlim(-1,1)

# plt.figure()
# plt.hist(turn_sim, bins=100,weights = np.zeros_like(turn_sim) + 1 / len(turn_sim), range=(-1,1))
# plt.title('turn avg hist')
# plt.ylim(0,0.03)
# plt.xlim(-1,1)

# plt.figure()
# plt.hist(dyna_sim, bins=100,weights = np.zeros_like(dyna_sim) + 1 / len(dyna_sim), range=(-1,1))
# plt.title('dyna avg hist')
# plt.ylim(0,0.03)
# plt.xlim(-1,1)


# # ============根据相似度进行分类============
# straight_sim = feat[gt_label_allFrame==1]
# turn_sim = feat[gt_label_allFrame==2]
# dyna_sim = feat[gt_label_allFrame==3]

# straight_sim = np.average(straight_sim, axis=0)
# turn_sim = np.average(turn_sim, axis=0)
# dyna_sim = np.average(dyna_sim, axis=0)

# cal_label_allFrame = [3 for i in range(frame_count)]
# for i in range(frame_count):
#     cur_feat = feat[i]
#     js_s = np.sum(cur_feat * straight_sim)
#     js_t = np.sum(cur_feat * turn_sim)
#     js_d = np.sum(cur_feat * dyna_sim)
#     if js_s > js_t and js_s > js_d:
#         cal_label_allFrame[i] = 1
#     elif js_t > js_s and js_t > js_d:
#         cal_label_allFrame[i] = 2

# classes = ['straight','turn','dyna']
# cm = confusion_matrix(gt_label_allFrame, cal_label_allFrame)
# plot_confusion_matrix(cm, classes, title='sim confusion matrix')


# # =================根据全局相似度分布进行分类，真值=======================
# train_gt_label_allFrame = gt_label_allFrame.copy()
# train_gt_label_allFrame[frame_count:] = 0

# straight_sim_hist_feat = sim_hist_feat[train_gt_label_allFrame==1]
# turn_sim_hist_feat = sim_hist_feat[train_gt_label_allFrame==2]
# dyna_sim_hist_feat = sim_hist_feat[train_gt_label_allFrame==3]

# straight_sim_hist_feat = np.average(straight_sim_hist_feat, axis=0)
# turn_sim_hist_feat = np.average(turn_sim_hist_feat, axis=0)
# dyna_sim_hist_feat = np.average(dyna_sim_hist_feat, axis=0)

# cal_label_allFrame = [3 for i in range(frame_count)]
# for i in range(frame_count):
#     cur_hist = sim_hist_feat[i]
#     js_s = JS_D(cur_hist, straight_sim_hist_feat)
#     js_t = JS_D(cur_hist, turn_sim_hist_feat)
#     js_d = JS_D(cur_hist, dyna_sim_hist_feat)
#     if js_s < js_t and js_s < js_d:
#         cal_label_allFrame[i] = 1
#     elif js_t < js_s and js_t < js_d:
#         cal_label_allFrame[i] = 2
    
# classes = ['straight','turn','dyna']
# cm = confusion_matrix(gt_label_allFrame[0:frame_count], cal_label_allFrame[0:frame_count])
# plot_confusion_matrix(cm, classes, title='GT hist confusion matrix on training set')



# # ==================画全局相似度分布========================
# fig = plt.figure()
# ax = fig.add_subplot(1, 3, 1)
# plt.bar(range(100), straight_sim_hist_feat,width=1, align='edge')
# plt.ylim(0,0.04)
# ax.set_xticks([0,25,50,75,99])
# ax.set_xticklabels(['-1','-0.5','0','0.5','1'])
# plt.title('GT straight scene descriptor')


# ax = fig.add_subplot(1, 3, 2)
# plt.bar(range(100), turn_sim_hist_feat,width=1, align='edge')
# plt.ylim(0,0.04)
# ax.set_xticks([0,25,50,75,99])
# ax.set_xticklabels(['-1','-0.5','0','0.5','1'])
# plt.title('GT turn scene descriptor')


# ax = fig.add_subplot(1, 3, 3)
# plt.bar(range(100), dyna_sim_hist_feat,width=1, align='edge')
# plt.ylim(0,0.04)
# ax.set_xticks([0,25,50,75,99])
# ax.set_xticklabels(['-1','-0.5','0','0.5','1'])
# plt.title('GT dyna scene descriptor')


# =================== Compute the scene descriptors ========================
# Hand-picked seed frames, one per scene class (straight / turn / dynamic).
# NOTE(review): these indices are specific to one recording -- confirm they
# are valid for the currently selected round.
straight_seed_hist = sim_hist_feat[14960]
turn_seed_hist = sim_hist_feat[21077]
dyna_seed_hist = sim_hist_feat[10723]

straight_scene_descriptor = []
turn_scene_descriptor = []
dyna_scene_descriptor = []

# Assign every frame to the seed whose similarity histogram is closest in
# Jensen-Shannon divergence (JS_D comes from utils2), then average each
# group's histograms into one descriptor per class.
for i in range(frame_count):
    cur_hist = sim_hist_feat[i]
    js_s = JS_D(cur_hist, straight_seed_hist)
    js_t = JS_D(cur_hist, turn_seed_hist)
    js_d = JS_D(cur_hist, dyna_seed_hist)
    if js_s < js_t and js_s < js_d:
        straight_scene_descriptor.append(cur_hist)
    elif js_t < js_s and js_t < js_d:
        turn_scene_descriptor.append(cur_hist)
    else:
        # Ties and the "dynamic" case both land here.
        dyna_scene_descriptor.append(cur_hist)

straight_scene_descriptor = np.average(np.array(straight_scene_descriptor), axis=0)
turn_scene_descriptor =  np.average(np.array(turn_scene_descriptor), axis=0)
dyna_scene_descriptor =  np.average(np.array(dyna_scene_descriptor), axis=0)

# ================= Plot the computed scene descriptors =================
# Each descriptor is a 100-bin histogram over similarity range [-1, 1];
# the x tick positions are relabelled accordingly.
fig = plt.figure()
ax = fig.add_subplot(1, 3, 1)
plt.bar(range(100), straight_scene_descriptor,width=1, align='edge')
# plt.plot(straight_scene_descriptor)
plt.ylim(0,0.04)
ax.set_xticks([0,25,50,75,99])
ax.set_xticklabels(['-1','-0.5','0','0.5','1'])
plt.title('cal straight scene descriptor')

ax = fig.add_subplot(1, 3, 2)
plt.bar(range(100), turn_scene_descriptor,width=1, align='edge')
# plt.plot(turn_scene_descriptor)
plt.ylim(0,0.04)
ax.set_xticks([0,25,50,75,99])
ax.set_xticklabels(['-1','-0.5','0','0.5','1'])
plt.title('cal turn scene descriptor')


ax = fig.add_subplot(1, 3, 3)
plt.bar(range(100), dyna_scene_descriptor,width=1, align='edge')
# plt.plot(dyna_scene_descriptor)
plt.ylim(0,0.04)
ax.set_xticks([0,25,50,75,99])
ax.set_xticklabels(['-1','-0.5','0','0.5','1'])
plt.title('cal dyna scene descriptor')


plt.show()
# Early exit -- everything below this line is unreachable unless removed.
exit(0)

# ========== Classify every frame by its nearest scene descriptor ==========
# (Currently unreachable because of the exit(0) above; kept functional.)
# Nearest = smallest Jensen-Shannon divergence between the frame's
# similarity histogram and each class descriptor.
cal_label_allFrame = [3 for i in range(frame_count)]  # default: dyna (3)
for i in range(frame_count):
    cur_hist = sim_hist_feat[i]
    js_s = JS_D(cur_hist, straight_scene_descriptor)
    js_t = JS_D(cur_hist, turn_scene_descriptor)
    js_d = JS_D(cur_hist, dyna_scene_descriptor)
    if js_s < js_t and js_s < js_d:
        cal_label_allFrame[i] = 1  # straight
    elif js_t < js_s and js_t < js_d:
        cal_label_allFrame[i] = 2  # turn

# Persist the predicted labels, one per line. Use a context manager so the
# handle is closed even if a write fails (the original used open/close).
cal_label_allFrame_path = 'D:/Research/2020ContrastiveLearningForSceneLabel/Data/campus_img_dataset/cal_label_allFrame.txt'
with open(cal_label_allFrame_path,'w') as f:
    for i in cal_label_allFrame:
        f.write(str(i)+'\n')

classes = ['straight','turn','dyna']
cm = confusion_matrix(gt_label_allFrame[:frame_count], cal_label_allFrame[:frame_count])
plot_confusion_matrix(cm, classes, title='cal hist confusion matrix on training set')

print('<================Training set classification report================>')
print(classification_report(gt_label_allFrame[:frame_count],cal_label_allFrame[:frame_count],digits=3))
print('<================Training set classification report================>')

# =========== Project the classification result onto the trajectory ===========
cmap = 'rainbow'
fig = plt.figure()
ax = fig.add_subplot(111)
img = gImage.imread(pkuBirdViewImg)
img = img[0:1087,:,:]   # crop to the mapped area
img[:,:,3] = alpha      # make the satellite background translucent
ax.imshow(img, zorder = 0)
ax.scatter(GNSS[0:frame_count,1], GNSS[0:frame_count,0], s=1, c=cal_label_allFrame[0:frame_count], cmap=cmap, zorder = 1) # trajectory coloured by predicted class
plt.axis('equal')
plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))

# Anchors belonging to the training portion (index < frame_count).
train_anchor_idx = np.array(anchor_idx)
train_anchor_idx = train_anchor_idx[train_anchor_idx < frame_count]

plt.scatter(GNSS[train_anchor_idx,1], GNSS[train_anchor_idx,0], s = 2, c='k') # all training anchors
for i in train_anchor_idx:
    pos_x = GNSS[i,1]
    pos_y = GNSS[i,0]
    ax.text(pos_x, pos_y, str(i), fontsize=10, alpha = 0.1)
plt.title('scene classificaton on training set -- base on cal scene descriptor')


# straight_sim_hist_feat = list(sim_hist_feat[gt_label_allFrame==1])
# turn_sim_hist_feat = list(sim_hist_feat[gt_label_allFrame==2])
# dyna_sim_hist_feat = list(sim_hist_feat[gt_label_allFrame==3])

# # ======计算特征相似度分布间的JS散度==========


# loop = 200000
# res = 0
# start = time.time()
# for i in range(loop):
#     p = random.sample(dyna_sim_hist_feat, 1)[0]
#     q = random.sample(turn_sim_hist_feat, 1)[0]
#     res += js(p,q)
# res /= loop
# print('time %f\n'%(time.time()-start))
# print(res)

# ============ Testing-set evaluation and visualisation ============
# NOTE(review): frame_count == len(feat) == len(gt_label_allFrame) as loaded
# above, so every `[frame_count:]` slice below is EMPTY unless the feature
# file actually concatenates train+test rounds -- confirm before trusting
# these reports.
print('<================Testing set classification report================>')
print(classification_report(gt_label_allFrame[frame_count:],cal_label_allFrame[frame_count:],digits=3))
print('<================Testing set classification report================>')
classes = ['straight','turn','dyna']
cm = confusion_matrix(gt_label_allFrame[frame_count:], cal_label_allFrame[frame_count:])
plot_confusion_matrix(cm, classes, title='cal hist confusion matrix on testing set')


cmap = 'rainbow'
fig = plt.figure()
ax = fig.add_subplot(111)
img = gImage.imread(pkuBirdViewImg)
img = img[0:1087,:,:]
img[:,:,3] = alpha
ax.imshow(img, zorder = 0)
ax.scatter(GNSS[frame_count:,1], GNSS[frame_count:,0], s=1, c=cal_label_allFrame[frame_count:], cmap=cmap, zorder = 1) # testing-portion trajectory coloured by predicted class
plt.axis('equal')
plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))

# Anchors belonging to the testing portion (index >= frame_count).
test_anchor_idx = np.array(anchor_idx)
test_anchor_idx = test_anchor_idx[test_anchor_idx >= frame_count]

plt.scatter(GNSS[test_anchor_idx,1], GNSS[test_anchor_idx,0], s = 2, c='k') # all testing anchors
for i in test_anchor_idx:
    pos_x = GNSS[i,1]
    pos_y = GNSS[i,0]
    ax.text(pos_x, pos_y, str(i), fontsize=10, alpha = 0.1)
plt.title('scene classificaton on testing set-- base on cal scene descriptor')

# Inspect the similarity-histogram feature of one specific frame.
testID = 35898
test = sim_hist_feat[testID]
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
plt.bar(range(100), test,width=1, align='edge')
plt.ylim(0,0.04)
ax.set_xticks([0,25,50,75,99])
ax.set_xticklabels(['-1','-0.5','0','0.5','1'])
plt.title('%d hist'%testID)



# ====== Average exp-similarity between anchor samples and their positive /
# negative samples. NOTE: despite the variable names, a larger value means
# MORE similar (exp of a dot product), not a geometric distance. ======
loop = 1
avg_pos_dis = 0
avg_neg_dis = 0

# FIX: the outer loop variable was `i` and was shadowed by BOTH inner loops
# below -- harmless while loop == 1, but a latent bug for loop > 1.
for trial in range(loop):
    # Collect the indices of every positive sample of every anchor.
    imgs_idx = []
    for entry in anchor_pos_list:
        imgs_idx += entry[1]
    imgs_feat = feat[imgs_idx] # features of all sample points

    # For each sample, draw one random positive from its anchor's positive list.
    pos_idx = [random.sample(anchor_pos_list[i][1],1)[0] for i in range(len(anchor_pos_list)) for j in anchor_pos_list[i][1]]
    pos_feat = feat[pos_idx]
    pos_feat = pos_feat.transpose(1,0)
    matrix = np.matmul(imgs_feat, pos_feat)
    matrix = np.exp(matrix)
    # Diagonal = each sample paired with its own drawn positive.
    pos_dis = matrix.trace()/len(imgs_idx)
    avg_pos_dis += pos_dis

    # Same with one random negative per sample.
    # NOTE(review): the inner `for j in anchor_pos_list[i][1]` (not the
    # negative list) is kept from the original -- it is only correct when the
    # positive and negative lists have equal lengths; confirm.
    neg_idx = [random.sample(anchor_neg_list[i][1],1)[0] for i in range(len(anchor_neg_list)) for j in anchor_pos_list[i][1]]
    neg_feat = feat[neg_idx]
    neg_feat = neg_feat.transpose(1,0)
    matrix = np.matmul(imgs_feat, neg_feat)
    matrix = np.exp(matrix)
    neg_dis = matrix.trace()/len(imgs_idx)
    avg_neg_dis += neg_dis
print('anchor to positive average distance is %f, to negative is %f'%(avg_pos_dis/loop, avg_neg_dis/loop))

# # ====================== 全局聚类 ======================
# cluster_num = 3
# cmap = 'rainbow'
# kmeans = KMeans(n_clusters=cluster_num).fit(feat)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# img = gImage.imread(pkuBirdViewImg)
# img = img[0:1087,:,:]
# img[:,:,3] = alpha
# ax.imshow(img, zorder = 0)
# ax.scatter(GNSS[:,1], GNSS[:,0], s=1, c=kmeans.labels_, cmap=cmap, zorder = 1) # 车辆行驶轨迹
# plt.axis('equal')
# plt.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap))
# plt.scatter(GNSS[anchor_idx,1], GNSS[anchor_idx,0], s = 2, c='k') # 所有锚点
# plt.title('cluster--%d'%cluster_num)

# Render any figures created above and terminate the script.
plt.show()
exit(0) 
