import numpy as np
from cv2 import cv2
import re
import matplotlib.pyplot as plt
from utils2 import *
import random
from sklearn.cluster import KMeans
from sklearn import decomposition
import matplotlib
import matplotlib.image as gImage
from sklearn.manifold import TSNE
from matplotlib.ticker import FuncFormatter
import scipy.stats
import time
import random
from sklearn.metrics import confusion_matrix
import copy
import pickle

training_round = 2  # Round whose model produced the DS features. The output video follows round `sync_round`; a red border marks frames from the training round.

for sync_round in [3]:

    # The two rounds other than the sync round. Sorted for determinism
    # (the original indexed an unordered set difference, which is only
    # deterministic by CPython accident for small ints).
    candidate1_round, candidate2_round = sorted({1, 2, 3} - {sync_round})

    ####################### Input files #######################
    DATA_ROOT = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData'

    def _ds_path(r):
        # DS features of round r, computed by the model trained on training_round.
        return '%s/round%d/round%d_%dds_sample32.npy' % (DATA_ROOT, training_round, training_round, r)

    def _position_path(r):
        # GNSS positions synchronized to round r.
        return '%s/round%d/round%d_sync_position.npy' % (DATA_ROOT, r, r)

    def _video_path(r):
        # Raw video of round r.
        return '%s/round%d/round%d.avi' % (DATA_ROOT, r, r)

    index_map_path = '%s/IndexMap.pickle' % DATA_ROOT

    sync_ds_path = _ds_path(sync_round)
    sync_position_path = _position_path(sync_round)
    sync_video_path = _video_path(sync_round)

    candidate1_ds_path = _ds_path(candidate1_round)
    candidate1_position_path = _position_path(candidate1_round)
    candidate1_video_path = _video_path(candidate1_round)

    candidate2_ds_path = _ds_path(candidate2_round)
    candidate2_position_path = _position_path(candidate2_round)
    candidate2_video_path = _video_path(candidate2_round)

    training_ds_path = _ds_path(training_round)

    ####################### Output file #######################
    out_video_path = 'E:/Research/2020ContrastiveLearningForSceneLabel/Data/20210329ExperimentData/round%d/round%d_ds_sample32.avi' % (training_round, sync_round)

    ####################### Load data and settings #######################
    # Context manager guarantees the pickle file is closed; the original
    # also shadowed the path variable with the open file handle.
    with open(index_map_path, 'rb') as index_map_f:
        index_map = pickle.load(index_map_f)
    # mapping[i][r-1] gives, for sync frame i, the matching frame index in round r.
    mapping = index_map[sync_round - 1]

    training_ds = np.load(training_ds_path)  # NOTE(review): loaded but never used below

    sync_ds = np.load(sync_ds_path)
    sync_video = cv2.VideoCapture(sync_video_path)
    sync_gnss = np.load(sync_position_path)

    frame_count = np.shape(sync_ds)[0]

    candidate1_ds = np.load(candidate1_ds_path)
    candidate1_video = cv2.VideoCapture(candidate1_video_path)
    candidate1_gnss = np.load(candidate1_position_path)

    candidate2_ds = np.load(candidate2_ds_path)
    candidate2_video = cv2.VideoCapture(candidate2_video_path)
    candidate2_gnss = np.load(candidate2_position_path)

    resize_ratio = 0.7
    video_width = int(sync_video.get(cv2.CAP_PROP_FRAME_WIDTH) * resize_ratio)
    video_height = int(sync_video.get(cv2.CAP_PROP_FRAME_HEIGHT) * resize_ratio)

    def _read_frame(video, frame_index, highlight, thickness):
        """Seek to frame_index, read and resize the frame; draw a red border
        when highlight is True.

        Raises IOError on a failed read (the original passed None to
        cv2.resize and crashed with a cryptic error)."""
        video.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
        ret, frame = video.read()
        if not ret or frame is None:
            raise IOError('failed to read frame %d' % frame_index)
        frame = cv2.resize(frame, (video_width, video_height))
        if highlight:
            cv2.rectangle(frame, (0, 0), (video_width, video_height), (0, 0, 255), thickness)
        return frame

    def _plot_ds(fig, position, ds_row, round_id, frame_index):
        """Draw one DS histogram (100 bins over [-1, 1]) as subplot `position`."""
        ax = fig.add_subplot(position)
        ax.bar(range(100), ds_row, width=1, align='edge')
        ax.set_title('round %d DS of frame %d' % (round_id, frame_index))
        ax.set_ylim(0, 0.08)
        ax.set_xticks([0, 25, 50, 75, 99])
        ax.set_xticklabels(['-1', '-0.5', '0', '0.5', '1'])

    ####################### Configure output video #######################
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    width = video_width * 3       # three videos side by side
    height = video_height + 400   # extra 400-px strip below for the DS plots
    out = cv2.VideoWriter(out_video_path, fourcc, 30.0, (width, height))

    start = 0
    end = frame_count
    try:
        for i in range(start, end):

            # Frame indices in the candidate rounds matching sync frame i.
            i1 = mapping[i][candidate1_round - 1]
            i2 = mapping[i][candidate2_round - 1]

            # Read the three aligned frames. Border thickness kept from the
            # original: 6 for the sync view, 4 for the candidate views.
            sync_frame = _read_frame(sync_video, i, sync_round == training_round, 6)
            candidate1_frame = _read_frame(candidate1_video, i1, candidate1_round == training_round, 4)
            candidate2_frame = _read_frame(candidate2_video, i2, candidate2_round == training_round, 4)

            # Stitch the three frames horizontally.
            frame = np.concatenate((sync_frame, candidate1_frame, candidate2_frame), axis=1)

            # Render the three DS histograms into one figure.
            fig = plt.figure(figsize=(21, 4))
            fig.subplots_adjust(left=0.05, right=0.95, wspace=0.4)
            _plot_ds(fig, 131, sync_ds[i], sync_round, i)
            _plot_ds(fig, 132, candidate1_ds[i1], candidate1_round, i1)
            _plot_ds(fig, 133, candidate2_ds[i2], candidate2_round, i2)

            ds = fig2data(fig)   # from utils2; presumably returns an RGBA ndarray — TODO confirm
            ds = ds[:, :, 0:3]   # drop the alpha channel
            plt.close(fig)       # close THIS figure explicitly so figures don't accumulate
            ds = cv2.resize(ds, (video_width * 3, 400))

            # Video strip on top, DS plot strip below.
            img = np.concatenate((frame, ds), axis=0)
            out.write(img)
            print(i)  # progress indicator
    finally:
        # Release the writer and captures even if a frame read fails mid-run.
        out.release()
        sync_video.release()
        candidate1_video.release()
        candidate2_video.release()


