import pickle

import torch.cuda
import torch.utils.data as data
import torch
import numpy as np

import torchvision.transforms as transforms
import torch.nn as nn
from sklearn import preprocessing
import cv2
import random
import math
import matplotlib.pyplot as plt
import scipy.io
import csv
from pyquaternion import Quaternion
import get_fixation
import time

# added arguments and cnn_model
from Arguments import get_args
import convlstm

class AverageMeter(object):
    """Tracks the most recent value plus a running sum/count average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all tracked statistics."""
        self.val = 0
        self.avg = 0
        self.summ = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.summ += val * n
        self.count += n
        self.avg = self.summ / self.count

def datasetinput(PATH):
    """Unpickle and return the dataset stored at PATH.

    NOTE(review): pickle.load on an untrusted file can execute arbitrary
    code -- only use with trusted local data.
    """
    with open(PATH, 'rb') as handle:
        loaded = pickle.load(handle, encoding='bytes')
    return loaded

# Compute viewing angles (theta, phi) from a 3D direction vector
def vector_to_ang(_v):
    """Convert a 3D gaze direction vector into (theta, phi) angles in degrees.

    phi is the elevation relative to the horizontal plane; theta is the
    signed azimuth around the vertical [0, 1, 0] axis.
    """
    _v = np.array(_v)
    # Elevation: 90 minus the angle between the vector and the vertical axis.
    alpha = get_fixation.degree_distance(_v, [0, 1, 0])
    phi = 90.0 - alpha
    # Component of _v along the vertical [0, 1, 0] axis.
    proj1 = [0, np.cos(alpha / 180.0 * np.pi), 0]
    # Remainder lies in the horizontal plane spanned by [1,0,0] and [0,0,1].
    proj2 = _v - proj1
    # Azimuth magnitude: angle between the horizontal projection and [1, 0, 0].
    theta = get_fixation.degree_distance(proj2, [1, 0, 0])
    # Sign disambiguates the half-space: vectors closer to -z get +theta.
    sign = 1.0 if get_fixation.degree_distance(_v, [0, 0, -1]) <= 90 else -1.0
    return sign * theta, phi

# Convert viewing angles into 2D viewpoint pixel coordinates on the image plane
def ang_to_geoxy(_theta, _phi, _h, _w):
    """Map (theta, phi) viewing angles in degrees to integer pixel
    coordinates (row, col) on an _h x _w equirectangular image."""
    # Row: elevation phi moves the point away from the vertical midline.
    x = _h / 2.0 - (_h / 2.0) * np.sin(_phi / 180.0 * np.pi)
    # Normalize azimuth into [0, 360), then flip so it increases leftwards.
    azimuth = _theta if _theta >= 0 else _theta + 360
    azimuth = 360 - azimuth
    y = azimuth / 360.0 * _w
    return int(x), int(y)

# Dead code: superseded by point_to_pic / create_fixation_map; kept for reference.
'''
def conrespond_point_function(pic,fixation):
    fixation_set = []
    fixation_map = []

    # file = open("fixation_set.txt","a")
    for fix_i in fixation:
        theta, phi = vector_to_ang(fix_i)
        x, y = ang_to_geoxy(theta, phi, 90, 160)
        fixation_set.append([x, y])
        #file.write(str([x,y])+'\n')

        # a = ((fix_i[0]*180+180)/360)*160#Width
        # b = ((1-math.sin(fix_i[1]))/2)*90#Height
        # fixation_set.append([a,b])

        # map_tmp = np.zeros((90,160))
        # x_ = math.floor(x-16)
        # y_ = math.floor(y-9)
        # for x in range(32):
        #     for y in range(18):
        #         if x_+x<90 and y_+y<160:
        #             map_tmp[x_+x][y_+y] = 1
        # fixation_map.append(map_tmp)

    return fixation_set,fixation_map
'''

def point_to_pic(data_i):
    """Rasterize a viewport window around fixation point data_i = (x, y).

    Returns a (90, 160) binary map with a 32x18 block of ones whose
    top-left corner is anchored at (x - 16, y - 9).

    NOTE(review): the window is 32 rows x 18 cols on a 90x160 (H x W) map;
    a viewport is normally wider than tall, so the extents may be swapped
    relative to the intent -- confirm against the caller.
    """
    map_tmp = np.zeros((90, 160))
    # Top-left corner of the window, clamped at 0 so negative offsets do
    # not wrap to the opposite image edge via Python negative indexing.
    x_ = max(math.floor(data_i[0] - 16), 0)
    # BUGFIX: the second coordinate was previously read from data_i[0],
    # placing the window at the wrong column for every fixation.
    y_ = max(math.floor(data_i[1] - 9), 0)
    for x in range(32):
        for y in range(18):
            if x_ + x < 90 and y_ + y < 160:
                map_tmp[x_ + x][y_ + y] = 1
    return map_tmp

# Load the head-orientation trace of one user watching one video
def data_prepare(idx, idy, UserId, N, base_dir='D:/VR_project/LiveDeep_All/vr-dataset/'):
    """Load one user's head-orientation trace for one video, downsampled
    to roughly N samples.

    Args:
        idx: experiment index (selects the Experiment_<idx> directory).
        idy: video index (selects video_<idy>.csv).
        UserId: user directory name.
        N: target number of samples to keep.
        base_dir: dataset root directory; defaults to the original
            hard-coded path for backward compatibility.

    Returns:
        (~N, 3) ndarray of gaze vectors, obtained by rotating the forward
        vector [0, 0, 1] by each sampled head quaternion.
    """
    Userdata = []
    UserFile = base_dir + 'Experiment_' + str(idx) + '/' + str(UserId) + "/video_" + str(idy) + ".csv"

    # First pass: count rows (the count includes the header line).
    with open(UserFile) as csvfile:
        csv_reader = csv.reader(csvfile)
        csvLength = np.array(list(csv_reader)).shape[0]

    # Second pass: keep roughly every (csvLength / N)-th row.
    with open(UserFile) as csvfile:
        csv_reader = csv.reader(csvfile)
        birth_header = next(csv_reader)  # skip the CSV header row
        index = 0
        for row in csv_reader:
            # Float-modulo sampling selects ~N evenly spaced rows.
            if index % (csvLength / N) < 1:
                v0 = [0, 0, 1]
                # Columns 1-4 hold the quaternion components; the sign
                # flips convert between coordinate conventions.
                q = Quaternion([float(row[4]), -float(row[3]), float(row[2]), -float(row[1])])
                Userdata.append(q.rotate(v0))

            index = index + 1

    Userdata = np.array(Userdata)
    return Userdata

# Build a one-hot fixation map from the viewpoint coordinates
def create_fixation_map(_X, _y, _idx, H, W):
    """Build an H x W one-hot fixation map for sample _idx of trace _y.

    _X is unused; it is kept so existing call sites stay valid.
    """
    direction = _y[_idx]
    theta, phi = vector_to_ang(direction)
    hi, wi = ang_to_geoxy(theta, phi, H, W)
    fix_map = np.zeros(shape=(H, W))
    # The single fixation pixel is placed at coordinates mirrored in
    # both axes.
    fix_map[H - hi - 1, W - wi - 1] = 1
    return fix_map

# Assemble the training inputs and cached labels
def create_saldataset(dataset):
    """Split the pickled dataset into saliency maps (X) and the fixation
    heat maps previously cached in dataY.npy (Y).

    dataset rows are [timestamp, fixationList, saliencyMap]; column 2
    holds the saliency map.
    """
    dataX = np.array([dataset[i, 2] for i in range(len(dataset))])
    data_Y = np.load("dataY.npy", allow_pickle=True)
    return dataX, data_Y


# Generate the user's ground-truth fixation maps, shape (N, 90, 160), for every video frame
def create_user_trace(dataset, indny, userID):
    """Build and cache ground-truth fixation heat maps for every frame.

    dataset rows are [timestamp, fixationList, saliencyMap].  The user's
    head trace is converted into per-frame one-hot fixation maps, blurred
    into heat maps, min-max scaled over the whole stack, multiplied by 10
    and written to dataY.npy.

    Returns the number of frames N.
    """
    frames = np.array([row[2] for row in dataset])
    N, H, W = frames.shape
    series = data_prepare(1, indny, userID, N)

    # Gaussian kernel extents (must be odd); smaller than the 61x31 pair
    # tried previously.
    gblur_size_width = 23
    gblur_size_high = 11
    mmscaler = preprocessing.MinMaxScaler(feature_range=(0, 1))

    dataY = np.array([create_fixation_map(None, series, idx, H, W)
                      for idx, _ in enumerate(series)])
    headmap = np.array([cv2.GaussianBlur(item, (gblur_size_width, gblur_size_high), 0)
                        for item in dataY])
    # Rescale the entire stack jointly into [0, 1].
    headmap = mmscaler.fit_transform(headmap.ravel().reshape(-1, 1)).reshape(headmap.shape)

    np.save("dataY.npy", headmap * 10)
    return N

# Dead code: orphaned fragment of an earlier create_user_trace variant;
# kept for reference only.
'''
    if extract_flag == True:
        b = dataset[i, 1]
        conrespond_point, fixation_map = conrespond_point_function(a, b)
        dataY.append(conrespond_point)
    if extract_flag == False:
        data_Y = np.load("dataY.npy", allow_pickle=True)
        data_Y_use = data_Y
        for data_i in range(len(data_Y)):
            x_sum = 0
            y_sum = 0
            x_avg = 0
            y_avg = 0
            number = 0
            for data_j in data_Y[data_i]:
                x_sum += data_j[0]
                y_sum += data_j[1]
                number += 1
            x_avg = x_sum/number
            y_avg = y_sum/number
            data_Y[data_i] = [x_avg, y_avg]

            data_i_use = data_Y[data_i]
            data_tmp = point_to_pic(data_i_use)
            data_Y[data_i] = data_tmp

        #for data_i in data_Y:
            # map_tmp = np.zeros((90,160))
            # x_ = math.floor(x-16)
            # y_ = math.floor(y-9)
            # for x in range(32):
            #     for y in range(18):
            #         if x_+x<90 and y_+y<160:
            #             map_tmp[x_+x][y_+y] = 1
            #fixation_map.append(map_tmp)

    #for j in range(len(dataset)-train_len):
        #dataY.append(dataset[j, :])

    #dataY_array = np.array(dataY)
    #np.save("dataY.npy",dataY_array)
    '''

class SaliencyDataset(data.Dataset):
    """Pairs each saliency frame with the frame `predict_time` steps
    ahead, plus the matching ground-truth viewport map."""

    def __init__(self, saliency_map, predict_time, point_map, transform=None):
        # Sequence of per-frame saliency maps.
        self.predict_dir = saliency_map
        # Look-ahead horizon (in frames) for the prediction target.
        self.predict_time = predict_time
        self.transform = transform
        # Ground-truth fixation/viewport maps aligned with predict_dir.
        self.point_map = point_map

    def __len__(self):
        """Number of frames in the sequence."""
        return len(self.predict_dir)

    def __getitem__(self, idx):
        """Return (current saliency, future saliency, current viewport map).

        Near the end of the sequence, where no frame `predict_time` ahead
        exists, the current frame doubles as its own target.
        """
        saliency = self.predict_dir[idx]
        point_map = self.point_map[idx]
        # BUGFIX: the condition was `idx <= len - predict_time`, which
        # allowed idx + predict_time == len and raised IndexError on the
        # lookahead read; the strict inequality keeps it in range.
        if idx + self.predict_time < len(self.predict_dir):
            predict = self.predict_dir[idx + self.predict_time]
        else:
            predict = self.predict_dir[idx]
        if self.transform:
            saliency = self.transform(saliency)
            predict = self.transform(predict)
            point_map = self.transform(point_map)
        return saliency, predict, point_map

# def sequencentialData(dataset,i,tlen):
#     sal_list, pre_list = [], []
#     for j in range(tlen):
#         saliency, predict = dataset.  .next()
#         sal_list.append(saliency)
#         pre_list.append(predict)
#         sal_output= torch.stack(sal_list,dim=0)
#         pre_output= torch.stack(pre_list,dim=0)
#         sal_output =torch.unsqueeze(sal_output,1)
#         pre_output = torch.unsqueeze(pre_output,1)
#         return sal_output, pre_output

def sequencentialData(dataset, i, tlen):
    """Collect a window of tlen consecutive samples starting at index i.

    Returns four stacked tensors, each with a singleton batch dimension
    inserted at axis 1: saliency inputs, prediction targets, viewport
    maps, and the 2-channel (saliency, viewport) network input.
    """
    sal_list, pre_list, point_list, input_list = [], [], [], []
    for offset in range(tlen):
        saliency, predict, point_map = dataset[i + offset]
        sal_list.append(saliency)
        pre_list.append(predict)
        point_list.append(point_map)
        # Drop the leading channel dim and stack saliency + viewport into
        # a single 2-channel input frame.
        frame = torch.stack([torch.squeeze(saliency, 0),
                             torch.squeeze(point_map, 0)])
        input_list.append(frame)
    sal_output = torch.unsqueeze(torch.stack(sal_list, dim=0), 1)
    pre_output = torch.unsqueeze(torch.stack(pre_list, dim=0), 1)
    point_output = torch.unsqueeze(torch.stack(point_list, dim=0), 1)
    input_output = torch.unsqueeze(torch.stack(input_list, dim=0), 1)
    return sal_output, pre_output, point_output, input_output

def train(dataset, endTime, tlen):
    """Run one online-training round of the ConvLSTM on random windows.

    Args:
        dataset: SaliencyDataset (indexable; returns tensor triples).
        endTime: upper bound for the random window start index.
        tlen: window length (consecutive frames per training sample).

    Returns:
        (running_loss, max_loss, min_loss) accumulated over
        args.train_time iterations.

    NOTE(review): relies on the module-level `args` created in __main__.
    """
    # torch.set_default_dtype(torch.float64)
    # torch.type(torch.float64)
    # 2 input channels: saliency map + viewport map.
    net = convlstm.ConvLSTM_CNN(input_dim=2, hidden_dim=6, kernel_size=(5, 5), num_layers=1)
    if torch.cuda.is_available():
        net = net.cuda()

    loss_function = nn.MSELoss()
    # optimizer = optim.SGD(net.parameters(), lr=0.001,weight_decay=0.5)
    optimizer = torch.optim.Adam(net.parameters(), lr=0.000001, betas=(0.9, 0.99), weight_decay=0.5)
    running_loss = 0
    min_loss = float('inf')
    max_loss = -float('inf')
    for epoch in range(args.train_time):
        # Random window start near the current end of the stream.
        # NOTE(review): a + tlen can exceed endTime, so sequencentialData
        # may index past the newest frame -- confirm the dataset length
        # always covers endTime + tlen.
        a = random.randint(endTime-tlen, endTime)
        # sal_list, pre_list = dataset[epoch]
        sal_list, pre_list, point_list, input_list = sequencentialData(dataset, a, tlen)

        if torch.cuda.is_available():
            input_list = input_list.cuda()
            point_list = point_list.cuda()

        optimizer.zero_grad()
        pre_list = pre_list.to(torch.float32)
        sal_list = sal_list.to(torch.float32)
        point_list = point_list.to(torch.float32)
        # input_list = [sal_list,point_list]
        # input_list = np.stack(input_list)
        # input_list = torch.tensor(input_list).to(torch.float32)
        input_list = input_list.to(torch.float32)
        output_list, output_last = net(input_list)
        # Take the last layer's hidden state and reshape it to match the
        # (T, 1, C, H, W) target layout.
        output_h = output_last[0][0]
        output_h = output_h.permute(1,0,2,3)
        output_h = torch.unsqueeze(output_h,1)
        a=output_list[0]  # NOTE(review): unused; also shadows the window index above
        # output_h = output_h*10
        pre_list = pre_list*10  # NOTE(review): scaled but unused -- the loss below targets point_list
        # loss=loss_function(output_h,pre_list)
        loss = loss_function(output_h, point_list)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if max_loss < loss.item():
            max_loss = loss.item()
        if min_loss > loss.item():
            min_loss = loss.item()
        if epoch % 10 == 0:
            print('[%5d] loss: %.3f' %
                (epoch + 1, loss.item()))
            # running_loss =0

    print('Finished Training')
    PATH = args.save_path + 'convlstm_net_online.pth'
    torch.save(net.state_dict(), PATH)
    return running_loss, max_loss, min_loss

def test(dataset, start):
    """Evaluate the saved ConvLSTM on the 10-frame window starting at `start`.

    Loads the checkpoint written by train(), predicts the viewport map,
    thresholds it, and compares against the ground-truth viewport map.

    Returns:
        [fit, mistake, fetch, wrong, right, need] -- pixel statistics over
        the 90x160 (= 14400 pixel) map.

    NOTE(review): depends on the module-level `args` created in __main__
    and forcibly enables figure display below.
    """
    dataStat = []
    # torch.set_default_dtype(torch.float64)
    PATH = args.save_path + 'convlstm_net_online.pth'
    net = convlstm.ConvLSTM_CNN(input_dim=2,
                                hidden_dim=6,
                                kernel_size=(5, 5),
                                num_layers=1)
    if torch.cuda.is_available():
        net = net.cuda()

    net.load_state_dict(torch.load(PATH))
    sal_list, pre_list, point_list, input_list = sequencentialData(dataset, start, 10)
    if torch.cuda.is_available():
        input_list = input_list.cuda()

    sal_list = sal_list.to(torch.float32)
    point_list = point_list.to(torch.float32)
    input_list = input_list.to(torch.float32)
    # First saliency frame, used only for visualization.
    # NOTE(review): `input` shadows the builtin of the same name.
    input = sal_list[0][0]
    input = torch.squeeze(input, 0)
    input = input.detach().numpy()

    _, outputs = net(input_list)
    # Hidden state of the first layer -> predicted viewport map.
    pre = outputs[0][0]
    pre1 = torch.squeeze(pre, 0)
    pre2 = pre1.detach().cpu().numpy()
    pre_image = pre2[0]
    # Ground-truth viewport map of the 5th frame in the window.
    p_i = point_list[4][0]
    p_i = torch.squeeze(p_i, 0)
    p_i = p_i.detach().numpy()
    # NOTE(review): this overrides whatever showFig value the user configured.
    args.showFig = True
    if args.showFig:
        plt.imshow(input)
        plt.show()
        plt.imshow(p_i)
        plt.show()
        plt.imshow(pre_image*5)
        plt.show()

    real = pre_list
    real = torch.squeeze(real, 1)
    real = torch.squeeze(real, 1)
    real = real.to(torch.float32)
    real1 = real.detach().numpy()
    real_image = real1[0]

    # Saliency Map
    # print(input.shape)
    # cv2.imshow('Input Saliency Map', input*200)
    # print(real_image.shape)
    # cv2.imshow('Label Saliency Map', real_image*200)

    # pre_image[pre_image<-0.03] = 1

    # Viewport Map
    # print(pre_image.shape)
    # cv2.imshow('Prediction Viewport Map', pre_image*200)
    # print(real_image.shape)
    # cv2.imshow('Label Viewport Map', p_i*200)
    #
    # # Save
    # cv2.imwrite('Input Saliency Map.jpg', input*200)
    # cv2.imwrite('Label Saliency Map.jpg', real_image*200)
    # cv2.imwrite('Prediction Viewport Map.jpg', pre_image*200)
    # cv2.imwrite('Label Viewport Map.jpg', p_i*200)
    # print("Saved")
    # cv2.waitKey(0)

    # Binarize the prediction with the configured threshold.
    preMat = pre_image * 10
    realMat = p_i

    preMat[preMat < args.threshold] = 0
    preMat[preMat >= args.threshold] = 1

    # fit: pixels where prediction and label agree (their sum is not 1);
    # fetch: predicted-tile pixel count; right: true positives;
    # need: ground-truth tile pixel count; wrong: false positives.
    fit = (sum(map(sum, (preMat + realMat) != 1)))
    mistake = 14400 - fit
    fetch = sum(map(sum, (preMat == 1)))
    right = sum(map(sum, (preMat + realMat) > 1))
    need = sum(map(sum, (realMat == 1)))
    wrong = fetch - right
    dataStat = [fit, mistake, fetch, wrong, right, need]

    return dataStat

if __name__ == '__main__':
    # Load the run configuration (paths, thresholds, training schedule).
    args = get_args()

    # One saliency topic per outer iteration; only topic 0 is processed
    # here even though five topic files are listed below.
    for indny in range(0, 1):
        for userID in range(1, 49):#48 users

            FileList = ["saliency_ds2_topic0", "saliency_ds2_topic1", "saliency_ds2_topic2", "saliency_ds2_topic3", "saliency_ds2_topic4"]

            PATH1 = 'D:/VR_project/PanoSaliency/data/' + FileList[indny]
            all_dataset = datasetinput(PATH1)

            dataset_array = np.array(all_dataset, dtype=object)
            # Builds dataY.npy as a side effect and returns the frame count.
            end_time = create_user_trace(dataset_array, indny, userID)

            data_input, data_input_fixation = create_saldataset(dataset_array)

            transform = transforms.Compose([transforms.ToTensor()])
            face_dataset = SaliencyDataset(saliency_map=data_input,
                                           predict_time=5,
                                           point_map=data_input_fixation,
                                           transform=transform)

            # trainloader = torch.utils.data.DataLoader(face_dataset, batch_size=4,
            #                                           shuffle=False, num_workers=2)

            dataStat = []
            losses = []
            batchTime = []
            mlosses = []
            Mlosses = []
            batch_time = AverageMeter()
            data_time = AverageMeter()

            end = time.time()

            # rand_num = random.randint(1, 2000) # [484,422,256,379,800]
            # rand_n = rand_num[indny]
            # dataStatTT = test(face_dataset, rand_num)  end_time

            # Online loop: train on a sliding window, then immediately test.
            for epoch in range(args.start_time, end_time-36, args.step_size):

                data_time.update(time.time() - end)

                # online train
                start = time.perf_counter()
                loss, Mloss, mloss = train(face_dataset, epoch, args.windows)
                end = time.perf_counter()
                print(end - start)

                # save the test data
                dataStatTT = test(face_dataset, epoch)
                batch_time.update(time.time() - end)
                end = time.time()

                dataStat.append(dataStatTT)
                losses.append(loss)
                mlosses.append(mloss)
                Mlosses.append(Mloss)

                # NOTE(review): this appends the same AverageMeter object
                # every epoch, so all entries alias one meter.
                batchTime.append(batch_time)

                # NOTE(review): the first format slot prints args.step_size
                # where an epoch index seems intended.
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Loss {loss:.4f} ({loss:.4f})\t'
                      'mLoss {mloss:.4f} ({mloss:.4f})\t'
                      'MLoss {Mloss:.4f} ({Mloss:.4f})\t'.format(
                    args.step_size, epoch, end_time, batch_time=batch_time,
                    data_time=data_time, loss=loss, mloss=mloss, Mloss=Mloss))

            # Persist all per-epoch statistics for offline analysis.
            scipy.io.savemat('coLive_online' + '_' + str(1) + '_' + str(indny) + '_' + str(userID) + '.mat',
                             mdict={'coLive_online': dataStat, 'loss': losses, 'batch_time': batchTime, 'Mlosses': Mlosses, 'mlosses': mlosses})

        # sal_list, pre_list= sequencentialData(face_dataset, 1, 10)
        # print(sal_list.shape)

# NOTE(review): training convergence is poor with these settings; revisit the
# learning rate / weight_decay and the loss target before further runs.