import pickle
import _pickle
import torch.cuda
from torch.utils.data import Dataset, DataLoader
import torch
from tensorboardX import SummaryWriter

import torchvision.transforms as transforms
import torch.nn as nn
from sklearn import preprocessing
import cv2
import random
import math
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import csv
from pyquaternion import Quaternion
import get_fixation
import time

# added arguments and cnn_model
from Arguments import get_args
import convlstm
import split_matrix

class AverageMeter(object):
    """Tracks the most recent value and a running average of a metric."""

    def __init__(self):
        # Start from a clean state; reset() holds the single source of truth.
        self.reset()

    def reset(self):
        # val: latest sample, summ: weighted total, avg: running mean.
        self.val = 0
        self.avg = 0
        self.summ = 0
        self.count = 0

    def update(self, val, n=1):
        """Fold in `val`, optionally weighted by a sample count `n`."""
        self.val = val
        self.count += n
        self.summ += val * n
        self.avg = self.summ / self.count

# Convert a 3-D gaze direction vector into view angles (theta, phi).
def vector_to_ang(_v):
    """Return (theta, phi) in degrees for the direction vector `_v`.

    phi is the latitude (90 minus the angle to the +y axis); theta is the
    signed longitude of `_v`'s projection onto the xz-plane relative to +x.
    """
    vec = np.array(_v)
    # Angle between vec and the +y axis gives the latitude complement.
    alpha = get_fixation.degree_distance(vec, [0, 1, 0])
    phi = 90.0 - alpha
    # Component of vec along +y (assumes vec is a unit vector —
    # NOTE(review): confirm callers always pass normalized directions).
    axis_part = [0, np.cos(alpha / 180.0 * np.pi), 0]
    # Remaining component lies in the plane spanned by [1,0,0] and [0,0,1].
    planar = vec - axis_part
    # Unsigned longitude: angle between the planar projection and +x.
    theta = get_fixation.degree_distance(planar, [1, 0, 0])
    # Negative sign when vec points toward +z (more than 90 deg from -z).
    flip = get_fixation.degree_distance(vec, [0, 0, -1]) > 90
    theta = (-1.0 if flip else 1.0) * theta
    return theta, phi

# Map view angles (theta, phi) onto an H x W equirectangular pixel grid.
def ang_to_geoxy(_theta, _phi, _h, _w):
    """Project angles in degrees to integer (row, col) image coordinates.

    :param _theta: longitude in degrees (negative values wrap around)
    :param _phi: latitude in degrees
    :param _h: image height in pixels
    :param _w: image width in pixels
    :return: (row, col) as truncated ints
    """
    half_h = _h / 2.0
    row = half_h - half_h * np.sin(_phi / 180.0 * np.pi)
    # Wrap negative longitudes into [0, 360), then mirror the direction.
    wrapped = _theta
    if wrapped < 0:
        wrapped = 180 + wrapped + 180
    wrapped = 360 - wrapped
    col = wrapped * 1.0 / 360 * _w
    return int(row), int(col)

# Load one user's head-orientation trace for one video of an experiment.
def data_prepare(idx, videoId, userId, t_list):
    """Read the user's quaternion CSV log and return gaze direction vectors.

    For each timestamp in `t_list`, the first CSV row whose time is at or
    after it is taken, and the forward vector [0, 0, 1] is rotated by that
    row's quaternion.

    :param idx: experiment index in the vr-dataset directory layout
    :param videoId: video id
    :param userId: user id
    :param t_list: ascending timestamps at which to sample the trace
    :return: numpy array of rotated gaze vectors, one per matched timestamp
    """
    gaze_vectors = []
    UserFile = 'D:/VR_project/LiveDeep_All/vr-dataset/Experiment_' \
               + str(idx) + '/' + str(userId) + "/video_" + str(videoId) + ".csv"
    print('Load user\'s excel info from', UserFile)

    raw_rows = []
    with open(UserFile) as handle:
        reader = csv.reader(handle)
        next(reader)  # discard the header row
        cursor, limit = 0, len(t_list)
        for row in reader:
            # Advance once the log time reaches the next requested timestamp.
            if float(row[1]) >= round(t_list[cursor], 3):
                # Quaternion components are reordered/negated to match the
                # dataset's coordinate convention.
                quat = Quaternion([float(row[5]), -float(row[4]),
                                   float(row[3]), -float(row[2])])
                gaze_vectors.append(quat.rotate([0, 0, 1]))
                raw_rows.append([float(row[2]), float(row[3]),
                                 float(row[4]), float(row[5])])
                cursor += 1
                if cursor == limit:
                    break
    # raw_rows kept for optional debugging dumps (see commented savetxt).
    # np.savetxt(f'../clean_data/video_{str(videoId)}_{str(UserId)}.txt', np.array(t), fmt='%.3f')
    return np.array(gaze_vectors)

# Build a one-hot H x W fixation map for entry `_idx` of the gaze series.
def create_fixation_map(_X, _y, _idx, H, W):
    """Return an (H, W) array with a single 1 at the viewed pixel.

    `_X` is unused and kept only for interface compatibility.
    """
    theta, phi = vector_to_ang(_y[_idx])
    row, col = ang_to_geoxy(theta, phi, H, W)
    fix_map = np.zeros(shape=(H, W))
    # NOTE(review): the hot pixel is mirrored on both axes — presumably to
    # match the saliency maps' orientation; confirm against the data source.
    fix_map[H - row - 1, W - col - 1] = 1
    return fix_map

def de_interpolate(raw_tensor, N):
    """Inverse of F.interpolate(source, scale_factor=10, mode="nearest").

    Sums every 10th pixel along both spatial axes, collapsing each 10x10
    nearest-neighbour upsampling block back into a single value.

    :param raw_tensor: array of shape [N, H, W]; H and W divisible by 10
    :param N: number of maps (must equal raw_tensor.shape[0])
    :return: array of shape [N, H // 10, W // 10]
    """
    # Derive the output size from the input instead of hard-coding (9, 16),
    # so maps other than 90x160 also work.
    _, height, width = raw_tensor.shape
    out = np.zeros((N, height // 10, width // 10))
    for offset in range(10):
        out = out + raw_tensor[:, offset::10, offset::10]
    return out

# Build per-frame saliency maps and the user's ground-truth fixation maps.
def create_sal_fix(dataset, videoId, userId):
    """Return (saliency_maps, fixation_maps), both scaled by 10.

    Reads the module-level globals: args, timeBasePath, fileDict, salDict,
    gblur_size_width, gblur_size_high.

    :param dataset: saliency maps, or entries of the form
                    [timestamp, fixationList, saliencyMap] when
                    args.salFlag == "attention"
    :param videoId: video id
    :param userId: user id
    """
    saliency_maps = dataset
    flag = args.salFlag
    if flag == "attention":
        saliency_maps = np.array([entry[2] for entry in dataset])
        t_list = [entry[0] for entry in dataset]
    elif flag == "sample_attention":
        t_list = np.loadtxt(timeBasePath + f"{videoId}-{fileDict[videoId]}.txt")
    elif flag == "cnn_sphere":
        t_list = np.loadtxt(timeBasePath + salDict[videoId].split('.')[0] + '.txt')
    else:
        t_list = np.loadtxt(timeBasePath + f"{videoId}-{fileDict[videoId]}.txt")
        print("Warning! Please pay attention to salFlag!")
    print(f"Load timestamp from {timeBasePath}")

    # Collapse the upsampled maps back to tile resolution.
    # (Normalising saliency_maps here was considered but deliberately skipped.)
    saliency_maps = de_interpolate(saliency_maps, len(saliency_maps))

    N, H, W = saliency_maps.shape
    series = data_prepare(1, videoId, userId, t_list)

    # One-hot fixation per frame, blurred into a heat map, rescaled to [-1, 1].
    scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1))
    fixation_map = np.array([create_fixation_map(None, series, k, H, W)
                             for k in range(len(series))])
    headmap = np.array([cv2.GaussianBlur(m, (gblur_size_width, gblur_size_high), 0)
                        for m in fixation_map])
    fixation_maps = scaler.fit_transform(headmap.ravel().reshape(-1, 1)).reshape(headmap.shape)

    # The x10 scaling matches the training loss magnitude used downstream.
    return saliency_maps * 10, fixation_maps * 10

def create_dataset(saliency_maps, fixation_maps, predict_time):
    """Slice the map series into sliding windows and prediction targets.

    Each sample X[i] holds `predict_time` frames, every frame stacked as
    two channels [saliency, fixation]; Y[i] is the fixation map that
    immediately follows the window.

    :return: (X, Y) as numpy arrays of shapes
             (len - predict_time, predict_time, 2, H, W) and
             (len - predict_time, H, W)
    """
    xs, ys = [], []
    n_samples = len(saliency_maps) - predict_time
    for start in range(n_samples):
        window = [np.stack([saliency_maps[start + k], fixation_maps[start + k]], axis=0)
                  for k in range(predict_time)]
        xs.append(np.array(window))
        ys.append(fixation_maps[start + predict_time])
    return np.array(xs), np.array(ys)


def train(inputs, labels, frameId):
    """Online-train the global `net` on one sliding window of frames.

    Relies on module-level globals: net, optimizer, loss_function, args,
    writerLoss (tensorboardX SummaryWriter) and modelPath.

    :param inputs: numpy window of stacked [saliency, fixation] channel maps
    :param labels: numpy ground-truth fixation map for the window target
    :param frameId: id of the window's first frame (logging only)
    :return: (max_loss, min_loss, running_loss); running_loss is the loss
             of the final epoch
    """
    running_loss = 0
    min_loss = float('inf')
    max_loss = -float('inf')
    print(f"\nTraining...{args.learning_rate}")

    # Prepend a batch dimension of 1 before feeding the network.
    inputs = torch.from_numpy(inputs[np.newaxis,:])
    labels = torch.from_numpy(labels)
    inputs = inputs.to(torch.float32)  # one batch of `windows` frames, each [90, 160] with 2 channels
    labels = labels.to(torch.float32)
    if torch.cuda.is_available():
        inputs = inputs.cuda()
        labels = labels.cuda()

    for epoch in range(args.epoches):
        optimizer.zero_grad()

        # output_list: per-hidden-layer predictions
        # output_last: viewport prediction of the last hidden layer
        output_list, output_last, conv_output, conv_output_list_ret = net(inputs)

        # Loss is computed against the first conv output map only.
        # loss = loss_function(conv_output_list_ret, labels)
        loss = loss_function(conv_output[0][0, 0], labels)
        loss.backward()
        optimizer.step()

        # global_step restarts at 0 for every window, so curves overlap in
        # TensorBoard across windows.
        writerLoss.add_scalar('Loss', loss.item(), global_step=epoch, walltime=time.time())

        # if epoch == args.epoches // 2:
        #     args.learning_rate *= 0.1

        running_loss = loss.item()
        max_loss = max(max_loss ,loss.item())
        min_loss = min(min_loss, loss.item())
        if (epoch+1) % 1 == 0:
            # print('[%5d] loss: %.6f' %(epoch, loss.item()))
            print('\r[%d]/[%d] trainLoss: %.6f' %(epoch+1, args.epoches, loss.item()), end='')

    # NOTE(review): the shared writer is closed after every window although
    # train() is called again with it — confirm later add_scalar calls are
    # still recorded.
    writerLoss.close()
    # Checkpoint the updated weights after every window.
    torch.save(net.state_dict(), modelPath)
    print(f"\nTrain frameId={frameId}~{frameId+args.windows}",
          "Updated model have been saved to", modelPath)
    # print('Updated model have been saved to', modelPath)
    return max_loss, min_loss, running_loss

def test(inputs, labels, frameId, startTime, writer):
    """Evaluate the current `net` on one window and log per-tile metrics.

    Uses module-level globals: net, args, loss_function, total_frame,
    thres_recall, tileAccList, recallList, precisionList, and the globals
    declared below.

    :param inputs: numpy window of stacked [saliency, fixation] channel maps
    :param labels: numpy ground-truth fixation map
    :param frameId: start frame's Id
    :param startTime: test start timestamp (for elapsed-time reporting)
    :param writer: csv writer receiving one metrics row per frame
    """

    global pre_image, real_image, good_test_frame, total_test_frame

    inputs = torch.from_numpy(inputs[np.newaxis, :])
    labels = torch.from_numpy(labels)
    inputs = inputs.to(torch.float32)  # one batch of `windows` frames, each [90, 160] with 2 channels
    labels = labels.to(torch.float32)
    if torch.cuda.is_available():
        inputs = inputs.cuda()
        labels = labels.cuda()

    output_list, output_last, conv_output, conv_output_list_ret = net(inputs)

    # Only the single prediction of this window is scored.
    for i in range(1):
        try:
            # pre_image = conv_output_list_ret[0, i, 0].detach().cpu().numpy()
            pre_image = conv_output[0][0, 0].detach().cpu().numpy()
            real_image = labels.detach().cpu().numpy()
        except IndexError:
            # On failure the previous globals keep their old values.
            print('i=', i, conv_output_list_ret.shape)

        # NOTE(review): pre_image is left un-thresholded (binarisation below
        # is commented out), so the tile counts compare raw floats against
        # the binarised real map — confirm this is intended.
        # pre_image[pre_image < args.threshold] = 0
        # pre_image[pre_image > args.threshold] = 1
        # Binarise the ground truth to -1 / 1 around the threshold.
        real_image[real_image < args.threshold] = -1
        real_image[real_image > args.threshold] = 1

        if args.showImage:
            plt.imshow(pre_image)
            plt.axis('off')
            plt.title(f'pre_image[{frameId + i}]')
            plt.show()

            plt.imshow(real_image)
            plt.axis('off')
            plt.title(f'real_image[{frameId + i}]')
            plt.show()

        # pre_image = split_matrix.return_matrix(pre_image)
        # real_image = split_matrix.return_real_matrix(real_image)
        # Small epsilon to avoid division by zero in recall/precision.
        compensate = 1e-2

        fit = sum(map(sum, (pre_image + real_image) != 1))  # tiles where predicted and real maps are in the same state
        mistake = pre_image.size - fit
        fetch = sum(map(sum, (pre_image == 1)))             # tiles fetched by the prediction: used for bandwidth accounting
        need = sum(map(sum, (real_image == 1)))             # tiles equal to 1 in the real map
        right = sum(map(sum, (pre_image + real_image) > 1)) # tiles that are 1 in both the predicted and the real map
        wrong = fetch - right

        tileAccuracy = round(fit / real_image.size, 4)
        recall = round((right + compensate) / (need + compensate), 4)
        precision = round((right + compensate) / (fetch + compensate), 4)
        # A frame counts as "good" when its recall reaches the threshold.
        if recall >= thres_recall:
            good_test_frame += 1
        total_test_frame += 1
        metrics = [frameId + i, fit, mistake, fetch, need, right, wrong, tileAccuracy, recall, precision]
        if right == need:
            metrics.append(True)
        else:
            metrics.append(False)

        tileAccList.append(tileAccuracy)
        recallList.append(recall)
        precisionList.append(precision)

        writer.writerow(metrics)

        if (i + 1) % 1 == 0:
            # print('Frame [{}/{}], Loss: {:.5f}'
            #       .format(i + 1 + frameId, total_frame, loss_function(conv_output_list_ret, labels).item()))
            print('Frame [{}/{}], Loss: {:.5f}'
                  .format(i + 1 + frameId, total_frame, loss_function(conv_output[0][0, 0], labels).item()))
    endTime = time.time()

    print(f'Test frameId={frameId}~{frameId + args.windows} finished, time: {round(endTime - startTime, 3)}')

if __name__ == '__main__':
    # load the settings
    args = get_args()
    # Gaussian kernel size: ksize.width and ksize.height may differ, but
    # both must be positive and odd.
    gblur_size_width = args.gblur_size_width
    gblur_size_high = args.gblur_size_high

    net = convlstm.ConvLSTM_CNN(input_dim=2, hidden_dim=args.hidden_dim, kernel_size=args.kernel_size,
                                num_layers=args.num_layers, batch_first=args.batch_first)
    print("Total number of paramerters in networks is {}  ".format(sum(x.numel() for x in net.parameters())))
    net.initialize_weight()
    if torch.cuda.is_available():
        net = net.cuda()

    loss_function = nn.MSELoss()
    # optimizer = torch.optim.SGD(net.parameters(), lr=0.0001, momentum=0.8)
    optimizer = torch.optim.Adam(net.parameters(), lr=args.learning_rate, betas=(0.9, 0.99))
    # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1)

    # videoId -> video name, used to build file names.
    fileDict = {0: 'Conan1', 1: 'Skiing', 2: 'Alien', 3: 'Conan2', 4: 'Surfing',
                5: 'War', 6: 'Cooking', 7: 'Football', 8: 'Rhinos'}
    salFileList = ["saliency_ds2_topic0", "saliency_ds2_topic1", "saliency_ds2_topic2",
                   "saliency_ds2_topic3", "saliency_ds2_topic4", "saliency_ds2_topic5",
                   "saliency_ds2_topic6", "saliency_ds2_topic7", "saliency_ds2_topic8"]  # attention dataset
    salDict = {0: '1-1-Conan Gore Fly.npy', 1: '1-2-Front.npy', 2: '1-3-360 Google Spotlight Stories_ HELP.npy',
               3: '1-4-Conan Weird Al.npy', 4: '1-5-TahitiSurf.npy', 5: '1-6-Falluja.npy',
               6: '1-7-Cooking Battle.npy', 7: '1-8-Football.npy', 8: '1-9-Rhinos.npy'} # dataset trained with s2cnn

    timeBasePath = 'D:/VR_project/ViewPrediction/frames/timeStamp/'

    # Pick saliency/timestamp directories according to the chosen dataset.
    if args.salFlag == "cnn_sphere":
        salBasePath = 'D:/VR_project/ViewPrediction/frames/saliency/'
    elif args.salFlag == "sample_attention":
        salBasePath = 'D:/VR_project/ViewPrediction/frames/attentionSaliency/'
        timeBasePath = 'D:/VR_project/ViewPrediction/frames/attentionTimeStamp/'
    elif args.salFlag == "attention":
        salBasePath = 'D:/VR_project/PanoSaliency/data/'
    else:
        salBasePath = 'D:/VR_project/ViewPrediction/frames/attentionSaliency/'
        timeBasePath = 'D:/VR_project/ViewPrediction/frames/attentionTimeStamp/'
        print("Warning! Please choose right saliency dataset! Default: sample_attention!")

    idx = 6
    for videoId in range(idx, idx+1):   # video id (single video selected via idx)
        if videoId == 7:
            continue
        for userId in range(3, 49):     # user id

            modelPath = args.model_path + f"convlstm_online_{fileDict[videoId].lower()}_{userId}.pth"

            if args.salFlag == "cnn_sphere":
                saliencyPath = salBasePath + salDict[videoId]
            elif args.salFlag == "attention":
                saliencyPath = salBasePath + salFileList[videoId]
            else:
                saliencyPath = salBasePath + f"{videoId}-{fileDict[videoId]}.npy"

            # NOTE: pickle.load is only acceptable because these saliency
            # files are locally generated; never unpickle untrusted data.
            try:
                saliency_array = np.array(pickle.load(open(saliencyPath, 'rb'), encoding='bytes'), dtype=object)
            except _pickle.UnpicklingError:
                saliency_array = np.load(saliencyPath, allow_pickle=True)
            total_frame = len(saliency_array)
            print(f"Load {total_frame} sal_maps, saliency dataset from {saliencyPath}")

            saliency_maps, fixation_maps = create_sal_fix(saliency_array, videoId, userId)

            transform = transforms.Compose([transforms.ToTensor()])
            sal_fix_maps, label_maps = create_dataset(saliency_maps, fixation_maps, args.windows)

            # Globals consumed/updated by test() across windows.
            pre_image, real_image = None, None
            good_test_frame, total_test_frame = 0, 0
            tileAccList, recallList, precisionList = [], [], []
            thres_recall = 0.6
            start_time = time.time()

            writerLoss = SummaryWriter('runs/loss')

            logName = args.log_path + f"{fileDict[videoId]}_{userId}.csv"
            with open(logName, 'w', newline='') as f:
                logWriter = csv.writer(f, dialect='excel')
                logWriter.writerow(['FrameId', 'Match', 'Mistake', 'PredictTile', 'RealTile',
                                    'RightTile', 'RedundantTile', 'Acc', 'Recall', 'Preicse', 'Frame'])

                # Slide a train/test window across the video, one step per window.
                for frame_id in range(args.windows, total_frame - 2 * args.windows, args.windows):
                    # net.initialize_weight()
                    window_start_time = time.time()

                    train(sal_fix_maps[frame_id], label_maps[frame_id], frame_id)
                    test(sal_fix_maps[frame_id], label_maps[frame_id], frame_id, window_start_time, logWriter)

                end_time = time.time()
                # Append summary rows after all per-frame metrics.
                logWriter.writerows([
                    ['total_time', end_time - start_time],
                    ['total_test_frame', total_test_frame],
                    ['good_test_frame', good_test_frame],
                    ['threshold_recall', thres_recall],
                    ['FrameAccuracy', round(good_test_frame / total_test_frame, 4)],
                    ['AverageTileAccuracy', np.mean(tileAccList)],
                    ['AverageRecall', np.mean(recallList)],
                    ['AveragePrecision', np.mean(precisionList)]
                ])

            print(f'Video={fileDict[videoId]}, UserId={userId} finished!')
            # NOTE(review): this break stops after the first userId (3) only —
            # confirm whether the full user loop was meant to run.
            break