import os
import csv
import cv2
import math
import time
import random
import pickle
import _pickle
import numpy as np

import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader

from sklearn import preprocessing
import matplotlib.pyplot as plt
from Arguments import get_args
from SphereConvLSTM import SCLNet
from ConvLSTM import ConvLSTMNet


def de_interpolate(raw_tensor, N):
    """Inverse of ``F.interpolate(x, scale_factor=10, mode="nearest")``.

    Collapses an upsampled map back down by averaging the ten diagonally
    strided sub-grids ``raw_tensor[:, k::10, k::10]`` for k = 0..9.

    :param raw_tensor: tensor of shape [N, 90, 160]
    :param N: size of the leading (channel) dimension of ``raw_tensor``
    :return: tensor of shape [N, 9, 16]
    """
    grids = [raw_tensor[:, k::10, k::10] for k in range(10)]
    total = torch.zeros((N, 9, 16))
    for grid in grids:
        total = total + grid
    return total / 10


class FoVDataset(Dataset):
    """Dataset pairing each video frame with its current and future fixation maps.

    Item ``idx`` yields ``(frame, fixation, predict_fixation)`` where
    ``predict_fixation`` is the map ``predict_time`` frames ahead; near the
    end of the sequence the current map is reused as its own future.
    """

    def __init__(self, fixation_maps, frame_path, predict_time, transform=None):
        self.fixation_maps = fixation_maps
        self.len = len(fixation_maps)
        self.frame_path = frame_path
        self.predict_time = predict_time
        self.transform = transform

    def __len__(self):
        return self.len

    def __getitem__(self, idx):
        # frame files on disk are 1-based
        frame = cv2.imread(self.frame_path + f'/{idx+1}.jpg')

        fixation = self.fixation_maps[idx]
        has_future = idx < self.len - self.predict_time
        future_idx = idx + self.predict_time if has_future else idx
        predict_fixation = self.fixation_maps[future_idx]

        if self.transform:
            frame, fixation, predict_fixation = (
                self.transform(item) for item in (frame, fixation, predict_fixation))
            # downsample each map to the [C, 9, 16] tile grid
            frame, fixation, predict_fixation = (
                de_interpolate(item, len(item)) for item in (frame, fixation, predict_fixation))

        return frame, fixation, predict_fixation


def sequentialData(dataset, idx, windows):
    """Assemble a sliding window of samples into batched sequence tensors.

    For each of the ``windows`` consecutive items starting at ``idx``, the
    frame and fixation tensors are concatenated along the channel axis and
    the future fixation map is kept as the target.

    :param dataset: indexable, yielding (frame, fixation, future_fixation)
    :param idx: first index of the window
    :param windows: window length
    :return: (mix, targets) with a leading batch dimension of 1 —
             mix: [1, windows, C_frame + C_fix, H, W],
             targets: [1, windows, C_fix, H, W]
    """
    targets = []
    merged = []
    for offset in range(windows):
        frame, fix, future_fix = dataset[idx + offset]
        targets.append(future_fix)
        merged.append(torch.cat([frame, fix], dim=0))

    mix = torch.stack(merged, dim=0).unsqueeze(0)
    target = torch.stack(targets, dim=0).unsqueeze(0)
    return mix, target

def train(dataset, frameId):
    """Online-train the module-level ``net`` on one window for ``args.epochs`` steps.

    Uses the module-level ``args``, ``net``, ``optimizer`` and
    ``loss_function``. Only the last frame of the window is supervised.

    :param dataset: indexable dataset yielding (frame, fixation, future_fixation)
    :param frameId: first frame index of the training window
    :return: loss of the final step, as a float
    """
    loss = 0
    print(f"Train frameId={frameId}~{frameId+args.windows}...", end='\t')
    for step in range(args.epochs):
        batch_in, batch_gt = sequentialData(dataset, frameId, args.windows)
        if torch.cuda.is_available():
            batch_in, batch_gt = batch_in.cuda(), batch_gt.cuda()

        optimizer.zero_grad()
        # inputs: [1, windows, 2C, 9, 16]; targets: [1, windows, C, 9, 16]
        batch_in = batch_in.to(torch.float32)
        batch_gt = batch_gt.to(torch.float32)

        pred = net(batch_in)

        # supervise only against the last frame of the window
        loss = loss_function(pred, batch_gt[:, -1, :, :, :])
        loss.backward()
        optimizer.step()

        if (step + 1) % 2 == 0:
            print('\rTrain frameId=[%d]~[%d]...[%3d]/[%d] loss: %.6f' %
                  (frameId, frameId + args.windows, step + 1, args.epochs, loss.item()), end='')

    return loss.item()

def test(dataset, frameId, startTime, threshold):
    """Evaluate the module-level ``net`` on one window and log per-tile metrics.

    Binarizes the predicted and ground-truth fixation maps at ``threshold``,
    derives tile accuracy / recall / precision / TPR / FPR, appends them to
    the module-level metric lists and writes one CSV row via ``logWriter``.

    :param dataset: indexable dataset yielding (frame, fixation, future_fixation)
    :param frameId: first frame index of the evaluation window
    :param startTime: wall-clock start of this window, used for timing
    :param threshold: binarization threshold applied to both maps
    """
    global good_test_frame, total_test_frame

    inputs, labels = sequentialData(dataset, frameId, args.windows)

    with torch.no_grad():
        # Single GPU transfer (the original duplicated inputs.cuda()).
        if torch.cuda.is_available():
            inputs = inputs.cuda()
            labels = labels.cuda()

        inputs = inputs.to(torch.float32)
        labels = labels.to(torch.float32)

        predict = net(inputs)

        # Only the last frame of the window is scored.
        for j in range(args.windows - 1, args.windows):
            try:
                pre_image = predict[0, 0].detach().cpu().numpy()
                real_image = labels[0, j, 0].detach().cpu().numpy()
            except IndexError:
                print('j=', j, predict.shape)
                continue  # arrays are undefined here; skip instead of crashing below

            # Strict ">" binarization: the old paired "<"/">" assignments left
            # values exactly equal to the threshold un-binarized.
            pre_image = (pre_image > threshold).astype(np.float32)
            real_image = (real_image > threshold).astype(np.float32)

            if args.showImage:
                plt.imshow(pre_image)
                plt.axis('off')
                plt.title(f'pre_image[{frameId+j}]')
                plt.show()

                plt.imshow(real_image)
                plt.axis('off')
                plt.title(f'real_image[{frameId+j}]')
                plt.show()

            # Tile statistics: maps are 0/1, so boolean sums count tiles.
            fit = int(((pre_image + real_image) != 1).sum())   # predicted state == real state
            mistake = pre_image.size - fit
            fetch = int((pre_image == 1).sum())                # tiles the prediction fetches
            need = int((real_image == 1).sum())                # tiles actually viewed
            right = int(((pre_image + real_image) > 1).sum())  # tiles fetched AND viewed
            wrong = fetch - right
            TP = right
            FP = fetch - right
            FN = need - right
            TN = pre_image.size - fetch - need + right
            # Guard empty denominators (previously a possible ZeroDivisionError).
            TPR = round(TP / (TP + FN), 3) if TP + FN else 0.0
            FPR = round(FP / (FP + TN), 3) if FP + TN else 0.0
            if fetch == 0:
                print("fetch==0")

            eps = 1e-3  # smoothing so empty fetch/need sets don't divide by zero
            tileAccuracy = round(fit / real_image.size, 4)
            recall = round((right + eps) / (need + eps), 4)
            precision = round((right + eps) / (fetch + eps), 4)
            if recall >= thres_recall:
                good_test_frame += 1
            total_test_frame += 1
            metrics = [frameId + j, fit, mistake, fetch, need, right, wrong,
                       tileAccuracy, recall, precision, TPR, FPR]

            tileAccList.append(tileAccuracy)
            recallList.append(recall)
            precisionList.append(precision)
            TPRList.append(TPR)
            FPRList.append(FPR)

            logWriter.writerow(metrics)

    endTime = time.time()
    timeList.append(round(endTime - startTime, 3))
    print(f'\nTest frameId={frameId}~{frameId+args.windows} finished, time: {round(endTime - startTime, 3)}')

if __name__ == '__main__':
    # load the settings
    args = get_args()

    net = ConvLSTMNet(input_dim=args.input_size, hidden_dim=args.hidden_size,
                      kernel_size=(3, 3), num_layers=args.num_layers, batch_first=True)
    print("Total number of parameters in networks is {}  ".format(sum(x.numel() for x in net.parameters())))

    if torch.cuda.is_available():
        net = net.cuda()

    loss_function = nn.MSELoss(reduction='mean')

    # video folder names on disk, indexed by videoId
    fileList = ['1-1-Conan Gore Fly', '1-2-Front', '1-3-360 Google Spotlight Stories_ HELP',
                '1-4-Conan Weird Al', '1-5-TahitiSurf', '1-6-Falluja',
                '1-7-Cooking Battle', '1-8-Football', '1-9-Rhinos']
    # short video names used for the FoV .npy folders and log/model paths
    fileDict = {0: 'Conan1', 1: 'Skiing', 2: 'Alien', 3: 'Conan2', 4: 'Surfing',
                5: 'War', 6: 'Cooking', 7: 'Football', 8: 'Rhinos'}

    frameBase = 'D:/VR_project/ViewPrediction/frames/'
    FoVPath = 'D:/VR_project/ViewPrediction/frames/timeStampFoV/'

    idx = 1
    for videoId in range(idx, 2):
        print('#'*30, fileDict[videoId], '#'*30)
        video_time = 0
        if videoId == 7:
            continue
        for userId in range(8, 9):
            print('*'*20, f'test video_{videoId}...for user={userId}', '*'*20)
            # fresh optimizer / scheduler per user so runs don't leak state
            optimizer = torch.optim.Adam(net.parameters(), lr=args.learning_rate, betas=(0.9, 0.99))
            scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')

            transform = transforms.Compose([transforms.ToTensor()])
            max_min_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
            # per-frame fixation maps for this (video, user), blurred then
            # min-max normalized to [0, 1] over the whole video
            init_fix_maps = np.load(f'{FoVPath}{fileDict[videoId]}/{userId}.npy', allow_pickle=True)
            headmap = np.array(
                [cv2.GaussianBlur(item, (args.blur_size_width, args.blur_size_high), 0) for item in init_fix_maps])
            fixation_maps = max_min_scaler.fit_transform(headmap.ravel().reshape(-1, 1)).reshape(headmap.shape)
            framePath = frameBase + fileList[videoId]
            fov_dataset = FoVDataset(fixation_maps, framePath, args.windows, transform)
            # sanity check: force one read so missing frames/maps fail early
            _ = fov_dataset[0]
            total_frame = len(fov_dataset)
            print(f"Total_frame={total_frame}")

            # ROC thresholds: 0.00, 0.02, ..., 0.98
            threshold = [item / 100 for item in list(range(0, 100, 2))]

            modelPath = args.save_path + f'{fileDict[videoId]}.pth'
            log_path = args.log_path + f'{fileDict[videoId]}/thres'
            # makedirs creates missing parents too (os.mkdir failed when the
            # video sub-directory did not already exist)
            os.makedirs(log_path, exist_ok=True)

            for thres in threshold:
                logName = log_path + f"/{userId}_{thres}.csv"

                with open(logName, 'w', newline='') as f:
                    logWriter = csv.writer(f, dialect='excel')
                    logWriter.writerow(['FrameId', 'Match', 'Mistake', 'PredictTile', 'RealTile',
                                        'RightTile', 'RedundantTile', 'Acc', 'Recall', 'Precise',
                                        'TPR', 'FPR'])

                    total_test_frame = 0
                    good_test_frame = 0
                    tileAccList, recallList, precisionList = [], [], []
                    TPRList, FPRList = [], []
                    thres_recall = 0.6
                    start_time = time.time()
                    timeList = []

                    net.initialize_weight()
                    for frame_id in range(args.windows, total_frame - 2 * args.windows):
                        window_start_time = time.time()

                        # online train; reset weights when this window failed to converge
                        loss = train(fov_dataset, frame_id)
                        if loss > 0.0001:
                            net.initialize_weight()
                        test(fov_dataset, frame_id + args.windows, window_start_time, thres)

                    end_time = time.time()
                    user_time = round(end_time - start_time, 5)
                    video_time += end_time - start_time
                    # guard the ratio when no frames were tested (short video)
                    frame_acc = round(good_test_frame / total_test_frame, 4) if total_test_frame else 0.0
                    logWriter.writerows([
                        ['total_time', user_time],
                        ['average_time', np.mean(timeList)],
                        ['total_test_frame', total_test_frame],
                        ['good_test_frame', good_test_frame],
                        ['threshold_recall', thres_recall],
                        ['FrameAccuracy', frame_acc],
                        ['AverageTileAccuracy', np.mean(tileAccList)],
                        ['AverageRecall', np.mean(recallList)],
                        ['AveragePrecision', np.mean(precisionList)],
                        ['AverageTPR', np.mean(TPRList)],
                        ['AverageFPR', np.mean(FPRList)]
                    ])

                print(f'Test video={fileDict[videoId]} for userId={userId} finished, time={user_time}s')
        print(f'Test video_{videoId} finished, time={round(video_time, 4)}s')