import pickle
from numpy.lib.utils import info
import skvideo
# skvideo.setFFmpegPath('./ffmpeg-N-99888-g5c7823ff1c-win64-gpl/bin')
import skvideo.io
from PIL import Image
import math
import scipy.io
import torch
import numpy as np
import random
from torch._C import _llvm_enabled
from torch.utils import data
from torch.utils.data import Dataset, dataset
from torchvision import transforms, models
import os
import pandas as pd
from torchvision.io import read_image
import json

from utils import calcSpectrogram, get_audio_features, get_video_features


# Pin which GPU is visible to this process; must happen before torch first
# queries CUDA. Fall back to CPU when no CUDA device is available.
os.environ['CUDA_VISIBLE_DEVICES'] = '8'
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')


class AttentionDataset(Dataset):
    """Dataset of reference/distorted audio-visual clip pairs for A/V quality assessment.

    Each item loads one reference and one distorted video (plus their audio
    tracks), extracts saliency-guided video features and spectrogram-based
    audio features via the project-local ``utils`` helpers, and returns the
    absolute feature differences together with the MOS label.
    """

    def __init__(self, annotations_file: str, transform=None, target_transform=None) -> None:
        '''
        Load the annotation index from ``annotations_file``.

        annotations_file: path to a pickled dict mapping str(index) -> record
            with keys 'ref_video', 'ref_audio', 'dis_video', 'dis_audio', 'MOS'
            (as consumed by __getitem__). NOTE(review): the original docstring
            said "json", but the file is read with pickle — pickle.load can
            execute arbitrary code; only load trusted files.
        transform / target_transform: accepted for torchvision-dataset
            signature compatibility but currently unused.
        '''
        with open(annotations_file, 'rb') as f:
            data = pickle.load(f)
        # Entire annotation mapping, keyed by str(index).
        self.annotations_file = data

    def __len__(self) -> int:
        # Number of annotated clip pairs.
        return len(self.annotations_file)

    def __getitem__(self, idx: int):
        """Return (Feature, label, length, seg_video_len) for sample ``idx``.

        Feature: concatenated |dis - ref| video+audio features — NOTE(review):
            see the segment loop below; only the LAST segment's features are
            returned.
        label: MOS score from the annotation record.
        length: 1x1 float tensor holding seg_video_len + seg_audio_len.
        seg_video_len: number of video-feature rows per segment.
        """
        # Hard-coded dataset roots on the lab storage server.
        REFERENCE_PATH = "/DATA7_DB7/data/weixionglin/LIVE-SJTU_AVQA/Reference/"
        DISTORTED_PATH = "/DATA7_DB7/data/weixionglin/LIVE-SJTU_AVQA/Distorted/"
        # Expected raw-video geometry (overwritten below from the decoded data).
        video_height, video_width = 1080, 1920
        # NOTE: shadows the `info` imported from numpy.lib.utils at module top.
        info = self.annotations_file[str(idx)]
        ref_video_path = REFERENCE_PATH + info["ref_video"]
        ref_audio_path = REFERENCE_PATH + info["ref_audio"]
        dis_video_path = DISTORTED_PATH + info["dis_video"]
        dis_audio_path = DISTORTED_PATH + info["dis_audio"]

        # Video data pre-processing: decode both clips fully into memory
        # (frames, H, W, C) as yuvj420p-sourced RGB arrays.
        ref_video_data = skvideo.io.vread(ref_video_path, video_height, video_width, inputdict={'-pix_fmt': 'yuvj420p'})
        dis_video_data = skvideo.io.vread(dis_video_path, video_height, video_width, inputdict={'-pix_fmt': 'yuvj420p'})

        # HWC uint8 -> CHW float tensor in [0, 1].
        transform = transforms.Compose([
            transforms.ToTensor(),
        ])

        # Fixed clip length of 192 frames; assumes every clip has >= 192 frames.
        video_length = 192 #dis_video_data.shape[0]
        video_channel = dis_video_data.shape[3]
        video_height = dis_video_data.shape[1]
        video_width = dis_video_data.shape[2]
        transformed_dis_video = torch.zeros([video_length, video_channel, video_height, video_width])
        transformed_ref_video = torch.zeros([video_length, video_channel, video_height, video_width])

        # Convert each of the first 192 frames of both clips to tensors.
        for frame_idx in range(192):
            dis_frame = dis_video_data[frame_idx]
            dis_frame = Image.fromarray(dis_frame)
            dis_frame = transform(dis_frame)
            transformed_dis_video[frame_idx] = dis_frame

            ref_frame = ref_video_data[frame_idx]
            ref_frame = Image.fromarray(ref_frame)
            ref_frame = transform(ref_frame)
            transformed_ref_video[frame_idx] = ref_frame
        # Stride between patch origins; for 1080p this is ceil(1.08)*100 = 200 px.
        dis_patch = math.ceil(video_height/1000)*100
        #print('Extract Video length: {}'.format(transformed_dis_video.shape[0]))

        # Crop image patches: walk a dis_patch-strided grid over the frame,
        # clamping the last row/column so every 224x224 patch fits inside.
        patchSize = 224
        position_width = []
        position_height = []
        for h in range(0, video_height, dis_patch):
            if h < video_height - patchSize + 1:
                for w in range(0, video_width, dis_patch):
                    if w < video_width - patchSize:
                        position_height.append(h)
                        position_width.append(w)
                    else:
                        # Clamp the final column to keep the patch in-bounds.
                        position_height.append(h)
                        position_width.append(video_width - patchSize)
                        break
            else:
                # Last row: clamp vertically, then emit one final clamped row.
                for w in range(0, video_width, dis_patch):
                    if w < video_width - patchSize:
                        position_height.append(video_height - patchSize)
                        position_width.append(w)
                    else:
                        position_height.append(video_height - patchSize)
                        position_width.append(video_width - patchSize)
                        break
                break

        # Video feature extraction
        position = [position_height, position_width]

        # Using saliency detection results from sal_position.m
        SDdatainfo = './demo/test_position.mat'
        SDInfo = scipy.io.loadmat(SDdatainfo)
        # Convert MATLAB 1-based frame indices to 0-based.
        sal_index = SDInfo['sort_frame'] - 1

        dis_video_features, ref_video_features = get_video_features(transformed_dis_video, transformed_ref_video, position, sal_index, device)
        # print(dis_video_features.to('cpu').numpy()-np.load('../CNN_features_SJTU/skip2_SD_BigGreenRabbit_QP35.yuv_res5.npy')[96:,:4096])

        # Audio data preparation: spectrogram -> tensor for both tracks.
        transform = transforms.Compose([
            transforms.ToTensor(),
        ])
        [dis_S, dis_T] = calcSpectrogram(dis_audio_path)
        transforms_dis_audio = transform(dis_S)
        [ref_S, ref_T] = calcSpectrogram(ref_audio_path)
        transforms_ref_audio = transform(ref_S)
        # Assumed clip frame rate — TODO confirm this matches every source clip.
        frame_rate = 29.97
        dis_audio_features = get_audio_features(transforms_dis_audio, dis_T, frame_rate, video_length, device)
        ref_audio_features = get_audio_features(transforms_ref_audio, ref_T, frame_rate, video_length, device)

        # Quality prediction using ANNAVQA: split features into seg_num
        # temporal segments of seg_video_len video rows + seg_audio_len audio
        # rows each.
        seg_num = 4
        tmp_video_length = 96
        min_audio_len = 96
        feat_dim = 4096
        seg_video_len = int(tmp_video_length / seg_num)  # 24
        seg_audio_len = int(min_audio_len / seg_num)
        length = np.zeros((1, 1))
        length[0] = seg_video_len + seg_audio_len  # 48
        length = torch.from_numpy(length).float()

        with torch.no_grad():
            # NOTE(review): `Feature` is overwritten on every iteration, so
            # only the LAST segment (seg_index == 3) is returned. The
            # commented-out lines suggest a per-segment model call was
            # intended — confirm whether all segments should be kept.
            for seg_index in range(seg_num):
                # features = torch.zeros(1, seg_video_len + seg_audio_len, feat_dim, device=device)
                video_features = abs(dis_video_features[seg_video_len * seg_index:seg_video_len * (seg_index + 1), :feat_dim] -
                    ref_video_features[seg_video_len * seg_index: seg_video_len * (seg_index + 1), :feat_dim])
                audio_features = abs(ref_audio_features[seg_audio_len * seg_index:seg_audio_len * (seg_index + 1), :feat_dim]
                                    - dis_audio_features[seg_audio_len * seg_index:seg_audio_len * (seg_index + 1), :feat_dim])
                Feature = torch.cat([video_features.float(), audio_features.float()], axis=0)
                # features[0, :Feature.shape[0], :] = Feature
                # y_pred = y_pred + model(features, length, seg_video_len).to('cpu').numpy()

        label = info['MOS']
        return Feature, label, length, seg_video_len


if __name__ == '__main__':
    # Smoke test: build the dataset from the pickled annotations and pull
    # a single sample through the full feature-extraction pipeline.
    print('dataset')
    attention_ds = AttentionDataset('train.pkl')
    print(attention_ds, len(attention_ds))
    first_sample = next(iter(attention_ds))
    print(first_sample)
