import bisect
import os
import cv2
import csv
import time
import math
import cmath
import random
import _pickle
import pickle
import torch
import convlstm
import bisect

import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
import torchvision.transforms as transforms

from torch.utils.data import Dataset, DataLoader
from sklearn import preprocessing
from pyquaternion import Quaternion
from Arguments import get_args


def index_to_xyz(pre_image: np.ndarray) -> tuple:
    """Convert the brightest pixel of an equirectangular map to a 3D unit direction.

    :param pre_image: 2D array (H, W); the argmax cell is taken as the viewpoint.
    :return: (dx, dy, dz) direction cosines of the viewing direction.
    """
    H, W = pre_image.shape
    pos = np.argmax(pre_image)    # flat index of the maximum value
    x, y = divmod(pos, W)         # row/col of the maximum
    # Undo the axis flip applied when the fixation map was created.
    point = [(H-x-1) % H, (W-y-1) % W]

    # Latitude in degrees: row 0 maps to +90, row H-ish to -90.
    # BUG FIX: np.arcsin returns radians, but phi is consumed as degrees below
    # (phi/180.0 * np.pi), matching theta's units — convert to degrees here.
    phi = np.degrees(np.arcsin(1 - 2/H * point[0]))
    # Longitude in degrees, measured the opposite way around from the column index.
    temp = 360 / W * point[1]
    theta = 360 - temp

    dx = np.cos(phi/180.0 * np.pi) * np.cos(theta/180.0 * np.pi)
    dy = np.sin(phi/180.0 * np.pi)
    dz = np.cos(phi/180.0 * np.pi) * np.sin(theta/180.0 * np.pi)

    return dx, dy, dz

def get_index_1x1CMP(dxyz) -> list:
    """Map a 3D unit direction onto the six faces of a 1x1 cube map (CMP).

    :param dxyz: (x, y, z) direction cosines of the viewing direction.
    :return: one-hot list of length 6; tiles[i] == 1 for the face the direction hits.
    """
    tiles = [0, 0, 0, 0, 0, 0]
    # BUG FIX: cmath.sqrt returns a complex number, and ordering comparisons
    # (>=, <=) against complex raise TypeError on every call — use math.sqrt.
    limit = math.sqrt(2) / 2

    x, y, z = dxyz
    if 1 >= x >= limit >= y >= -limit and -limit <= z <= limit:
        tiles[0] = 1       # +x face
    elif -1 <= x <= -limit <= y <= limit and -limit <= z <= limit:
        tiles[1] = 1       # -x face
    elif -limit <= x <= limit <= y <= 1 and -limit <= z <= limit:
        tiles[2] = 1       # +y face
    elif limit >= x >= -limit >= y >= -1 and -limit <= z <= limit:
        tiles[3] = 1       # -y face
    elif limit >= x >= -limit >= z >= -1 and -limit <= y <= limit:
        tiles[4] = 1       # -z face
    elif -limit <= x <= limit <= z <= 1 and -limit <= y <= limit:
        tiles[5] = 1       # +z face
    else:
        print("index out!!!")
    return tiles

def get_index_2x2CMP(directions: np.ndarray) -> list:
    """Map directions onto a 2x2-tiled cube map. Stub — not implemented yet."""
    pass

# CALCULATE DEGREE DISTANCE BETWEEN TWO 3D VECTORS
def unit_vector(vector):
    """Return *vector* scaled to unit length (Euclidean norm)."""
    norm = np.linalg.norm(vector)
    return vector / norm

def degree_distance(v1, v2):
    """Return the angle between vectors *v1* and *v2*, in degrees."""
    # Normalize both operands, then recover the angle from the dot product.
    u1 = v1 / np.linalg.norm(v1)
    u2 = v2 / np.linalg.norm(v2)
    cosine = np.clip(np.dot(u1, u2), -1.0, 1.0)   # clip guards against rounding
    return np.arccos(cosine) / np.pi * 180

# Compute spherical angles (theta, phi) from a 3D direction vector
def vector_to_ang(_v):
    """Convert a 3D gaze direction into spherical angles (theta, phi) in degrees.

    phi is the latitude (90 minus the angle to the +y axis); theta is the
    signed longitude relative to +x, negative when the direction leans to +z.
    """
    _v = np.array(_v)
    up = np.array([0, 1, 0])
    # Latitude: angle to the vertical axis, remapped so the equator is 0.
    alpha = degree_distance(_v, up)
    phi = 90.0 - alpha
    # Vertical component of _v (assumes _v is unit length — matches callers here).
    vertical = [0, np.cos(alpha/180.0 * np.pi), 0]
    # Remainder lies in the horizontal (x, z) plane.
    horizontal = _v - vertical
    # Longitude magnitude: angle between the horizontal part and +x.
    theta = degree_distance(horizontal, np.array([1, 0, 0]))
    # Directions closer to +z than -z get a negative longitude.
    if degree_distance(_v, np.array([0, 0, -1])) > 90:
        theta = -theta
    return theta, phi

# Convert spherical angles to 2D viewpoint pixel coordinates
def ang_to_geoxy(_theta, _phi, _h, _w):
    """Project spherical angles (degrees) onto integer pixel coordinates of an H x W map."""
    # Latitude: phi = +90 maps to row 0, phi = -90 to row H.
    row = _h/2.0 - (_h/2.0) * np.sin(_phi/180.0 * np.pi)
    # Longitude: wrap negative angles into [0, 360), then mirror the axis.
    wrapped = 360 + _theta if _theta < 0 else _theta
    wrapped = 360 - wrapped
    col = wrapped * 1.0/360 * _w
    return int(row), int(col)


# Generate a fixation map from a viewpoint position
def create_fixation_map(viewData, idx, H, W):
    """Build an H x W binary map with a single 1 at the viewpoint *viewData[idx]*."""
    theta, phi = vector_to_ang(viewData[idx])
    row, col = ang_to_geoxy(theta, phi, H, W)
    fixation = np.zeros(shape=(H, W))
    # Flip both axes so the map orientation matches index_to_xyz's inverse.
    fixation[H - row - 1, W - col - 1] = 1
    return fixation

# Undo nearest-neighbour upsampling of saliency maps (stale comment replaced;
# it previously duplicated the fixation-map comment above)
def de_interpolate(raw_tensor, N):
    """
    F.interpolate(source, scale_factor=scale, mode="nearest")的逆操作！
    :param raw_tensor: [B, C, H, W]
    :param N
    :return: [B, C, H // 2, W // 2]
    """
    out = np.zeros((N, 9, 16))
    for idx in range(10):
        out = out + raw_tensor[:, idx::10, idx::10]
    return out

def return_fov(predict_array):
    """Turn a model prediction into a viewing direction and cube-map tiles.

    :param predict_array: prediction tensor; only the [0, 0] slice is used
    :return: ((dx, dy, dz), tiles) — direction cosines and the 1x1 CMP one-hot list
    """
    heat = predict_array[0, 0].detach().cpu().numpy()

    direction = index_to_xyz(heat)
    tiles = get_index_1x1CMP(direction)

    # Optional debug view of the predicted heat map (enabled via CLI args).
    if args.show_image:
        plt.imshow(heat)
        plt.axis('off')
        plt.show()
    return direction, tiles



def get_sal_fix(time_array, saliency_maps, req, window=16, time_window=2):
    """Build the ConvLSTM input from saliency maps and past viewport positions.

    :param time_array: timestamps of the saliency maps (assumed sorted ascending)
    :param saliency_maps: saliency maps of the video, shape [N, H, W]
    :param req: past samples, [{'time': t, 'x': x, 'y': y, 'z': z}, ...]
    :param window: number of frames in the prediction window
    :param time_window: how far ahead (seconds) the prediction targets
    :return: (inputs, predicted_time) — a [1, window, 2, H, W] float32 tensor
             (moved to GPU when available) and the timestamp being predicted
    """
    past_time = [float(i['time']) for i in req]
    past_pos = [[i['x'], i['y'], i['z']] for i in req]

    # Pad the position history with its last entry up to the window size.
    if len(past_pos) < window:
        past_pos.extend([past_pos[-1]] * (window - len(past_pos)))

    # Pick `window` saliency-map indices starting just after the first past
    # timestamp, repeating the last index near the end of the video.
    idx_start = bisect.bisect(time_array, past_time[0])
    if len(time_array) - idx_start < window:
        idx_list = list(range(idx_start, len(time_array)))
        idx_list.extend([idx_list[-1]] * (window - len(idx_list)))
    else:
        # BUG FIX: was hard-coded to 16; honor the `window` parameter so
        # non-default window sizes behave consistently with the branch above.
        idx_list = list(range(idx_start, idx_start + window))

    sal_maps = saliency_maps[idx_list]
    mmscaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
    sal_maps = mmscaler.fit_transform(sal_maps.ravel().reshape(-1, 1)).reshape(sal_maps.shape)

    N, H, W = saliency_maps.shape
    fixation_maps = np.array([create_fixation_map(past_pos, idx, H, W) for idx, _ in enumerate(past_pos)])
    headmap = np.array(
        [cv2.GaussianBlur(item, (args.gblur_size_width, args.gblur_size_high), 0) for item in fixation_maps])
    # fit_transform refits the scaler, so the same instance can be reused safely.
    fix_maps = mmscaler.fit_transform(headmap.ravel().reshape(-1, 1)).reshape(headmap.shape)

    assert sal_maps.shape == fix_maps.shape, "sal_maps.shape != fix_maps.shape"
    # Pair (saliency, fixation) channels: [window, 2, H, W] -> [1, window, 2, H, W].
    sal_fix_list = []
    for i in range(len(fix_maps)):
        sal_fix_list.append(np.stack([sal_maps[i], fix_maps[i]]))
    sal_fix_maps = np.stack(sal_fix_list)
    inputs = torch.from_numpy(sal_fix_maps[np.newaxis, :, :, :, :]).to(torch.float32)
    if torch.cuda.is_available():
        inputs = inputs.cuda()

    return inputs, past_time[-1] + time_window


# load the settings (CLI / config arguments shared by everything below)
args = get_args()
# NOTE(review): the ordering suggests videoList[i] pairs with salFileList[i]
# (Skiing <-> topic1, per the break condition below) — confirm against the dataset.
videoList = ['Skiing', 'Conan1', 'Alien', 'Conan2', 'Surfing', 'War', 'Cooking', 'Football', 'Rhinos']
salFileList = ["saliency_ds2_topic1", "saliency_ds2_topic0", "saliency_ds2_topic2",
               "saliency_ds2_topic3", "saliency_ds2_topic4", "saliency_ds2_topic5",
               "saliency_ds2_topic6", "saliency_ds2_topic7", "saliency_ds2_topic8"]


# Pre-load the saliency maps at module import time, and report how long it took.
start_loadSal = time.time()
time_array_list = []   # per-video arrays of saliency-map timestamps
sal_maps_list = []     # per-video downsampled saliency maps
for item in salFileList:
    salPath = args.sal_path + item
    try:
        # FIX: close the file handle via a context manager (it was leaked).
        # NOTE(review): pickle.load is unsafe on untrusted files — these are
        # expected to be local, trusted dataset files.
        with open(salPath, 'rb') as sal_file:
            saliency_array = np.array(pickle.load(sal_file, encoding='bytes'), dtype=object)
    except _pickle.UnpicklingError:
        # Fall back to numpy's own (pickle-based) loader for .npy-style files.
        saliency_array = np.load(salPath, allow_pickle=True)

    time_array_list.append(np.array([i[0] for i in saliency_array]))   # entry[0]: timestamp
    sal_array = np.array([i[2] for i in saliency_array])               # entry[2]: saliency map
    sal_maps_list.append(de_interpolate(sal_array, len(sal_array)))
    # Currently only the Skiing video is handled (first file in the list).
    if item == "saliency_ds2_topic1":
        break
end_loadSal = time.time()
print("Load saliency maps time:", round(end_loadSal - start_loadSal, 3))


