from __future__ import print_function
#%matplotlib inline
import argparse
import os
from pathlib import Path
import random
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import scipy.io
from scipy.interpolate import make_interp_spline, interp1d
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.animation import FuncAnimation
import matplotlib.cm as cm
import matplotlib
import seaborn as sns
# matplotlib.use('TkAgg')
from scipy import ndimage
import scipy.stats as st

import utils
import copy
import cv2
import math
import operator
'''
Basic inference function.
Parameters:
  image_path = Path to the image to be tested.
  generator = Generator model (set to eval mode internally).
  device = Device to run the inference on.
  n_generated = Number of scanpaths to generate (default: 50).
Returns:
  A flat list of [x, y] integer fixation points in image space, aggregated
  over all generated scanpaths.
'''


def my_inference(image_path, generator, device, n_generated=50):
    """Generate scanpaths for one image and return all fixation points.

    Parameters:
        image_path: Path to the image file to run inference on.
        generator: Generator model; switched to eval mode here.
        device: Torch device the generator and inputs are moved to.
        n_generated: Number of scanpaths to sample (default 50).

    Returns:
        A flat list of [x, y] integer points in image space
        (x in [0, utils.image_size[1]], y in [0, utils.image_size[0]]),
        concatenated over all generated scanpaths.

    Raises:
        FileNotFoundError: if the image cannot be read from image_path.
    """
    # Set generator to eval mode
    generator.eval()

    # Load image; cv2.imread returns None on failure instead of raising,
    # so fail loudly here rather than with an opaque cvtColor error later.
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    if image is None:
        raise FileNotFoundError("Could not read image: %s" % image_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Resize to the model's expected input size (note cv2 takes (w, h))
    image = cv2.resize(image, (utils.image_size[1], utils.image_size[0]),
                       interpolation=cv2.INTER_AREA)
    image = image.astype(np.float32) / 255.0

    # Normalize image to roughly [-1, 1]
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize([0.5], [0.5])])

    # The input tensor is identical for every sample — build it once,
    # outside the generation loop.
    image_tensor = transform(image)[None, :, :, :].to(device)

    print("* [Batch %d] Data ready" % 0)

    # Predict scanpaths
    _generated_scanpaths = []
    for _ in range(n_generated):
        # NOTE(review): this noise is Gaussian with mean -1 and std 2
        # (2 * randn - 1), NOT uniform in [-1, 1]; kept as-is because the
        # generator was presumably trained with this distribution — confirm.
        noise = 2 * torch.randn(1, utils.random_z, 1, 1,
                                device=device).squeeze() - 1
        with torch.no_grad():
            fake_latlon = generator(image_tensor, noise, 1, debug=False)
            fake_latlon = fake_latlon.detach().cpu().squeeze()
            # The generator emits (x, y, z) unit-sphere triples; convert each
            # to lat/lon, then to image-space coordinates.
            scanpath = []
            for i in range(0, len(fake_latlon), 3):
                lat = np.arctan2(
                    fake_latlon[i + 2],
                    np.sqrt(fake_latlon[i]**2 + fake_latlon[i + 1]**2))
                lon = np.arctan2(fake_latlon[i + 1], fake_latlon[i])
                # From lat-lon to normalized x, y in [0, 1]
                y = ((lat / (np.pi / 2) + 1)) / 2
                x = ((lon / np.pi) + 1) / 2
                # Save results in image space
                scanpath.append(x * utils.image_size[1])
                scanpath.append(y * utils.image_size[0])
            _generated_scanpaths.append(scanpath)

    # Flatten every scanpath's (x, y) pairs into one list of integer points
    points = [[int(sp[i]), int(sp[i + 1])]
              for sp in _generated_scanpaths
              for i in range(0, len(sp), 2)]
    return points

