import datetime
import os.path
import random
import sys
import time

import numpy as np
import cv2
import torch
# from visdom import Visdom
import torch.nn.functional as F
import yaml
from utils.CommonUtils import check_create_dir, get_current_timestamp
import csv
from torch.autograd import Variable


class Resize():
    """Transform that resizes a 3-D tensor (C, H, W) to a fixed spatial size."""

    def __init__(self, size_tuple, use_cv=True):
        # `use_cv` is kept for interface compatibility but is unused here.
        self.size_tuple = size_tuple
        self.use_cv = use_cv

    def __call__(self, tensor):
        """Resize `tensor` to (size_tuple[0], size_tuple[1]) via F.interpolate.

        Arg:    tensor - torch.Tensor; a batch dim is added and removed
                internally so interpolate sees a rank-4 input.
        Ret:    resized tensor with the original rank
        """
        target_size = [self.size_tuple[0], self.size_tuple[1]]
        batched = tensor.unsqueeze(0)
        resized = F.interpolate(batched, size=target_size)
        return resized.squeeze(0)


class ToTensor():
    """Transform that prepends a channel axis and converts ndarray -> torch.Tensor."""

    def __call__(self, tensor):
        with_channel = tensor[np.newaxis, ...]
        return torch.from_numpy(with_channel)


def tensor2image(tensor):
    """Convert a batch tensor in [-1, 1] to a uint8 image array.

    The batch is mapped to pixel range via 127.5 * x + 127.5 and the
    per-sample images are stacked horizontally (along axis 1).

    Arg:    tensor - torch.Tensor of shape (B, C, H, W)
    Ret:    np.uint8 array of shape (3, B*H, W) — grayscale is tiled to 3 channels
    """
    image = (127.5 * (tensor.cpu().float().numpy())) + 127.5
    image1 = image[0]
    for i in range(1, tensor.shape[0]):
        image1 = np.hstack((image1, image[i]))

    # BUG FIX: the original tiled `image` (and tested the *batch* dim) but
    # returned `image1`, so single-channel outputs were never expanded to RGB.
    if image1.shape[0] == 1:
        image1 = np.tile(image1, (3, 1, 1))
    return image1.astype(np.uint8)


class Logger():
    """Console progress logger for epoch/batch training loops.

    Prints running mean losses and an ETA on a single carriage-returned
    line; tracks epoch/batch counters itself. (Visdom plotting was removed
    from this class; the window dicts remain for interface compatibility.)
    """

    def __init__(self, env_name, ports, n_epochs, batches_epoch):
        # env_name / ports are unused since Visdom was disabled; kept so
        # existing call sites do not break.
        self.n_epochs = n_epochs
        self.batches_epoch = batches_epoch
        self.epoch = 1
        self.batch = 1
        self.prev_time = time.time()
        self.mean_period = 0
        self.losses = {}            # running sum of each loss over the epoch
        self.loss_windows = {}
        self.image_windows = {}

    def log(self, losses=None, images=None):
        """Accumulate `losses` (dict of 0-d tensors) and print progress.

        `images` is accepted but unused (leftover from the Visdom version).
        """
        self.mean_period += (time.time() - self.prev_time)
        self.prev_time = time.time()

        sys.stdout.write(
            '\rEpoch %03d/%03d [%04d/%04d] -- ' % (self.epoch, self.n_epochs, self.batch, self.batches_epoch))

        # BUG FIX: `losses` defaults to None but was iterated unconditionally,
        # so a bare log() call crashed with AttributeError.
        losses = losses if losses is not None else {}
        for i, loss_name in enumerate(losses.keys()):
            if loss_name not in self.losses:
                self.losses[loss_name] = losses[loss_name].item()
            else:
                self.losses[loss_name] += losses[loss_name].item()

            if (i + 1) == len(losses.keys()):
                sys.stdout.write('%s:%.4f -- ' % (loss_name, self.losses[loss_name] / self.batch))
            else:
                sys.stdout.write('%s:%.4f| ' % (loss_name, self.losses[loss_name] / self.batch))

        batches_done = self.batches_epoch * (self.epoch - 1) + self.batch
        batches_left = self.batches_epoch * (self.n_epochs - self.epoch) + self.batches_epoch - self.batch
        sys.stdout.write('ETA: %s' % (datetime.timedelta(seconds=batches_left * self.mean_period / batches_done)))

        # End of epoch
        if (self.batch % self.batches_epoch) == 0:
            for loss_name, loss in self.losses.items():
                # Reset losses for next epoch
                self.losses[loss_name] = 0.0
            self.epoch += 1
            self.batch = 1
            sys.stdout.write('\n')
        else:
            self.batch += 1


class ReplayBuffer():
    """History buffer of previously generated samples.

    While the buffer is filling, every incoming sample is stored and
    returned as-is. Once full, each incoming sample has a 50% chance of
    being swapped with (and replaced by) a random stored sample.
    """

    def __init__(self, max_size=50):
        assert (max_size > 0), 'Empty buffer or trying to create a black hole. Be careful.'
        self.max_size = max_size
        self.data = []

    def push_and_pop(self, data):
        """Push a batch through the buffer, returning a same-sized batch."""
        batch_out = []
        for sample in data.data:
            sample = torch.unsqueeze(sample, 0)
            if len(self.data) < self.max_size:
                # Still filling: keep the sample and also return it.
                self.data.append(sample)
                batch_out.append(sample)
            elif random.uniform(0, 1) > 0.5:
                # Return a random historical sample, replacing it with the new one.
                idx = random.randint(0, self.max_size - 1)
                batch_out.append(self.data[idx].clone())
                self.data[idx] = sample
            else:
                batch_out.append(sample)
        return Variable(torch.cat(batch_out))


class LambdaLR():
    """Linear learning-rate decay schedule.

    Returns factor 1.0 until `decay_start_epoch`, then decays linearly
    to 0.0 at `n_epochs`. `offset` shifts the effective epoch (useful
    when resuming training).
    """

    def __init__(self, n_epochs, offset, decay_start_epoch):
        assert ((n_epochs - decay_start_epoch) > 0), "Decay must start before the training session ends!"
        self.n_epochs = n_epochs
        self.offset = offset
        self.decay_start_epoch = decay_start_epoch

    def step(self, epoch):
        """Return the multiplicative LR factor for `epoch`."""
        epochs_into_decay = max(0, epoch + self.offset - self.decay_start_epoch)
        decay_span = self.n_epochs - self.decay_start_epoch
        return 1.0 - epochs_into_decay / decay_span


def weights_init_normal(m):
    """Weight initializer for `model.apply(weights_init_normal)`.

    Conv* layers:      weight ~ N(0.0, 0.02)
    BatchNorm2d layers: weight ~ N(1.0, 0.02), bias = 0
    Other modules are left untouched.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # FIX: torch.nn.init.normal/constant are deprecated aliases;
        # use the in-place `_` variants.
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)


def get_config(config):
    """Parse a YAML config file and return the loaded object (usually a dict).

    Args:
        config: path to the YAML file.
    """
    with open(config, 'r') as stream:
        # FIX: yaml.load without an explicit Loader is unsafe on untrusted
        # input and requires a Loader argument in PyYAML >= 6.
        return yaml.safe_load(stream)


def smooothing_loss(y_pred):
    """Smoothness (total-variation-style) penalty on a prediction.

    Mean of squared differences between neighbouring pixels along the
    height and width axes. Name typo is kept for existing callers.

    Arg:    y_pred - rank-4 tensor (N, C, H, W)
    Ret:    scalar tensor
    """
    dy = torch.abs(y_pred[:, :, 1:, :] - y_pred[:, :, :-1, :])
    dx = torch.abs(y_pred[:, :, :, 1:] - y_pred[:, :, :, :-1])
    # Removed the unused `grad` alias from the original.
    return torch.mean(dx * dx) + torch.mean(dy * dy)


def cmp(x, y):
    """Old-style comparator for checkpoint paths like `netD_C_<epoch>_<index>.pth`.

    Orders by (epoch, index), both parsed as the last two `_`-separated
    integers of the basename's stem. Returns -1/0/1; use with
    `functools.cmp_to_key` when sorting.
    """
    def _key(path):
        # e.g. "netD_C_1_540.pth" -> (1, 540)
        stem = os.path.basename(path).split(".")[0]
        parts = stem.split("_")
        return int(parts[-2]), int(parts[-1])

    kx, ky = _key(x), _key(y)
    # Tuple comparison replaces the original if/elif ladder, which also
    # contained an unreachable duplicate `elif epoch_x < epoch_y` branch.
    return (kx > ky) - (kx < ky)


def normalize_negative_v2(data):
    """Map uint8-range values [0, 255] to float32 in [-1, 1]."""
    scaled = (data / 255) * 2 - 1
    return scaled.astype(np.float32)


# Scale factor applied to raw .npy values before windowing.
N = 102393.75


def normalize_npy(npy_file):
    """Scale raw values by N, window to [-30, 1000], then map to [0, 255]."""
    lo, hi = -30, 1000
    windowed = np.clip(npy_file * N, lo, hi)
    return (windowed - lo) / (hi - lo) * 255.0


def cv_show(img: torch.Tensor, if_convert8=False, window_name=""):
    """Display a tensor with OpenCV and block until a key is pressed.

    Args:
        img: tensor to display; squeezed first, and if still 3-D only
             the first channel/slice is shown.
        if_convert8: cast to uint8 before display.
        window_name: OpenCV window title.
    """
    frame = np.squeeze(img.clone().detach().cpu().numpy())
    if len(frame.shape) == 3:
        # Still multi-channel after squeeze: show the first plane only.
        frame = frame[0]
    if if_convert8:
        frame = frame.astype(np.uint8)

    cv2.imshow(window_name, frame)
    cv2.waitKey()
    cv2.destroyAllWindows()


class InferenceResultSaver(object):
    """Appends per-file inference metrics (mae/ssim/psnr) to a timestamped CSV.

    Opens `<save_path>/<name>_<timestamp>.csv` in append mode and writes a
    header row. File-open failures are printed and the saver degrades to a
    no-op (best-effort behaviour preserved from the original).
    """

    def __init__(self, save_path, name):
        check_create_dir(save_path)
        self.csv_writer = None
        # FIX: always define file_handle so __del__ cannot raise
        # AttributeError when open() below fails.
        self.file_handle = None
        try:
            name = name + "_" + get_current_timestamp() + ".csv"
            self.file_handle = open(os.path.join(save_path, name), 'a+', encoding='utf-8', newline='')
            self.csv_writer = csv.writer(self.file_handle)
            self.csv_writer.writerow(["file_path", "mae", "ssim", "psnr"])
        except Exception as e:
            print(e)

    def write_one_row(self, row_content):
        """Append one CSV row; no-op if the file could not be opened."""
        if self.csv_writer is not None:
            self.csv_writer.writerow(row_content)

    def __del__(self):
        # Close only if the file was successfully opened.
        if self.file_handle is not None:
            self.file_handle.close()


if __name__ == '__main__':
    # Smoke test for cmp(): epoch 1 sorts after epoch 0, so this prints 1.
    print(cmp("netD_C_1_0.pth", "netD_C_0_540.pth"))
