# -*- coding: utf-8 -*-

import os
import shutil
import numpy as np

import torch
import torch.cuda as cuda
import torch.distributed as dist

def find_free_port():
    """Ask the OS for a currently free TCP port and return its number.

    Binding to port 0 makes the kernel pick any free port. The socket is
    closed immediately (the original leaked it), so the port is released on
    return — another process could in principle grab it before the caller
    binds, which is the usual accepted race for seeding e.g. MASTER_PORT.
    """
    import socket
    with socket.socket() as s:
        s.bind(('', 0))            # Port 0: let the host assign a free port.
        return s.getsockname()[1]  # The port number the kernel chose.

def reduce_tensor(tensor, world_size):
    r"""
    Average ``tensor`` across all workers in the default process group.

    ``dist.all_reduce`` operates in-place, so a clone is reduced to leave
    the caller's tensor untouched.
    """
    averaged = tensor.clone()
    dist.all_reduce(averaged, op=dist.ReduceOp.SUM)
    averaged /= world_size
    return averaged


def save_checkpoint(state, is_best, filename='checkpoint.pth'):
    """Save a training checkpoint and refresh the best-model snapshot.

    Args:
        state (dict): checkpoint payload; must contain key ``'best_acc1'``.
        is_best (bool): kept for interface compatibility; the best model is
            determined by scanning all saved checkpoints, not by this flag.
        filename (str): checkpoint name of the form ``checkpoint_<rank>.pth``
            — the rank is parsed out of it.

    Fixes over the original: the scan now tracks the running best accuracy
    (previously ``best_acc`` was never updated, so the copy could fire on
    every iteration), copies the checkpoint that actually achieved the best
    accuracy (previously it always copied the just-written file), and
    creates the checkpoint directory before saving.
    """
    BEST_CKP = 'model_best.pth'
    # Rank is encoded in the filename: 'checkpoint_<rank>.pth'.
    rank = int(filename.split('_')[1][:-4])

    jobid = os.environ["SLURM_JOBID"]
    CHECKPOINT_DIR = f"./checkpoints/{jobid}"
    os.makedirs(CHECKPOINT_DIR, exist_ok=True)

    checkpoint_file = os.path.join(CHECKPOINT_DIR, filename)
    torch.save(state, checkpoint_file)

    # Only rank 0 maintains the best-model snapshot.
    if rank != 0:
        return

    best_path = os.path.join(CHECKPOINT_DIR, BEST_CKP)
    best_acc = 0.0
    if os.path.exists(best_path):
        best_acc = torch.load(best_path)['best_acc1']

    # Scan every per-rank checkpoint and remember the single best one.
    best_candidate = None
    for ckp in os.listdir(CHECKPOINT_DIR):
        if 'checkpoint' not in ckp:
            continue
        hist_acc = torch.load(os.path.join(CHECKPOINT_DIR, ckp))['best_acc1']
        if hist_acc > best_acc:
            best_acc = hist_acc
            best_candidate = os.path.join(CHECKPOINT_DIR, ckp)

    if best_candidate is not None:
        shutil.copyfile(best_candidate, best_path)


class Lighting(object):
    """
    AlexNet-style PCA-based lighting noise augmentation.

    Adds to every pixel a random linear combination of the color-PCA
    eigenvectors, with coefficients drawn from N(0, alphastd) and weighted
    by the eigenvalues.
    """
    def __init__(self, alphastd, eigval, eigvec):
        # Std-dev of the per-channel noise coefficients; 0 disables the noise.
        self.alphastd = alphastd
        self.eigval = eigval   # PCA eigenvalues (3 values).
        self.eigvec = eigvec   # PCA eigenvectors (3x3).

    def __call__(self, img):
        # Zero std means the perturbation is identically zero: skip all work.
        if self.alphastd == 0:
            return img

        # One Gaussian coefficient per color channel.
        alpha = img.new().resize_(3).normal_(0, self.alphastd)

        # Weight each eigenvector column by its coefficient and eigenvalue,
        # then collapse to a single per-channel offset.
        weighted = self.eigvec.type_as(img).clone()
        weighted = weighted.mul(alpha.view(1, 3).expand(3, 3))
        weighted = weighted.mul(self.eigval.view(1, 3).expand(3, 3))
        rgb = weighted.sum(1).squeeze()

        offset = rgb.view(3, 1, 1).expand_as(img)
        return img.add(offset)


class DataPrefetcher():
    """
    DataPrefetcher is a class used to asynchronously prefetch training data minibatch to the GPU
    device with CUDA streams.
    """
    def __init__(self, loader, stop_after=None):
        # loader: iterable yielding (input, target) pairs; must expose .dataset.
        # stop_after: if an int, iteration ends early once that many batches
        # have been yielded (see note in __iter__).
        self.loader = loader
        self.dataset = loader.dataset
        # Dedicated side stream so host-to-device copies can overlap compute.
        self.stream = cuda.Stream()
        self.stop_after = stop_after
        self.next_input = None
        self.next_target = None

    def __len__(self):
        # Mirrors the underlying loader's batch count.
        return len(self.loader)

    def preload(self):
        # Fetch the next pair from the loader and start its GPU copy on the
        # side stream.
        try:
            self.next_input, self.next_target = next(self.loaditer)
        except StopIteration:
            # Loader exhausted: None signals __iter__ to stop.
            self.next_input, self.next_target = None, None
            return

        # NOTE(review): non_blocking=True only overlaps when the host tensors
        # are in pinned memory — presumably the loader uses pin_memory=True;
        # confirm against the caller.
        with cuda.stream(self.stream):
            self.next_input = self.next_input.cuda(non_blocking=True)
            self.next_target = self.next_target.cuda(non_blocking=True)

    def __iter__(self):
        count = 0
        self.loaditer = iter(self.loader)
        self.preload()
        while self.next_input is not None:
            # Block the current stream until the prefetch copies complete
            # before the batch is consumed.
            cuda.current_stream().wait_stream(self.stream)
            input = self.next_input
            target = self.next_target
            # Kick off the next batch's transfer before yielding this one.
            self.preload()
            count += 1
            yield input, target
            # NOTE(review): with '>' this yields stop_after + 1 batches; use
            # '>=' if exactly stop_after batches are intended — confirm.
            if isinstance(self.stop_after, int) and (count > self.stop_after):
                break


class EarlyStopping:
    """
    Early stops the training if the monitored top-1 accuracy doesn't improve
    after a given patience, saving a checkpoint on every improvement.
    Reference: [1] https://github.com/Bjarten/early-stopping-pytorch.git
               [2] Pull requests from jeffreyng99.
    """
    def __init__(self, patience=7, verbose=False, delta=0, general_checkpoint=False,
                 checkpoint_dir='./checkpoints', filename='checkpoint.pt',
                 trace_func=print):
        """
        Args:
            patience (int): How long to wait after last time the monitored metric improved.
                            Default: 7
            verbose (bool): If True, prints a message for each improvement.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                            Default: 0
            general_checkpoint (bool): Saves additional information (epoch, optimizer state)
                            that can be used to resume training.
                            Default: False
            checkpoint_dir (str): Directory the checkpoint is saved to.
                            Default: './checkpoints'
            filename (str): Checkpoint file name inside checkpoint_dir.
                            Default: 'checkpoint.pt'
            trace_func (function): trace print function.
                            Default: print
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0            # Epochs since the last improvement.
        self.best_score = None
        self.early_stop = False
        # Metric value at the last save; np.inf (np.Inf was removed in NumPy 2.0).
        self.top1_acc_min = np.inf
        self.delta = delta
        self.general_checkpoint = general_checkpoint
        self.checkpoint_dir = checkpoint_dir
        self.filename = filename
        self.trace_func = trace_func

        # exist_ok avoids the check-then-create race of isdir + makedirs.
        os.makedirs(self.checkpoint_dir, exist_ok=True)

    def __call__(self, top1_acc, model, epoch=None, optimizer=None):
        """Record this epoch's top-1 accuracy; save on improvement, else count down patience."""
        score = top1_acc
        if self.best_score is None:
            # First call: everything is an improvement.
            self.best_score = score
            self.save_checkpoint(top1_acc, model)
        elif score < self.best_score + self.delta:
            # No improvement (by at least delta): spend one unit of patience.
            self.counter += 1
            self.trace_func(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            # Improvement: save and reset the patience counter.
            self.best_score = score
            self.save_checkpoint(top1_acc, model, epoch=epoch, optimizer=optimizer)
            self.counter = 0

    def save_checkpoint(self, top1_acc, model, epoch=None, optimizer=None):
        """
        Saves the model when the monitored top-1 accuracy improves.
        """
        if self.verbose:
            # The class tracks accuracy, not loss — the original message
            # incorrectly said "Validation loss decreased".
            self.trace_func(f'Top-1 accuracy improved ({self.top1_acc_min:.6f} --> {top1_acc:.6f}).'
                            ' Saving model...')

        if self.general_checkpoint and epoch is not None and optimizer is not None:
            # Full resume checkpoint: epoch, metric, model and optimizer state.
            torch.save({
                'epoch': epoch,
                'top1_acc': top1_acc,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, os.path.join(self.checkpoint_dir, self.filename))
        else:
            # Weights-only checkpoint.
            torch.save(model.state_dict(), os.path.join(self.checkpoint_dir, self.filename))
        self.top1_acc_min = top1_acc
