'''Some helper functions for PyTorch, including:
    - get_mean_and_std: calculate the mean and std value of a dataset.
    - init_params: net parameter initialization (Kaiming / MSR style).
    - progress_bar: progress bar mimicking xlua.progress.
'''
import math
import os
import shutil
import sys
import time

import six
import torch
import torch.nn as nn
import torch.nn.init as init


def get_mean_and_std(dataset):
    '''Compute the per-channel mean and std of an image dataset.

    Iterates one sample at a time and averages each sample's per-channel
    mean/std, i.e. the result is the mean of per-image statistics, not the
    statistics of the pooled pixels.

    Args:
        dataset: a torch Dataset yielding ``(image, target)`` pairs where
            ``image`` is a ``(C, H, W)`` tensor.

    Returns:
        ``(mean, std)``: two 1-D tensors of length ``C`` (zeros if the
        dataset is empty).
    '''
    # shuffle=False: iteration order does not affect a sum, so shuffling is
    # pure overhead. num_workers=0: a single stats pass gains nothing from
    # worker processes and avoids multiprocessing pitfalls (e.g. Windows
    # without a __main__ guard).
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                             shuffle=False, num_workers=0)
    mean = None
    std = None
    print('==> Computing mean and std..')
    for inputs, _targets in dataloader:
        if mean is None:
            # Derive the channel count from the data instead of
            # hard-coding 3 (RGB); grayscale etc. now work too.
            channels = inputs.size(1)
            mean = torch.zeros(channels)
            std = torch.zeros(channels)
        for i in range(channels):
            mean[i] += inputs[:, i, :, :].mean()
            std[i] += inputs[:, i, :, :].std()
    if mean is None:
        # Empty dataset: avoid a 0/0 NaN; keep the historical 3-channel shape.
        return torch.zeros(3), torch.zeros(3)
    mean.div_(len(dataset))
    std.div_(len(dataset))
    return mean, std

def init_params(net):
    '''Initialize every layer of *net* in place.

    Conv2d: Kaiming-normal (fan_out) weights, zero bias.
    BatchNorm2d: weight 1, bias 0.
    Linear: normal(std=1e-3) weights, zero bias.
    '''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            # The un-suffixed initializers (kaiming_normal, constant, normal)
            # are deprecated/removed in modern PyTorch; use the in-place
            # `_`-suffixed variants.
            init.kaiming_normal_(m.weight, mode='fan_out')
            # `if m.bias:` on a multi-element Parameter raises
            # "bool value of Tensor ... is ambiguous"; test against None.
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)

TOTAL_BAR_LENGTH = 15.  # Character width of the drawn bar (between the brackets).
last_time = time.time()  # Timestamp of the previous progress_bar call (per-step timing).
begin_time = last_time  # Timestamp when the current bar started (reset when current == 0).

_disable_progress_bar = int(os.environ.get("DISABLE_PROGRESS_BAR", 0))
if _disable_progress_bar > 0:
    # Progress output disabled (e.g. for clean log files): same signature,
    # does nothing.
    def progress_bar(current, total, msg=None, ban=""):
        pass
else:
    # BUGFIX: `os.popen('stty size', 'r').read().split()` returns nothing when
    # stdout is not a real terminal (pipes, CI, notebooks), which crashed the
    # import with a ValueError on unpacking. shutil.get_terminal_size() falls
    # back to the COLUMNS env var and then to 80 instead of crashing.
    # The module-level name is kept for backward compatibility.
    term_width = shutil.get_terminal_size().columns

    def progress_bar(current, total, msg=None, ban=""):
        '''Render a one-line textual progress bar on stdout.

        Args:
            current: zero-based index of the step just finished.
            total: total number of steps.
            msg: optional extra text appended after the timings.
            ban: optional banner text printed just inside the bar.
        '''
        global last_time, begin_time
        if current == 0:
            begin_time = time.time()  # Reset for new bar.

        cur_len = int(TOTAL_BAR_LENGTH*current/total)
        rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1

        sys.stdout.write(' ['+ban)
        for i in range(cur_len):
            sys.stdout.write('=')
        sys.stdout.write('>')
        for i in range(rest_len):
            sys.stdout.write('.')
        sys.stdout.write(']')

        cur_time = time.time()
        step_time = cur_time - last_time
        last_time = cur_time
        tot_time = cur_time - begin_time

        L = []
        L.append('  Step: %s' % format_time(step_time))
        L.append(' | Tot: %s' % format_time(tot_time))
        if msg:
            L.append(' | ' + msg)

        msg = ''.join(L)
        sys.stdout.write(msg)

        sys.stdout.write(' %d/%d ' % (current+1, total))

        # Carriage return redraws the same line until the final step, which
        # gets a newline instead.
        if current < total-1:
            sys.stdout.write('\r')
        else:
            sys.stdout.write('\n')
        sys.stdout.flush()

def format_time(seconds):
    '''Render a duration in seconds as a compact string like "1h2m".

    At most the two most significant nonzero units are shown, chosen from
    days (D), hours (h), minutes (m), seconds (s) and milliseconds (ms).
    A zero duration renders as "0ms".
    '''
    days, rem = divmod(seconds, 86400)
    hours, rem = divmod(rem, 3600)
    minutes, rem = divmod(rem, 60)
    whole_secs = int(rem)
    millis = int((rem - whole_secs) * 1000)

    units = (
        (int(days), 'D'),
        (int(hours), 'h'),
        (int(minutes), 'm'),
        (whole_secs, 's'),
        (millis, 'ms'),
    )
    parts = ['%d%s' % (value, suffix) for value, suffix in units if value > 0]
    if not parts:
        return '0ms'
    return ''.join(parts[:2])

def get_forward(forward):
    '''Wrap a ``forward`` callable so the first call records the output size.

    The wrapped function stores ``tuple(result.size())`` on the module as
    ``self.o_size`` the first time it runs; later calls leave it untouched.
    '''
    def _wrapped(self, *args, **kwargs):
        result = forward(self, *args, **kwargs)
        # Only record once: a pre-existing o_size is never overwritten.
        if not hasattr(self, "o_size"):
            self.o_size = tuple(result.size())
        return result
    return _wrapped

_ALREADY_PATCHED = False  # Guards against wrapping nn.Conv2d.forward twice.
def patch_conv2d_4_size():
    '''Monkey-patch ``nn.Conv2d.forward`` to record each conv's output size.

    Idempotent: the original set ``_ALREADY_PATCHED`` but never checked it,
    so repeated calls stacked wrapper upon wrapper; now later calls no-op.
    '''
    global _ALREADY_PATCHED
    if _ALREADY_PATCHED:
        return
    nn.Conv2d.forward = get_forward(nn.Conv2d.forward)
    _ALREADY_PATCHED = True

    
class InfIterator(six.Iterator):
    '''Endlessly iterate over *iterable*, restarting it whenever it runs out.

    The underlying iterator is re-created on each pass (so, e.g., a
    DataLoader reshuffles every epoch). Attribute access and ``len()``
    are delegated to the wrapped iterable.
    '''

    def __init__(self, iterable):
        self.iterable = iterable
        self.iter_ = None  # created lazily on the first next()

    def __getattr__(self, name):
        # Fall through to the wrapped iterable for unknown attributes.
        return getattr(self.iterable, name)

    def __len__(self):
        return len(self.iterable)

    def __next__(self):
        if self.iter_ is None:
            self.iter_ = iter(self.iterable)
        try:
            return next(self.iter_)
        except StopIteration:
            # Exhausted: begin a fresh pass. An empty iterable raises
            # StopIteration again here and it propagates to the caller.
            self.iter_ = iter(self.iterable)
            return next(self.iter_)

    next = __next__  # Python 2 iterator protocol (via six.Iterator)

def get_inf_iterator(iterable):
    '''Return an InfIterator that cycles over *iterable* indefinitely.'''
    return InfIterator(iterable)
# valid_queue = get_inf_iterator(DataLoader(...))
# next(valid_queue)

def get_list_str(lst, format_):
    '''Format each item of *lst* with *format_* and join as "[a, b, c]".'''
    inner = ", ".join(format_.format(item) for item in lst)
    return "[%s]" % inner

class LabelSmoothingLoss(nn.Module):
    '''Cross-entropy loss with label smoothing.

    The target distribution puts ``1 - smoothing`` on the true class and
    spreads ``smoothing`` uniformly over the remaining ``classes - 1``.

    Args:
        classes: number of classes.
        smoothing: total probability mass moved off the true class.
        dim: class dimension of the predictions.
    '''
    def __init__(self, classes, smoothing=0.0, dim=-1):
        super(LabelSmoothingLoss, self).__init__()
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.cls = classes
        self.dim = dim

    def forward(self, pred, target):
        # pred: raw logits; target: integer class indices.
        pred = pred.log_softmax(dim=self.dim)
        with torch.no_grad():
            true_dist = torch.zeros_like(pred)
            true_dist.fill_(self.smoothing / (self.cls - 1))
            # BUGFIX: scatter along self.dim instead of hard-coded dim 1, so
            # the configured `dim` is honored (they only coincide for 2-D
            # preds with dim in {1, -1}). `.data` dropped: deprecated and
            # redundant inside no_grad().
            true_dist.scatter_(self.dim, target.unsqueeze(self.dim), self.confidence)
        return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))

def accuracy(output, target, topk=(1,)):
    """Count correct predictions at each k in *topk*.

    Args:
        output: (batch, classes) tensor of scores/logits.
        target: (batch,) tensor of true class indices.
        topk: tuple of k values to evaluate.

    Returns:
        A list with one 0-dim float tensor per k: the NUMBER of samples
        whose true class appears among the top-k predictions (a count,
        not a percentage).
    """
    maxk = max(topk)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # BUGFIX: the transpose above makes `correct` non-contiguous, so
        # `view(-1)` raises in recent PyTorch; `reshape` copies if needed.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k)
    return res
