import sys
import time
import torch
import os
import errno
import torch.nn as nn
from conf import config
from model.resnet import resnet50
import numpy as np

def mkdir_if_missing(directory):
    """Create ``directory`` (including parents) if it does not exist.

    ``exist_ok=True`` makes creation race-safe: another process creating
    the same directory between our check and ``makedirs`` is not an error.
    The pre-check is kept so that an existing non-directory path is
    silently skipped, matching the original behavior.
    """
    if not os.path.exists(directory):
        os.makedirs(directory, exist_ok=True)

class Logger(object):
    """Tee-style logger: mirrors everything written to stdout into an
    optional log file.

    Typical use is ``sys.stdout = Logger(path)`` so that ``print`` output
    is both displayed on the console and persisted to ``path``.
    """

    def __init__(self, fpath=None):
        self.console = sys.stdout
        self.file = None
        if fpath is not None:
            mkdir_if_missing(os.path.dirname(fpath))
            self.file = open(fpath, 'w')

    def __del__(self):
        self.close()

    def __enter__(self):
        # Return self so ``with Logger(p) as log:`` binds the logger.
        # (Previously returned None, making the bound name useless.)
        return self

    def __exit__(self, *args):
        self.close()

    def write(self, msg):
        """Write ``msg`` to the console and, if open, to the log file."""
        self.console.write(msg)
        if self.file is not None:
            self.file.write(msg)

    def flush(self):
        """Flush both sinks; fsync the file so the log survives a crash."""
        self.console.flush()
        if self.file is not None:
            self.file.flush()
            os.fsync(self.file.fileno())

    def close(self):
        # Close ONLY the log file. ``console`` is sys.stdout; closing it
        # (as the original did) would kill all subsequent program output.
        # Resetting ``file`` makes close() idempotent, which matters
        # because __del__ may call it after an explicit close().
        if self.file is not None:
            self.file.close()
            self.file = None



class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) mean.

    Attributes:
        val:   last value passed to ``update``.
        sum:   weighted sum of all recorded values.
        count: total weight recorded so far.
        avg:   ``sum / count``.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every statistic."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record ``val`` with weight ``n`` and refresh the running mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count

class LinearEmbed(nn.Module):
    """Embedding module"""
    def __init__(self, dim_in=128, dim_out=2):
        super(LinearEmbed, self).__init__()
        self.linear = nn.Linear(dim_in, dim_out)
        # self.l2norm = Normalize(2)

    def forward(self, x):
        x = x.view(x.shape[0], -1)
        x = self.linear(x)
        # x = self.l2norm(x)
        return x



def accuracy(output, target):
    """Top-1 accuracy as a percentage.

    Args:
        output: (N, C) raw scores/logits.
        target: (N,) integer class labels.

    Returns:
        Single-element list holding a 1-element tensor with the accuracy
        in [0, 100] (list form kept for caller compatibility).
    """
    with torch.no_grad():
        batch_size = target.size(0)

        # softmax is monotonic per row, so argmax of the probabilities
        # equals argmax of the raw scores; kept for parity with the
        # other metric helpers in this file.
        predictions = torch.argmax(torch.softmax(output, 1), dim=1)

        hits = predictions.eq(target).view(-1).float().sum(0, keepdim=True)
        return [hits.mul_(100.0 / batch_size)]


def precision(output, target):
    """Precision (%) for the positive class (label 1).

    Args:
        output: (N, C) raw scores/logits.
        target: (N,) integer class labels.

    Returns:
        float in [0, 100]. Returns 0.0 when nothing is predicted
        positive (the original raised ZeroDivisionError in that case).
    """
    with torch.no_grad():
        pred = torch.argmax(torch.softmax(output, 1), dim=1)

        predicted_pos = pred == 1
        count_1 = int(predicted_pos.sum())
        if count_1 == 0:
            # No positive predictions -> precision is conventionally 0.
            return 0.0
        count_correct = int((predicted_pos & (target == 1)).sum())
        return float(count_correct) * (100.0 / float(count_1))


def recall(output, target):
    """Recall (%) for the positive class (label 1).

    Args:
        output: (N, C) raw scores/logits.
        target: (N,) integer class labels.

    Returns:
        float in [0, 100]. Returns 0.0 when the target contains no
        positives (the original raised ZeroDivisionError in that case).
    """
    with torch.no_grad():
        pred = torch.argmax(torch.softmax(output, 1), dim=1)

        actual_pos = target == 1
        count_target_1 = int(actual_pos.sum())
        if count_target_1 == 0:
            # No actual positives -> recall is conventionally 0.
            return 0.0
        count_correct = int(((pred == 1) & actual_pos).sum())
        return float(count_correct) * (100.0 / float(count_target_1))

def f1(output, target):
    """F1 score (fraction in [0, 1]) for the positive class (label 1).

    Args:
        output: (N, C) raw scores/logits.
        target: (N,) integer class labels (assumed binary 0/1, matching
            the original which counted positives via ``target.sum()``).

    Returns:
        float in [0, 1]. Returns 0.0 in the degenerate cases (no
        predicted positives, no actual positives, or zero true
        positives) where the original raised ZeroDivisionError.
    """
    with torch.no_grad():
        pred = torch.argmax(torch.softmax(output, 1), dim=1)

        count_predict_1 = int((pred == 1).sum())
        count_1_correct = int(((pred == 1) & (target == 1)).sum())
        count_target_1 = int((target == 1).sum())

        if count_predict_1 == 0 or count_target_1 == 0 or count_1_correct == 0:
            # Precision and/or recall is 0 (or undefined); F1 is 0.
            return 0.0

        precision = float(count_1_correct) / float(count_predict_1)
        recall = float(count_1_correct) / float(count_target_1)
        return 2 * (precision * recall) / (precision + recall)


# def accuracy(output, target, topk=1):
#     """Computes the accuracy over the k top predictions for the specified values of k"""
#     with torch.no_grad():
#         maxk = topk
#         batch_size = target.size(0)

#         _, pred = output.topk(maxk, 1, True, True)
#         pred = pred.t()
#         correct = pred.eq(target.view(1, -1).expand_as(pred))
#         res = []

#         correct_k = correct[:1].view(-1).float().sum(0, keepdim=True)
#         res.append(correct_k.mul_(100.0 / batch_size))
#         return res

def adjust_learning_rate(epoch, opt, optimizer):
    """Step-decay LR schedule.

    Multiplies ``opt.learning_rate`` by ``opt.lr_decay_rate`` once for
    each entry of ``opt.lr_decay_epochs`` that ``epoch`` has passed, and
    writes the result into every param group of ``optimizer``. Leaves
    the optimizer untouched before the first milestone.
    """
    milestones = np.asarray(opt.lr_decay_epochs)
    num_decays = int(np.sum(epoch > milestones))
    if num_decays > 0:
        decayed_lr = opt.learning_rate * (opt.lr_decay_rate ** num_decays)
        for group in optimizer.param_groups:
            group['lr'] = decayed_lr



