import torch
import torch.nn as nn
from torch.autograd import Variable as V

#import cv2
import numpy as np
from radam import RAdam


# Training framework
class TrainFrameWork():
    """Training/evaluation wrapper for a segmentation network.

    Owns the (GPU, DataParallel-wrapped) network, its optimizer and loss,
    and the current mini-batch set via :meth:`set_input`. Provides one-step
    training (:meth:`optimize`), batch evaluation (:meth:`test_batch`),
    checkpointing and learning-rate scheduling.
    """

    def __init__(self, net, optimizer, loss, lr=1e-4):
        """Instantiate the network on GPU and build its optimizer.

        Args:
            net: network *class* (not an instance); constructed with no args.
            optimizer: one of 'RAdam', 'Adam', 'RMSprop'.
            loss: callable ``loss(pred, target)`` returning a scalar tensor.
            lr: initial learning rate.

        Raises:
            ValueError: if ``optimizer`` is not a recognized name.
        """
        self.net = net().cuda()
        # Spread batches across all visible GPUs.
        self.net = torch.nn.DataParallel(self.net, device_ids=range(torch.cuda.device_count()))

        if optimizer == 'RAdam':
            self.optimizer = RAdam(params=self.net.parameters(), lr=lr)
        elif optimizer == 'Adam':
            self.optimizer = torch.optim.Adam(params=self.net.parameters(), lr=lr)
        elif optimizer == 'RMSprop':
            self.optimizer = torch.optim.RMSprop(params=self.net.parameters(), lr=lr)
        else:
            raise ValueError('Undefined optimizer.')

        self.loss = loss
        self.lr = lr

        # Current mini-batch; populated by set_input().
        self.img_batch = None
        self.mask_batch = None
        self.id_batch = None

    def eval(self):
        """Switch the network to evaluation mode (affects BN/Dropout)."""
        self.net.eval()

    def train(self):
        """Switch the network to training mode."""
        self.net.train()

    # Set the input data for the next optimize()/test_batch() call.
    def set_input(self, img_batch, mask_batch=None, id_batch=None):
        """Store the current batch: images, optional masks and sample ids."""
        self.img_batch = img_batch
        self.mask_batch = mask_batch
        self.id_batch = id_batch

    def test_one_img(self, img):
        """Predict a binary mask for a single image tensor.

        Thresholds the network output at 0.5 and returns the result as a
        numpy array (batch/channel dims squeezed out).
        """
        with torch.no_grad():  # inference only; also makes the in-place
            pred = self.net(img)  # thresholding below safe w.r.t. autograd
        pred[pred > 0.5] = 1
        pred[pred <= 0.5] = 0
        return pred.squeeze().cpu().data.numpy()

    def _calc_iou(self, true_mask, pred_mask):
        """Intersection-over-union of two binary masks.

        Returns 1 when both masks are empty (union == 0).
        """
        t = true_mask.flatten()
        p = pred_mask.flatten()
        intersection = np.logical_and(t, p).sum()
        union = np.logical_or(t, p).sum()
        return intersection / union if union > 0 else 1

    # Fixed: parameter was misspelled `selfself`.
    def _calc_accuracy(self, true_mask, pred_mask):
        """Pixel-wise accuracy between two masks of identical shape."""
        matches = true_mask.flatten() == pred_mask.flatten()
        return np.sum(matches) / true_mask.size

    # Evaluate the stored batch.
    def test_batch(self):
        """Run the network on the stored batch without gradients.

        Returns:
            (loss_value, pred_batch): scalar loss and the raw (unthresholded)
            prediction tensor.
        """
        with torch.no_grad():
            self._forward()
            pred_batch = self.net(self.img_batch)
            # Loss belongs inside no_grad too: no graph needed at eval time.
            loss = self.loss(pred_batch, self.mask_batch)
        return loss.item(), pred_batch

    # Move inputs to the GPU before a forward pass.
    def _forward(self):
        """Transfer the stored batch to the GPU in place.

        NOTE: `torch.autograd.Variable` wrapping was removed — it has been a
        no-op since torch 0.4; plain tensors carry autograd state.
        """
        self.img_batch = self.img_batch.cuda()
        if self.mask_batch is not None:
            self.mask_batch = self.mask_batch.cuda()

    # One optimization step on the stored batch.
    def optimize(self):
        """Forward, backward and optimizer step; return the scalar loss."""
        self._forward()
        self.optimizer.zero_grad()
        pred = self.net(self.img_batch)
        loss = self.loss(pred, self.mask_batch)
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def save(self, path):
        """Save network weights to `path`.

        The state dict is DataParallel-wrapped (keys prefixed `module.`);
        load() below expects the same layout.
        """
        torch.save(self.net.state_dict(), path)

    def load(self, path):
        """Load network weights previously written by save()."""
        self.net.load_state_dict(torch.load(path))

    # Learning-rate schedule step.
    def update_lr(self, new_lr, factor=False):
        """Set a new learning rate on every optimizer param group.

        Args:
            new_lr: the new rate, or — when ``factor`` is True — a multiplier
                applied to the current rate.
            factor: interpret ``new_lr`` as a multiplicative factor.
        """
        if factor:
            new_lr = self.lr * new_lr
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = new_lr

        print('update learning rate: %f -> %f' % (self.lr, new_lr))
        self.lr = new_lr
