import numpy as np
import torch.nn.functional as F
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.cuda import amp
from torchnet.meter import AverageValueMeter
from tqdm import tqdm


def get_mem():
    """Return reserved GPU memory as a short human-readable string, e.g. '1.23G'.

    Falls back to '0G' when CUDA is unavailable.
    """
    reserved_gb = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0
    return '%.3gG' % reserved_gb


def one_hot(num_classes, label):
    """Convert an integer label map into a one-hot float tensor.

    Args:
        num_classes: number of classes C.
        label: integer label tensor of shape (B, H, W); assumed to be a CPU
            tensor -- `.numpy()` raises on CUDA tensors (TODO confirm callers).

    Returns:
        torch.FloatTensor of shape (B, C, H, W) with 1.0 at each pixel's class.
    """
    label = label.numpy()
    # Broadcast-compare against the class axis in one vectorized pass instead
    # of a Python loop over classes; result is identical to filling per-class.
    classes = np.arange(num_classes, dtype=label.dtype)
    arr = (label[:, None, :, :] == classes[None, :, None, None]).astype(label.dtype)
    return torch.FloatTensor(arr)


def get_next(dataloader, i):
    """Return the next batch from iterator `i`, restarting from a fresh
    iterator over `dataloader` when `i` is exhausted.

    NOTE(review): the fresh iterator is a local variable, so the caller's
    iterator stays exhausted and every later call rebuilds one from scratch
    -- confirm callers refresh their iterator, or return it alongside the
    batch.
    """
    try:
        batch = next(i)
    except StopIteration:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt and real data-loading errors; only iterator
        # exhaustion should trigger a restart.
        batch = next(iter(dataloader))
    return batch


def find_good_maps(D_outs, pred_all, threshold_st=0.7):
    """Select generator predictions the discriminator rates as confident.

    Args:
        D_outs: discriminator logits, one score per sample.
        pred_all: generator predictions of shape (B, C, H, W).
        threshold_st: self-training threshold applied to sigmoid(D_outs).

    Returns:
        (pred_sel, label_sel, count): selected predictions (moved to CUDA),
        their argmax pseudo-label maps, and how many passed the threshold;
        (0, 0, 0) when none pass.
    """
    def compute_argmax_map(output):
        # Argmax over the class dimension -> (H, W) float pseudo-label map.
        output = output.detach().cpu().numpy()
        output = output.transpose((1, 2, 0))
        # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # use the concrete np.int64 alias.
        output = np.asarray(np.argmax(output, axis=2), dtype=np.int64)
        output = torch.from_numpy(output).float()
        return output

    # F.sigmoid is deprecated; torch.sigmoid is the supported spelling.
    D_outs = torch.sigmoid(D_outs)
    count = int((D_outs > threshold_st).sum().item())

    if count > 0:
        pred_sel = torch.Tensor(count, pred_all.size(1), pred_all.size(2), pred_all.size(3))
        label_sel = torch.Tensor(count, pred_sel.size(2), pred_sel.size(3))
        num_sel = 0
        for j in range(D_outs.size(0)):
            if D_outs[j] > threshold_st:
                pred_sel[num_sel] = pred_all[j]
                label_sel[num_sel] = compute_argmax_map(pred_all[j])
                num_sel += 1
        # NOTE(review): hard-codes .cuda(); callers rely on GPU tensors here.
        return pred_sel.cuda(), label_sel.cuda(), count
    else:
        return 0, 0, count


class TrainEpoch:
    """One epoch of s4GAN-style semi-supervised training.

    Alternates per step: (1) supervised loss on a labelled batch, (2) a
    self-training loss on unlabelled predictions the discriminator rates as
    confident, (3) a feature-matching loss against ground-truth one-hot maps,
    then a real/fake discriminator update. Uses separate AMP scalers for the
    generator and discriminator optimizers.
    """

    def __init__(self, num_classes,
                 generator, discriminator,
                 criterion_generator, criterion_discriminator,
                 opt_generator, opt_discriminator,
                 metric, device="cuda"):
        # One AMP loss scaler per optimizer so their scale factors evolve
        # independently.
        self.scaler_d = amp.GradScaler()
        self.scaler_g = amp.GradScaler()
        self.num_classes = num_classes
        self.generator = generator
        self.discriminator = discriminator
        self.criterion_generator = criterion_generator
        self.criterion_discriminator = criterion_discriminator
        self.opt_generator = opt_generator
        self.opt_discriminator = opt_discriminator
        self.metric = metric
        self.device = device
        self.lambda_fm = 0.1  # weight of the feature-matching loss
        self.lambda_st = 1.0  # weight of the self-training loss

        self._to_device()

    def to_device(self, i):
        """Move an (input, target) pair to the configured device."""
        return [i[0].to(self.device), i[1].to(self.device)]

    def _to_device(self):
        """Move models, criteria and metric to the configured device."""
        self.generator.to(self.device)
        self.discriminator.to(self.device)
        self.criterion_generator.to(self.device)
        self.criterion_discriminator.to(self.device)
        self.metric.to(self.device)

    def run(self, show_interval, train_loader, train_loader_remain, train_loader_gt,
            train_iter, train_gt_iter, train_remain_iter):
        """Run `show_interval` generator/discriminator steps.

        Returns:
            dict with averaged 'loss', 'loss_D', 'precision', 'recall',
            'f1' and 'iou' over the interval.
        """
        print(('\n' + '%10s' * 7) % ("train", 'gpu', 'loss', 'precision', 'recall', 'f1', 'iou'))

        # Training mode
        self.generator.train()
        self.discriminator.train()
        self.opt_generator.zero_grad()
        self.opt_discriminator.zero_grad()

        # Loss and metric meters
        loss_labeled_meter = AverageValueMeter()
        loss_unlabeled_meter = AverageValueMeter()
        loss_fm_meter = AverageValueMeter()
        loss_discriminator_meter = AverageValueMeter()

        iou_meter = AverageValueMeter()
        recall_meter = AverageValueMeter()
        f1_meter = AverageValueMeter()
        precision_meter = AverageValueMeter()

        pbar = tqdm(range(show_interval))
        for _ in pbar:
            # Freeze the discriminator while the generator trains.
            for param in self.discriminator.parameters():
                param.requires_grad = False

            # 1. Supervised loss on a labelled batch.
            x, y = self.to_device(get_next(train_loader, train_iter))

            with amp.autocast():
                pred_labeled = self.generator(x)
                loss_labeled = self.criterion_generator(pred_labeled, y)
            loss_labeled_value = loss_labeled.cpu().detach().numpy()
            metrics = self.metric(pred_labeled, y)
            # BUG FIX: `np.nan != x` is always True (NaN != anything, even
            # NaN), so the old guard never skipped NaN losses.
            if not np.isnan(loss_labeled_value):
                loss_labeled_meter.add(loss_labeled_value)
                precision_meter.add(metrics[0])
                recall_meter.add(metrics[1])
                f1_meter.add(metrics[2])
                iou_meter.add(metrics[3])
            pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % ("train", get_mem(), loss_labeled_value,
                                                                metrics[0], metrics[1], metrics[2], metrics[3]))

            # 2. Self-training loss on confident unlabelled predictions.
            x, _ = self.to_device(get_next(train_loader_remain, train_remain_iter))
            with amp.autocast():
                pred_g = self.generator(x)
                # Min-max normalize the image before concatenating with the
                # softmax map for the discriminator.
                x = (x - torch.min(x)) / (torch.max(x) - torch.min(x))
                pred_cat = torch.cat((F.softmax(pred_g, dim=1), x), dim=1)
                logit, pred_d = self.discriminator(pred_cat)
                pred_sel, labels_sel, good_count = find_good_maps(logit, pred_g)
                if good_count > 0:
                    loss_unlabeled = self.criterion_generator(pred_sel, labels_sel)
                    loss_unlabeled_value = loss_unlabeled.cpu().detach().numpy()
                    # NOTE(review): pred_sel comes from the unlabelled batch
                    # but is scored against the labelled targets `y`;
                    # `labels_sel` looks like the intended target -- confirm
                    # before changing.
                    metrics = self.metric(pred_sel, y)
                    if not np.isnan(loss_unlabeled_value):
                        loss_unlabeled_meter.add(loss_unlabeled_value)
                        precision_meter.add(metrics[0])
                        recall_meter.add(metrics[1])
                        f1_meter.add(metrics[2])
                        iou_meter.add(metrics[3])

            # 3. Feature-matching loss against ground-truth one-hot maps.
            x, y = get_next(train_loader_gt, train_gt_iter)
            x = x.to(self.device)
            y = Variable(one_hot(self.num_classes, y)).to(self.device)
            x = (x - torch.min(x)) / (torch.max(x) - torch.min(x))
            D_gt_v_cat = torch.cat((y, x), dim=1)
            with amp.autocast():
                _, D_out_y_gt = self.discriminator(D_gt_v_cat)
                loss_fm = torch.mean(torch.abs(torch.mean(D_out_y_gt, 0) - torch.mean(pred_d, 0)))
            loss_fm_meter.add(loss_fm.detach().cpu().numpy())

            if good_count > 0:
                loss_S = loss_labeled + self.lambda_fm * loss_fm + self.lambda_st * loss_unlabeled
            else:
                loss_S = loss_labeled + self.lambda_fm * loss_fm

            self.scaler_g.scale(loss_S).backward()
            self.scaler_g.step(self.opt_generator)
            self.scaler_g.update()
            # BUG FIX: gradients were only zeroed once before the loop, so
            # they accumulated across iterations; clear after every step.
            self.opt_generator.zero_grad()

            # Train the discriminator.
            for param in self.discriminator.parameters():
                param.requires_grad = True

            # Detach so the discriminator step does not backprop into the
            # generator graph.
            pred_cat = pred_cat.detach()
            with amp.autocast():
                D_out_z, _ = self.discriminator(pred_cat)
                D_out_z = torch.sigmoid(D_out_z)
                y_fake_ = Variable(torch.zeros(D_out_z.size(0), 1).cuda())
                loss_D_fake = self.criterion_discriminator(D_out_z, y_fake_)

                D_out_z_gt, _ = self.discriminator(D_gt_v_cat)
                D_out_z_gt = torch.sigmoid(D_out_z_gt)
                y_real_ = Variable(torch.ones(D_out_z_gt.size(0), 1).cuda())
                loss_D_real = self.criterion_discriminator(D_out_z_gt, y_real_)

            loss_D = (loss_D_fake + loss_D_real) / 2.0
            self.scaler_d.scale(loss_D).backward()
            self.scaler_d.step(self.opt_discriminator)
            self.scaler_d.update()
            # BUG FIX: same accumulation issue as the generator optimizer.
            self.opt_discriminator.zero_grad()

            # BUG FIX: `np.nan != loss_D` compared a scalar with a tensor and
            # was always truthy; test for NaN explicitly.
            if not torch.isnan(loss_D):
                loss_discriminator_meter.add(loss_D.item())

        logs = {
            'loss': loss_labeled_meter.mean,
            'loss_D': loss_discriminator_meter.mean,
            'precision': precision_meter.mean,
            'recall': recall_meter.mean,
            'f1': f1_meter.mean,
            'iou': iou_meter.mean
        }
        return logs


class ValEpoch:
    """Validation loop: evaluates the generator on a labelled dataloader and
    averages loss/precision/recall/f1/iou over the epoch."""

    def __init__(self, num_classes, generator, criterion_generator, metric, device="cuda"):
        self.num_classes = num_classes
        self.generator = generator
        self.criterion_generator = criterion_generator
        self.metric = metric
        self.device = device

        self._to_device()

    def _to_device(self):
        """Move model, criterion and metric to the configured device."""
        self.generator.to(self.device)
        self.criterion_generator.to(self.device)
        self.metric.to(self.device)

    @torch.no_grad()
    def run(self, dataloader):
        """Run one validation pass; returns a dict of averaged metrics."""
        print(('\n' + '%10s' * 7) % ("val", 'gpu', 'loss', 'precision', 'recall', 'f1', 'iou'))

        # Evaluation mode
        self.generator.eval()

        # Loss and metric meters
        loss_labeled_meter = AverageValueMeter()
        iou_meter = AverageValueMeter()
        recall_meter = AverageValueMeter()
        f1_meter = AverageValueMeter()
        precision_meter = AverageValueMeter()

        pbar = tqdm(enumerate(dataloader), total=len(dataloader))
        for step, (x, y) in pbar:
            x = x.to(self.device)
            y = y.to(self.device)
            pred = self.generator(x)
            loss = self.criterion_generator(pred, y)
            loss_labeled_value = loss.cpu().detach().numpy()
            metrics = self.metric(pred, y)
            # BUG FIX: `np.nan != loss` compared a scalar with a tensor and is
            # always truthy (NaN != anything); use np.isnan on the extracted
            # value so NaN batches are actually skipped.
            if not np.isnan(loss_labeled_value):
                loss_labeled_meter.add(loss_labeled_value)
                precision_meter.add(metrics[0])
                recall_meter.add(metrics[1])
                f1_meter.add(metrics[2])
                iou_meter.add(metrics[3])
            pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % ("val", get_mem(), loss_labeled_value,
                                                                metrics[0], metrics[1], metrics[2], metrics[3]))
        logs = {
            'loss': loss_labeled_meter.mean,
            'precision': precision_meter.mean,
            'recall': recall_meter.mean,
            'f1': f1_meter.mean,
            'iou': iou_meter.mean
        }
        return logs
