'''
Training script, version 2 (semi-supervised adversarial segmentation).

Pseudo labels: [batch, max(num_classes), h, w] * [batch, 1, h, w]
True labels:   [batch, 1, h, w] * [batch, 1, h, w]

Discriminator input: [batch, 3, h, w]
'''

import paddle
import time
import numpy as np
from config import Config
from visualdl import LogWriter
import argparse
from models.backbones import *
from models.loss import *
from models import *
from dataset import load_data_loader, load_semi_data_loader
from eval import eval


def main():
    """Entry point: build loaders from config and dispatch to the training method.

    Raises:
        ValueError: if ``args.train_method`` is not a supported method
            (currently only ``"semi"`` is implemented).
    """
    config = Config()
    args = config.args
    train_loader = load_data_loader("train", args.root, args)
    val_loader = load_data_loader("val", args.root, args)
    if args.train_method == "semi":
        semi_loader = load_semi_data_loader(args.semi_root, args)
        semi_train(args, train_loader, val_loader, semi_loader)
    else:
        # Previously an unrecognized method silently did nothing; fail loudly.
        raise ValueError("unsupported train_method: {!r}".format(args.train_method))


# 计算gp
def gradient_penalty(discriminator, real, fake, batchsize, lamt):
    # real是真实图像
    # fake是虚假图像
    t = paddle.uniform((batchsize, 1, 1, 1))
    # 扩大形状
    t = paddle.expand_as(t, real)
    # 对真实图像与虚假图像取噪声求均值
    inter = t * real + (1 - t) * fake
    inter.stop_gradient = False
    inter_ = discriminator(inter)
    # 调用Paddle的API求导
    grads = paddle.grad(
        inter_, inter,
        grad_outputs=paddle.ones_like(inter_)
    )[0]

    epsilon = 1e-12
    # 获取norm
    norm = paddle.sqrt(
        paddle.mean(paddle.square(grads), axis=1) + epsilon
    )
    # lamt是倍率
    gp = paddle.mean((norm - 1) ** 2) * lamt

    return gp


def semi_train(args, train_loader, val_loader, semi_loader):  # semi-supervised training
    """Adversarial semi-supervised training loop (WGAN-GP style).

    Alternates 5 discriminator updates per generator update. The generator
    (segmentation net) emits class logits; their per-pixel channel max is used
    as a confidence mask that gates the RGB input before it is fed to the
    discriminator. Periodically evaluates mIoU and checkpoints both models.

    Args:
        args: parsed config namespace (iters, batch_size, num_classes,
            start_semi, use_semi, semi_val_step, mask_T, lambda_semi, ...).
        train_loader: labeled training data loader.
        val_loader: validation data loader for periodic mIoU evaluation.
        semi_loader: unlabeled data loader for the semi-supervised branch.
    """
    model_G = init_model(args.num_classes)  # generator (segmentation network)
    model_D = init_model_D(3)  # discriminator (3-channel image input)

    semi_val_step = args.semi_val_step
    total_times = args.iters * (len(train_loader))  # total number of generator updates

    # Learning-rate schedules / optimizers can be tuned here.
    scheduler_G = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=0.002, T_max=total_times)
    optimizer_G = paddle.optimizer.RMSProp(learning_rate=scheduler_G, parameters=model_G.parameters())

    scheduler_D = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=0.001, T_max=total_times * 5)
    optimizer_D = paddle.optimizer.RMSProp(learning_rate=scheduler_D, parameters=model_D.parameters())

    ce_loss = CrossEntropyLoss()

    train_loader_iter = enumerate(train_loader)  # labeled (supervised) data iterator
    semi_loader_iter = enumerate(semi_loader)  # unlabeled (semi-supervised) data iterator

    optimizer_G.clear_grad()
    optimizer_D.clear_grad()

    epoch_G = 0
    epoch_D = 0
    with LogWriter(logdir="./log") as writer:
        while epoch_G < total_times:
            # ---- discriminator: 5 updates per generator update ----
            for times in range(5):
                try:
                    _, (inputs, labels) = next(train_loader_iter)
                except:  # bare except: restarts the iterator once it is exhausted
                    train_loader_iter = enumerate(train_loader)
                    _, (inputs, labels) = next(train_loader_iter)
                out = model_G(inputs)[0].detach()  # [b,n,h,w]; detached: D step must not update G
                mask = paddle.max(out, axis=1)  # per-pixel max over the class channel -> [b,h,w]
                fake_inputs = paddle.ones(inputs.shape)
                true_inputs = paddle.ones(inputs.shape)
                # "fake" = RGB image gated channel-wise by the generator's confidence mask
                for i in range(3):
                    fake_inputs[:, i, :, :] = mask * inputs[:, i, :, :]
                    true_inputs[:, i, :, :] = inputs[:, i, :, :]
                fake = model_D(fake_inputs)
                true = model_D(true_inputs)
                gp = gradient_penalty(model_D, true_inputs, fake_inputs, args.batch_size, 10)
                # WGAN critic loss + gradient penalty; the fake term is weighted 0.5 so
                # that with the optional semi branch below the fake weights sum to 1.
                loss_D = -paddle.mean(true) + 0.5 * paddle.mean(fake) + gp
                if args.use_semi_discriminate:
                    # Also train D on masked unlabeled images.
                    try:
                        _, (inputs, labels) = next(semi_loader_iter)
                    except:  # bare except: restarts the unlabeled iterator
                        semi_loader_iter = enumerate(semi_loader)
                        _, (inputs, labels) = next(semi_loader_iter)
                    out = model_G(inputs)[0].detach()
                    mask = paddle.max(out, axis=1)
                    for i in range(3):
                        inputs[:, i, :, :] = mask * inputs[:, i, :, :]
                    loss_D += 0.5 * paddle.mean(model_D(inputs))
                writer.add_scalar(tag="鉴别器损失", step=epoch_D, value=loss_D.numpy()[0])
                print("鉴别器训练epoch：{},损失：{}".format(epoch_D, loss_D.numpy()[0]))

                loss_D.backward()
                optimizer_D.step()  # updates only the discriminator's parameters
                optimizer_D.clear_grad()
                scheduler_D.step()
                writer.add_scalar(tag="鉴别器lr", step=epoch_D, value=scheduler_D.get_lr())
                epoch_D += 1
                # (disabled) weight-clipping alternative to the gradient penalty:
                # for param in model_D.parameters():
                # paddle.clip(param,-0.0001,0.0001)

            # ---- supervised generator training ----
            try:
                _, (inputs, labels) = next(train_loader_iter)  # fetch a labeled batch
            except:  # bare except: restarts the iterator once it is exhausted
                train_loader_iter = enumerate(train_loader)
                _, (inputs, labels) = next(train_loader_iter)
            out = model_G(inputs)[0]  # [b,n,h,w]
            cro_loss = ce_loss(out, labels.astype('int64'))
            g_inputs = paddle.ones(inputs.shape)
            mask = paddle.max(out, axis=1)
            for i in range(3):
                g_inputs[:, i, :, :] = mask * inputs[:, i, :, :]
            gan_loss = -paddle.mean(model_D(g_inputs))  # generator tries to fool D
            semi_gan_loss = 0
            semi_cro_loss = 0
            if epoch_D >= args.start_semi:  # unsupervised branch enabled
                try:
                    _, (inputs, labels) = next(semi_loader_iter)  # fetch an unlabeled batch
                except:  # bare except: restarts the unlabeled iterator
                    semi_loader_iter = enumerate(semi_loader)
                    _, (inputs, labels) = next(semi_loader_iter)
                g_semi_inputs = paddle.ones(inputs.shape)
                out = model_G(inputs)[0]
                mask = paddle.max(out, axis=1)
                labels = paddle.argmax(out, axis=1)  # pseudo labels from the generator's prediction
                for i in range(3):
                    g_semi_inputs[:, i, :, :] = mask * inputs[:, i, :, :]
                if epoch_D + epoch_G < args.use_semi:  # early phase: adversarial loss only
                    semi_gan_loss = -paddle.mean(model_D(g_semi_inputs))
                    writer.add_scalar(tag="无监督gan损失", step=epoch_G, value=semi_gan_loss.numpy()[0])
                else:
                    pass
                    b, _, h, w = out.shape
                    out = model_D(g_semi_inputs)  # [batch,1,h,w] D's confidence map used as pseudo ground truth
                    G_pred = nn.functional.sigmoid(out)  # probability map
                    # NOTE(review): pixels where D's confidence EXCEEDS mask_T are mapped
                    # to ignore index 255 below — usually it is the LOW-confidence pixels
                    # that get ignored; confirm this inversion is intended.
                    g_ignore_mask = (G_pred > args.mask_T).squeeze(axis=1)
                    ignore_255 = paddle.ones(g_ignore_mask.shape, dtype='int64') * 255
                    t_labels = paddle.where(g_ignore_mask, ignore_255, labels)
                    # NOTE(review): `out` was rebound to the discriminator output above, so
                    # this CE is computed on D's 1-channel map rather than the generator's
                    # class logits — looks like a bug; verify against the intended design.
                    semi_cro_loss = ce_loss(out, t_labels) * args.lambda_semi
                    writer.add_scalar(tag="无监督交叉熵损失", step=epoch_G, value=semi_cro_loss.numpy()[0])
            loss_seg = cro_loss + gan_loss + semi_cro_loss + semi_gan_loss
            writer.add_scalar(tag="有监督交叉熵损失", step=epoch_G, value=cro_loss.numpy()[0])
            writer.add_scalar(tag="有监督gan损失", step=epoch_G, value=gan_loss.numpy()[0])

            print("生成器训练epoch：{},交叉熵损失：{},有监督gan损失：{}".format(epoch_G, cro_loss.numpy()[0], gan_loss.numpy()[0]))
            loss_seg.backward()
            optimizer_G.step()
            optimizer_G.clear_grad()
            scheduler_G.step()
            writer.add_scalar(tag="生成器lr", step=epoch_G, value=scheduler_G.get_lr())
            epoch_G += 1
            # Periodic mIoU evaluation once the semi phase has started.
            if epoch_G % semi_val_step == 0 and epoch_D + epoch_G > args.use_semi:
                print("{}times,start eval......".format(epoch_G // semi_val_step))
                miou = eval(args, val_loader, model_G)
                writer.add_scalar(tag="miou", step=epoch_G // semi_val_step, value=miou)
                print("{}times,start eval......,miou is:{}".format(epoch_G // semi_val_step, miou))
            # Checkpoint both models and optimizer states every 100 G-steps (and at step 1).
            if epoch_G % 100 == 0 or epoch_G == 1:
                print("开始保存模型...")
                model_G_state_dict = model_G.state_dict()
                model_D_state_dict = model_D.state_dict()
                opm_D = optimizer_D.state_dict()
                opm_G = optimizer_G.state_dict()
                paddle.save(model_G_state_dict, "out/model_G.pdparams")
                paddle.save(model_D_state_dict, "out/model_D.pdparams")
                paddle.save(opm_D, "out/opm_D.pdopt")
                paddle.save(opm_G, "out/opm_G.pdopt")


def make_gan_label(flag, ignore_mask):
    """Build an int64 GAN label map shaped like ``ignore_mask``.

    Pixels where ``ignore_mask`` is True receive the ignore index 255;
    every other pixel receives ``flag``.
    """
    shape = ignore_mask.shape
    base = flag * paddle.ones(shape, dtype='int64')
    ignored = 255 * paddle.ones(shape, dtype='int64')
    return paddle.where(ignore_mask, ignored, base)  # [1,h,w]


def init_model(num_classes):
    """Build the generator: a DeepLabV3 head on a ResNet18_vd backbone."""
    return DeepLabV3(
        num_classes=num_classes,
        backbone_indices=[3],
        backbone=ResNet18_vd(),
    )


def init_model_D(num_classes):
    """Build the discriminator; ``num_classes`` is its input channel count."""
    discriminator = FCDiscriminator(num_classes=num_classes)
    return discriminator


if __name__ == '__main__':
    main()
    # (disabled) force CPU execution for debugging:
    # paddle.set_device('cpu')