import paddle
import time
import numpy as np
from config import Config
from visualdl import LogWriter
import argparse
from models.backbones import *
from models.loss import *
from models import *
from dataset import load_data_loader,load_semi_data_loader
from eval import eval
def train(args, train_loader, val_loader):
    """Supervised training loop.

    Builds the segmentation model, trains it with cross-entropy loss under a
    cosine-annealed Adam optimizer, logs loss/lr to VisualDL, and runs
    evaluation every ``args.val_step`` epochs.

    Args:
        args: parsed config namespace (num_classes, iters, val_step, lr,
            batch_size, ...).
        train_loader: iterable of (inputs, labels) training batches.
        val_loader: validation loader passed through to ``eval``.
    """
    model = init_model(args.num_classes)
    iters = args.iters
    eval_iters = args.val_step
    # Total number of optimizer updates, used as the cosine period.
    # NOTE(review): len(train_loader) is usually already the number of
    # batches per epoch; dividing by batch_size again looks suspicious —
    # confirm against the loader implementation.
    total_times = args.iters * (len(train_loader) // args.batch_size)

    # Training schedule: cosine-annealed learning rate driven per step.
    scheduler = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=args.lr, T_max=total_times)
    optimizer = paddle.optimizer.Adam(learning_rate=scheduler, parameters=model.parameters())

    ce_loss = CrossEntropyLoss()

    tot_loss = 0
    model.train()
    with LogWriter(logdir="./log/train") as writer:
        for epoch in range(iters):
            for i, (inputs, labels) in enumerate(train_loader):
                out = model(inputs)[0]
                loss = ce_loss(out, labels.astype('int64'))
                tot_loss += loss.numpy()[0]
                lr = optimizer.get_lr()
                loss.backward()           # backprop
                optimizer.step()          # update parameters
                optimizer.clear_grad()    # clear gradients
                # Step the scheduler object directly instead of reaching into
                # the private optimizer._learning_rate attribute.
                scheduler.step()

            writer.add_scalar(tag="loss", step=epoch + 1, value=tot_loss)
            writer.add_scalar(tag="lr", step=epoch + 1, value=lr)
            tot_loss = 0
            if epoch % eval_iters == 0 and epoch != 0:
                print("{}times,start eval......".format(epoch // eval_iters))
                miou = eval(args, val_loader, model)
                model.train()  # eval() may switch the model out of train mode
                writer.add_scalar(tag="miou", step=epoch // eval_iters, value=miou)
                print("{}times,start eval......,miou is:{}".format(epoch // eval_iters, miou))
def main():
    """Parse the config and dispatch to supervised or semi-supervised training."""
    cfg = Config()
    args = cfg.args
    train_loader = load_data_loader("train", args.root, args)
    val_loader = load_data_loader("val", args.root, args)
    if args.train_method != "semi":
        train(args, train_loader, val_loader)
    else:
        semi_loader = load_semi_data_loader(args.semi_root, args)
        semi_train(args, train_loader, val_loader, semi_loader)
def semi_train(args, train_loader, val_loader, semi_loader):
    """Semi-supervised adversarial training.

    Alternates updates of a segmentation generator (model_G) and a
    fully-convolutional discriminator (model_D):

    * Generator step (discriminator frozen): an unlabeled batch contributes
      either an adversarial loss (epoch < args.start_semi, pushing G to fool
      D) or a self-training cross-entropy loss masked by D's confidence map;
      a labeled batch contributes supervised cross-entropy plus an
      adversarial term.
    * Discriminator step: D is trained to score G's predictions as fake and
      ground-truth label maps as real.

    Losses are averaged over 30-epoch windows into VisualDL; mIoU is
    evaluated every 100 epochs.

    Args:
        args: parsed config namespace.
        train_loader: labeled (inputs, labels) batches.
        val_loader: validation loader for ``eval``.
        semi_loader: unlabeled batches.
    """
    model_G = init_model(args.num_classes)    # generator (segmentation net)
    model_D = init_model_D(args.num_classes)  # discriminator
    iters = args.iters
    eval_iters = args.val_step
    # NOTE(review): len(train_loader) is usually already batches-per-epoch;
    # the extra //batch_size looks suspicious — confirm.
    total_times = args.iters * (len(train_loader) // args.batch_size)

    # Separate cosine schedules/optimizers for G and D.
    scheduler_G = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=args.lr, T_max=total_times)
    optimizer_G = paddle.optimizer.Adam(learning_rate=scheduler_G, parameters=model_G.parameters())

    scheduler_D = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=args.lr, T_max=total_times)
    optimizer_D = paddle.optimizer.Adam(learning_rate=scheduler_D, parameters=model_D.parameters())

    be_loss = BCEWithLogitsLoss2d()
    ce_loss = CrossEntropyLoss()

    train_loader_iter_G = enumerate(train_loader)  # labeled data for the G step
    train_loader_iter_D = enumerate(train_loader)  # labeled data for the D step
    semi_loader_iter = enumerate(semi_loader)      # unlabeled data

    # Running sums for 30-epoch averaged logging.
    tot_semi_cro_loss = 0
    tot_semi_gan_loss = 0
    tot_cross = 0
    tot_gan_loss = 0
    tot_seg_loss = 0

    tot_loss_D = 0

    index = 0  # logging step counter for the 30-epoch windows
    with LogWriter(logdir="./log") as writer:
        for epoch in range(iters):
            # ---- Generator update: freeze discriminator gradients ----
            semi_cro_loss = 0  # unsupervised cross-entropy loss
            semi_gan_loss = 0  # unsupervised adversarial loss
            cro_loss = 0       # supervised cross-entropy loss
            gan_loss = 0       # supervised adversarial loss
            for param in model_D.parameters():
                param.stop_gradient = True

            # Pull an unlabeled batch; restart the iterator when exhausted.
            try:
                _, (inputs, labels) = next(semi_loader_iter)
            except StopIteration:
                # BUGFIX: recreate from semi_loader (the original restarted
                # from train_loader, silently feeding labeled data here).
                semi_loader_iter = enumerate(semi_loader)
                _, (inputs, labels) = next(semi_loader_iter)

            b, c, h, w = inputs.shape
            pred = model_G(inputs)[0]  # [batch, num_classes, h, w]
            # Detached hard prediction, reused later to train the discriminator.
            semi_G_pred = one_hot(paddle.argmax(pred.detach(), axis=1), args)
            labels = paddle.argmax(pred, axis=1)  # pseudo-labels [batch, h, w]
            if epoch < args.start_semi:
                # Warm-up phase: push G to make D score its output as "real".
                semi_ignore_mask = (paddle.ones([b, h, w]) != 1)  # all-False mask
                semi_labels = make_gan_label(1, semi_ignore_mask)  # [batch, h, w]
                semi_gan_loss = be_loss(model_D(one_hot(labels, args)), semi_labels)
                tot_semi_gan_loss += semi_gan_loss.numpy()[0]
            else:
                # Self-training phase: D's confidence map gates which pixels
                # of the pseudo-labels are trusted (255 = ignore).
                b, _, h, w = pred.shape
                G_pred = model_D(one_hot(labels, args))  # [batch, 1, h, w]
                G_pred = nn.functional.sigmoid(G_pred)   # confidence map
                g_ignore_mask = (G_pred > args.mask_T).squeeze(axis=1)
                ignore_255 = paddle.ones(g_ignore_mask.shape, dtype='int64') * 255
                t_labels = paddle.where(g_ignore_mask, ignore_255, labels)
                semi_cro_loss = ce_loss(pred, t_labels) * args.lambda_semi
                tot_semi_cro_loss += semi_cro_loss.numpy()[0]

            # ---- Supervised part of the generator update ----
            try:
                _, (inputs, labels) = next(train_loader_iter_G)
            except StopIteration:
                train_loader_iter_G = enumerate(train_loader)
                _, (inputs, labels) = next(train_loader_iter_G)
            pred = model_G(inputs)[0]  # [batch, num_classes, h, w]
            cro_loss = ce_loss(pred, labels.astype('int64'))
            tot_cross += cro_loss.numpy()[0]

            f_d_inputs = paddle.argmax(pred, axis=1)
            G_pred = f_d_inputs.detach()  # kept for the discriminator step
            t_labels = paddle.ones(labels.shape, dtype='int64')
            gan_loss = be_loss(model_D(one_hot(f_d_inputs, args)), t_labels) * args.lambda_adv
            tot_gan_loss += gan_loss.numpy()[0]

            loss_seg = semi_cro_loss + semi_gan_loss + cro_loss + gan_loss
            tot_seg_loss += loss_seg.numpy()[0]

            # 30-epoch averaged logging for the generator losses.
            if epoch % 30 == 0 and epoch != 0:
                if epoch < args.start_semi:
                    writer.add_scalar(tag="半监督对抗loss", step=index, value=tot_semi_gan_loss / 30)
                    tot_semi_gan_loss = 0
                else:
                    writer.add_scalar(tag="半监督交叉熵loss", step=index, value=tot_semi_cro_loss / 30)
                    tot_semi_cro_loss = 0
                writer.add_scalar(tag="有监督对抗loss", step=index, value=tot_gan_loss / 30)
                writer.add_scalar(tag="有监督交叉熵loss", step=index, value=tot_cross / 30)
                writer.add_scalar(tag="总loss", step=index, value=tot_seg_loss / 30)
                tot_gan_loss = 0
                tot_cross = 0
                tot_seg_loss = 0

            loss_seg.backward()

            # ---- Discriminator update: unfreeze D ----
            for param in model_D.parameters():
                param.stop_gradient = False
            loss_D = 0
            f_labels = paddle.zeros([b, h, w], dtype='int64')  # "fake" targets
            t_labels = paddle.ones([b, h, w], dtype='int64')   # "real" targets
            # Fake-label losses first.
            if args.use_semi:  # also feed D the unlabeled-batch predictions
                semi_D_pred = model_D(semi_G_pred).squeeze(axis=1)  # [batch, h, w]
                loss_D += be_loss(semi_D_pred, f_labels)
            D_pred = model_D(one_hot(G_pred, args)).squeeze(axis=1)
            loss_D += be_loss(D_pred, f_labels)
            # Real-label loss: ground-truth label maps should score as real.
            try:
                _, (_, inputs) = next(train_loader_iter_D)
            except StopIteration:
                # BUGFIX: the original rebound/used train_loader_iter_G here,
                # leaving train_loader_iter_D permanently exhausted.
                train_loader_iter_D = enumerate(train_loader)
                _, (_, inputs) = next(train_loader_iter_D)
            D_pred = model_D(one_hot(inputs, args))
            loss_D += be_loss(D_pred, t_labels)
            tot_loss_D += loss_D.numpy()[0]
            if epoch % 30 == 0 and epoch != 0:
                writer.add_scalar(tag="鉴别器total_loss", step=index, value=tot_loss_D / 30)
                tot_loss_D = 0
                index += 1
            # ---- Apply both updates ----
            optimizer_G.step()
            optimizer_G.clear_grad()
            scheduler_G.step()
            loss_D.backward()
            optimizer_D.step()
            optimizer_D.clear_grad()
            scheduler_D.step()
            if epoch % 100 == 0 and epoch != 0:
                print("{}times,start eval......".format(epoch // 100))
                miou = eval(args, val_loader, model_G)
                model_G.train()  # restore train mode, consistent with train()
                writer.add_scalar(tag="miou", step=epoch // 100, value=miou)
def one_hot(label, args):
    """One-hot encode an integer label map.

    Args:
        label: paddle tensor of class indices, shape [batch, h, w].
        args: config namespace; only ``args.num_classes`` is read.

    Returns:
        float32 paddle tensor of shape [batch, num_classes, h, w].
        Values outside [0, num_classes) — e.g. the 255 ignore label —
        produce an all-zero channel vector, matching the original
        per-class loop.
    """
    label_np = label.numpy()
    # Single vectorized broadcast comparison replaces the per-class loop:
    # [batch, 1, h, w] == [1, num_classes, 1, 1] -> [batch, C, h, w].
    class_ids = np.arange(args.num_classes).reshape(1, -1, 1, 1)
    encoded = (label_np[:, None, :, :] == class_ids).astype('float32')
    return paddle.to_tensor(encoded)


def make_gan_label(flag, ignore_mask):
    """Build a target label map for training the GAN discriminator.

    Args:
        flag: 0 or 1 — the fill value for non-ignored pixels (all-fake
            vs. all-real targets).
        ignore_mask: bool paddle tensor (callers pass [batch, h, w];
            the original note said [1, h, w] — TODO confirm); True pixels
            are set to 255 so the loss skips them.

    Returns:
        int64 paddle tensor with the same shape as ``ignore_mask``.
    """
    fill = paddle.ones(ignore_mask.shape, dtype='int64') * flag
    ignored = paddle.ones(ignore_mask.shape, dtype='int64') * 255
    return paddle.where(ignore_mask, ignored, fill)

def init_model(num_classes):
    """Build the segmentation generator: DeepLabV3 over a ResNet50_vd backbone."""
    return DeepLabV3(num_classes=num_classes,
                     backbone_indices=[3],
                     backbone=ResNet50_vd())
def init_model_D(num_classes):
    """Build the fully-convolutional discriminator."""
    discriminator = FCDiscriminator(num_classes=num_classes)
    return discriminator

if __name__ == '__main__':
    # Script entry point: load the config and start training.
    main()