# coding: utf-8

'''
Training routines for H-DenseUNet: the 2-D stage, the 3-D stage, and their fusion.
'''

from network.HDenseUnetV2 import DenseUNet,denseUnet3dForTrain,dense_rnn_net
from dataset import DatasetHdenseunet,KiTS19,KiTS19Path
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
import torch
from tqdm import tqdm
import numpy as np
from dataset.transform import CropTransform
import torch.nn.functional as F
import random



def train2dunet():
    """Fine-tune the 2-D DenseUNet stage of H-DenseUNet on stacked liver slices.

    Resumes from a pickled full-model checkpoint, trains for 200 epochs with
    Adam and a class-weighted cross-entropy loss, and saves the whole model
    after every epoch.  Requires CUDA and the project-local dataset/network
    packages; paths are hard-coded for the 3Dircadb setup.
    """
    transform = CropTransform(output_size=(224, 224), roi_error_range=15, use_roi=False)
    dataset = DatasetHdenseunet(root="/datasets/3Dircadb/chengkun_only_liver", stack_num=8, spec_classes=[0, 1, 2],
                                img_size=(224, 224),
                                train_transform=transform, valid_transform=transform)
    sampler = RandomSampler(dataset.train_dataset)
    data_loader = DataLoader(dataset.train_dataset, batch_size=1, sampler=sampler,
                             num_workers=1, pin_memory=True)

    # Resume training from a previously saved full-model checkpoint.
    # NOTE(review): pickled whole-model checkpoints are version-fragile;
    # prefer state_dict checkpoints when the pipeline allows it.
    model = torch.load(
        "/home/diaozhaoshuo/log/BeliefFunctionNN/chengkung/3dircad/hdenseunet/dense_unet_2d/epoll_084.pkl").cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001, weight_decay=0.0005)
    # Class weights compensate the background / liver / tumour imbalance.
    criterion = torch.nn.CrossEntropyLoss(weight=torch.tensor((0.78, 0.65, 8.57), device='cuda'), reduction='mean').cuda()
    model_save_path = "/home/diaozhaoshuo/log/BeliefFunctionNN/chengkung/3dircad/hdenseunet/dense_unet_2d"

    for i in range(200):
        print("-------------------epoll{}---------------------------".format(i))

        model.train()
        torch.set_grad_enabled(True)

        loss_list = []
        tbar = tqdm(data_loader, ascii=True, desc='train', dynamic_ncols=True)
        for batch_idx, temp in enumerate(tbar):
            data = temp["image"].cuda()
            # .long() directly on the GPU tensor; the previous
            # .cuda() -> .type(torch.LongTensor) -> .cuda() chain bounced the
            # labels through host memory for nothing.
            mask = temp["label"].cuda().long()
            # Fold the slice stack into the batch dimension so the 2-D network
            # sees one single-channel slice per batch element.
            input2d = data.permute(1, 0, 2, 3)

            optimizer.zero_grad()
            output = model(input2d)
            cls = output["output"]
            # Re-stack per-slice logits to (1, C, D, H, W) to match the mask.
            cls = cls.clone().permute(1, 0, 2, 3)
            cls.unsqueeze_(0)
            loss = criterion(cls, mask)
            loss.backward()
            optimizer.step()

            loss_list.append(loss.item())

            tbar.set_postfix(loss=f'{loss.item():.5f}')

        # Zero-pad the epoch index so checkpoints sort lexicographically and
        # match the "epoll_084.pkl" naming this function resumes from.
        torch.save(model, "{}/epoll_{}.pkl".format(model_save_path, str(i).zfill(3)))
        print()
        print("train loss : {}".format(np.mean(np.array(loss_list))))
        print()

def train2dunetprocess():
    """Train the 2-D DenseUNet from scratch with deep supervision.

    Every head in the network output dict (except the "feature" map) gets a
    cross-entropy term against the label map resized to that head's
    resolution; the sum of the per-head losses is optimised.  The learning
    rate is reduced on loss plateaus and the whole model is saved after each
    of the 50 epochs.  Requires CUDA and the project-local packages.
    """
    model = DenseUNet(in_ch=1, out_ch=3).cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001, weight_decay=0.0005)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.1, patience=4, verbose=True,
        threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08
    )
    # Class weights compensate the background / liver / tumour imbalance.
    criterion = torch.nn.CrossEntropyLoss(weight=torch.tensor((0.78, 0.65, 8.57), device='cuda'), reduction='mean').cuda()
    model_save_path = "/home/diaozhaoshuo/log/BeliefFunctionNN/chengkung/3dircad/hdenseunet/dense_unet_2d"

    # Dataset and loader are epoch-invariant: build them once instead of
    # re-creating them at the top of every epoch.  The RandomSampler already
    # reshuffles each time the loader is iterated, so behaviour is unchanged.
    transform = CropTransform(output_size=(224, 224), roi_error_range=15, use_roi=False)
    dataset = KiTS19(root="/datasets/3Dircadb/chengkun_only_liver", stack_num=0, spec_classes=[0, 1, 2],
                     img_size=(224, 224),
                     use_roi=False, roi_file=None, roi_error_range=5,
                     train_transform=transform, valid_transform=transform)
    sampler = RandomSampler(dataset.train_dataset)
    data_loader = DataLoader(dataset.train_dataset, batch_size=3, sampler=sampler,
                             num_workers=1, pin_memory=True)

    for i in range(50):
        print("-------------------epoll{}, shape{}---------------------------".format(i, 224))
        print(f'Learning rate: {optimizer.param_groups[0]["lr"]}')

        model.train()
        torch.set_grad_enabled(True)

        loss_list = []
        tbar = tqdm(data_loader, ascii=True, desc='train', dynamic_ncols=True)
        for batch_idx, temp in enumerate(tbar):
            data = temp["image"].cuda()
            # Convert labels on the GPU; avoids the old GPU->CPU->GPU bounce.
            mask = temp["label"].cuda().long()

            optimizer.zero_grad()

            outputs = model(data)
            losses = {}
            for key, up_outputs in outputs.items():
                if key == "feature":
                    # Intermediate feature map carries no supervision signal.
                    continue
                b, c, h, w = up_outputs.shape
                # Resize the labels to this head's resolution.
                # NOTE(review): bilinear interpolation of integer class labels
                # can blend classes at boundaries (e.g. 0 and 2 -> 1);
                # mode='nearest' would avoid that — confirm intended.
                up_labels = torch.unsqueeze(mask.float(), dim=1)
                up_labels = F.interpolate(up_labels, size=(h, w), mode='bilinear')
                up_labels = torch.squeeze(up_labels, dim=1).long()
                losses[key] = criterion(up_outputs, up_labels)

            # Deep supervision: optimise the sum of all per-head losses.
            loss = sum(losses.values())
            loss.backward()
            optimizer.step()

            # Track only the full-resolution head for the epoch average.
            loss_list.append(losses["output"].item())
            losses['total'] = loss
            # Detach to plain floats for the progress bar display.
            losses = {k: v.item() for k, v in losses.items()}
            tbar.set_postfix(losses)

        torch.save(model, "{}/epoll_{}.pkl".format(model_save_path, str(i).zfill(3)))
        print()
        print("train loss : {}".format(np.mean(np.array(loss_list))))
        print()

        scheduler.step(np.mean(np.array(loss_list)))

def train3dmodel():
    """Train the 3-D stage of H-DenseUNet on features from a frozen 2-D model.

    For every slice stack, the frozen 2-D DenseUNet produces per-slice feature
    maps which are re-stacked into a volume and fed, together with the raw
    volume, into the trainable 3-D network.  Trains for 300 epochs; a
    checkpoint is written every 30 epochs.  Requires CUDA and the
    project-local packages.
    """
    transform = CropTransform(output_size=(224, 224), roi_error_range=15, use_roi=False)
    dataset = DatasetHdenseunet(root="/datasets/3Dircadb/chengkun_only_liver", stack_num=8, spec_classes=[0, 1, 2],
                                img_size=(224, 224),
                                train_transform=transform, valid_transform=transform)
    sampler = RandomSampler(dataset.train_dataset)
    data_loader = DataLoader(dataset.train_dataset, batch_size=1, sampler=sampler,
                             num_workers=1, pin_memory=True)

    # Frozen 2-D feature extractor (eval mode is set inside the epoch loop).
    model_2d = torch.load("/home/diaozhaoshuo/log/BeliefFunctionNN/chengkung/3dircad/hdenseunet/dense_unet_2d_old1/epoll_032.pkl").cuda()
    model = denseUnet3dForTrain().cuda()

    optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=0.0005)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.1, patience=10, verbose=True,
        threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08
    )
    # Class weights compensate the background / liver / tumour imbalance.
    criterion = torch.nn.CrossEntropyLoss(weight=torch.tensor((0.78, 0.65, 8.57), device='cuda'),
                                          reduction='mean').cuda()
    model_save_path = "/home/diaozhaoshuo/log/BeliefFunctionNN/chengkung/3dircad/hdenseunet/dense_unet_3d"

    for i in range(300):
        print("-------------------epoll{}, shape{}---------------------------".format(i, 224))
        print(f'Learning rate: {optimizer.param_groups[0]["lr"]}')

        model.train()
        model_2d.eval()
        torch.set_grad_enabled(True)

        loss_list = []
        tbar = tqdm(data_loader, ascii=True, desc='train', dynamic_ncols=True)
        for batch_idx, temp in enumerate(tbar):
            data = temp["image"].cuda()
            # Convert labels on the GPU; avoids the old GPU->CPU->GPU bounce.
            mask = temp["label"].cuda().long()
            # Fold the slice stack into the batch dimension for the 2-D net.
            input2d = data.permute(1, 0, 2, 3)

            # The 2-D network only supplies features; no gradients needed.
            with torch.no_grad():
                output = model_2d(input2d)
                feature2d = output["feature"]

            # Re-assemble per-slice features into a batched volume for the 3-D net.
            data = data.unsqueeze(0)
            feature2d = feature2d.permute(1, 0, 2, 3)
            feature2d = feature2d.unsqueeze(0)

            optimizer.zero_grad()
            output = model(data, feature2d)
            loss = criterion(output, mask)
            loss.backward()
            optimizer.step()

            loss_list.append(loss.item())

            tbar.set_postfix(loss=f'{loss.item():.5f}')

        # Checkpoint every 30 epochs; the file index is the checkpoint ordinal.
        if i % 30 == 0:
            torch.save(model, "{}/epoll_{}.pkl".format(model_save_path, str(i // 30).zfill(3)))
        print()
        print("train loss : {}".format(np.mean(np.array(loss_list))))
        print()

        scheduler.step(np.mean(np.array(loss_list)))

def trainfusion():
    """Jointly fine-tune the fused 2-D + 3-D H-DenseUNet.

    Builds the end-to-end fusion network, transplants the weights of the
    separately pre-trained 2-D and 3-D models into its sub-modules, then
    trains the whole thing for 300 epochs, saving a checkpoint per epoch.
    Requires CUDA and the project-local packages.
    """
    transform = CropTransform(output_size=(224, 224), roi_error_range=15, use_roi=False)
    dataset = DatasetHdenseunet(root="/datasets/3Dircadb/chengkun_only_liver", stack_num=8, spec_classes=[0, 1, 2],
                                img_size=(224, 224),
                                train_transform=transform, valid_transform=transform)
    sampler = RandomSampler(dataset.train_dataset)
    data_loader = DataLoader(dataset.train_dataset, batch_size=1, sampler=sampler,
                             num_workers=1, pin_memory=True)

    # Pre-trained stage models; used only as weight donors below.
    model_3d = torch.load(
        "/home/diaozhaoshuo/log/BeliefFunctionNN/chengkung/3dircad/hdenseunet/dense_unet_3d/epoll_006.pkl").cuda()
    model_2d = torch.load(
        "/home/diaozhaoshuo/log/BeliefFunctionNN/chengkung/3dircad/hdenseunet/dense_unet_2d_old1/epoll_032.pkl").cuda()

    # Transplant the pre-trained weights into the fusion network, sub-module
    # by sub-module (the 3-D checkpoint also carries the final conv head).
    model = dense_rnn_net().cuda()
    model.dense2d.load_state_dict(model_2d.state_dict())
    model.dense3d.load_state_dict(model_3d.dense_unet_3d.state_dict())
    model.finalConv3d1.load_state_dict(model_3d.finalConv3d1.state_dict())
    model.finalBn.load_state_dict(model_3d.finalBn.state_dict())
    model.finalAc.load_state_dict(model_3d.finalAc.state_dict())
    model.finalConv3d2.load_state_dict(model_3d.finalConv3d2.state_dict())

    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001, weight_decay=0.0005)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.1, patience=4, verbose=True,
        threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08
    )
    # Class weights compensate the background / liver / tumour imbalance.
    criterion = torch.nn.CrossEntropyLoss(weight=torch.tensor((0.78, 0.65, 8.57), device='cuda'),
                                          reduction='mean').cuda()
    model_save_path = "/home/diaozhaoshuo/log/BeliefFunctionNN/chengkung/3dircad/hdenseunet/dense_unet_fusion"

    for i in range(300):
        print("-------------------epoll{}, shape{}---------------------------".format(i, 224))
        print(f'Learning rate: {optimizer.param_groups[0]["lr"]}')

        model.train()
        # NOTE(review): model_2d is not used in the loop below (its weights
        # were copied into `model`); the eval() call is kept for parity with
        # the other trainers.
        model_2d.eval()
        torch.set_grad_enabled(True)

        loss_list = []
        tbar = tqdm(data_loader, ascii=True, desc='train', dynamic_ncols=True)
        for batch_idx, temp in enumerate(tbar):
            data = temp["image"].cuda()
            # Convert labels on the GPU; avoids the old GPU->CPU->GPU bounce.
            mask = temp["label"].cuda().long()

            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, mask)
            loss.backward()
            optimizer.step()

            loss_list.append(loss.item())

            tbar.set_postfix(loss=f'{loss.item():.5f}')

        torch.save(model, "{}/epoll_{}.pkl".format(model_save_path, str(i).zfill(3)))
        print()
        print("train loss : {}".format(np.mean(np.array(loss_list))))
        print()

        scheduler.step(np.mean(np.array(loss_list)))

def train3dmodel_v2():
    """Second-round training of the 3-D stage using the KiTS19Path dataset.

    Same scheme as train3dmodel — a frozen 2-D DenseUNet supplies per-slice
    features which are stacked into a volume for the trainable 3-D network —
    but with a different dataset wrapper, different class weights, a shorter
    schedule (100 epochs), and a checkpoint every epoch.  Requires CUDA and
    the project-local packages.
    """
    transform = CropTransform(output_size=(224, 224), roi_error_range=15, use_roi=False)
    dataset = KiTS19Path("/datasets/3Dircadb/chengkun_only_liver", stack_num=8, spec_classes=[0, 1, 2],
                         img_size=(224, 224),
                         use_roi=False, roi_file=None, roi_error_range=5,
                         train_transform=transform, valid_transform=transform)
    sampler = RandomSampler(dataset.train_dataset)
    data_loader = DataLoader(dataset.train_dataset, batch_size=1, sampler=sampler,
                             num_workers=1, pin_memory=True)

    # Frozen 2-D feature extractor (eval mode is set inside the epoch loop).
    model_2d = torch.load(
        "/home/diaozhaoshuo/log/BeliefFunctionNN/chengkung/3dircad/hdenseunet/dense_unet_2d_old1/epoll_032.pkl").cuda()
    model = denseUnet3dForTrain().cuda()

    # Different class weights than the first round — milder tumour emphasis.
    criterion = torch.nn.CrossEntropyLoss(weight=torch.tensor((0.2, 1.2, 2.2), device='cuda'),
                                          reduction='mean').cuda()

    optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=0.0005)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.1, patience=4, verbose=True,
        threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08
    )

    model_save_path = "/home/diaozhaoshuo/log/BeliefFunctionNN/chengkung/3dircad/hdenseunet/dense_unet_3d"
    for i in range(100):
        print("-------------------epoll{}, shape{}---------------------------".format(i, 224))
        print(f'Learning rate: {optimizer.param_groups[0]["lr"]}')

        model.train()
        model_2d.eval()
        torch.set_grad_enabled(True)

        loss_list = []
        tbar = tqdm(data_loader, ascii=True, desc='train', dynamic_ncols=True)
        for batch_idx, temp in enumerate(tbar):
            data = temp["image"].cuda()
            # Convert labels on the GPU; avoids the old GPU->CPU->GPU bounce.
            mask = temp["label"].cuda().long()

            # Fold the slice stack into the batch dimension for the 2-D net.
            input2d = data.permute(1, 0, 2, 3)

            # The 2-D network only supplies features; no gradients needed.
            with torch.no_grad():
                output = model_2d(input2d)
                feature2d = output["feature"]

            # Re-assemble per-slice features into a batched volume for the 3-D net.
            data = data.unsqueeze(0)
            feature2d = feature2d.permute(1, 0, 2, 3)
            feature2d = feature2d.unsqueeze(0)

            optimizer.zero_grad()
            output = model(data, feature2d)
            loss = criterion(output, mask)
            loss.backward()
            optimizer.step()

            loss_list.append(loss.item())

            tbar.set_postfix(loss=f'{loss.item():.5f}')

        torch.save(model, "{}/epoll_{}.pkl".format(model_save_path, str(i).zfill(3)))
        print()
        print("train loss : {}".format(np.mean(np.array(loss_list))))
        print()

        scheduler.step(np.mean(np.array(loss_list)))

def trainfusion_v2():
    """Second-round fusion fine-tuning, focused on tumour-bearing samples.

    Like trainfusion, but uses the KiTS19Path dataset wrapper, heavier tumour
    class weighting, a 100-epoch schedule, and skips any batch whose label
    volume contains no tumour voxels (class 2).  Requires CUDA and the
    project-local packages.
    """
    transform = CropTransform(output_size=(224, 224), roi_error_range=15, use_roi=False)
    dataset = KiTS19Path("/datasets/3Dircadb/chengkun_only_liver", stack_num=8, spec_classes=[0, 1, 2],
                         img_size=(224, 224),
                         use_roi=False, roi_file=None, roi_error_range=5,
                         train_transform=transform, valid_transform=transform)
    sampler = RandomSampler(dataset.train_dataset)
    data_loader = DataLoader(dataset.train_dataset, batch_size=1, sampler=sampler,
                             num_workers=1, pin_memory=True)

    # Pre-trained stage models; used only as weight donors below.
    model_3d = torch.load(
        "/home/diaozhaoshuo/log/BeliefFunctionNN/chengkung/3dircad/hdenseunet/dense_unet_3d/epoll_001.pkl").cuda()
    model_2d = torch.load(
        "/home/diaozhaoshuo/log/BeliefFunctionNN/chengkung/3dircad/hdenseunet/dense_unet_2d_old1/epoll_032.pkl").cuda()

    # Transplant the pre-trained weights into the fusion network, sub-module
    # by sub-module (the 3-D checkpoint also carries the final conv head).
    model = dense_rnn_net().cuda()
    model.dense2d.load_state_dict(model_2d.state_dict())
    model.dense3d.load_state_dict(model_3d.dense_unet_3d.state_dict())
    model.finalConv3d1.load_state_dict(model_3d.finalConv3d1.state_dict())
    model.finalBn.load_state_dict(model_3d.finalBn.state_dict())
    model.finalAc.load_state_dict(model_3d.finalAc.state_dict())
    model.finalConv3d2.load_state_dict(model_3d.finalConv3d2.state_dict())

    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001, weight_decay=0.0005)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.1, patience=4, verbose=True,
        threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08
    )
    # Heavy weight on class 2 (tumour) for this fine-tuning round.
    criterion = torch.nn.CrossEntropyLoss(weight=torch.tensor((1.0, 1.0, 10.0), device='cuda'),
                                          reduction='mean').cuda()
    model_save_path = "/home/diaozhaoshuo/log/BeliefFunctionNN/chengkung/3dircad/hdenseunet/dense_unet_fusion"

    for i in range(100):
        print("-------------------epoll{}, shape{}---------------------------".format(i, 224))
        print(f'Learning rate: {optimizer.param_groups[0]["lr"]}')

        model.train()
        # NOTE(review): model_2d is not used in the loop below (its weights
        # were copied into `model`); the eval() call is kept for parity with
        # the other trainers.
        model_2d.eval()
        torch.set_grad_enabled(True)

        loss_list = []
        tbar = tqdm(data_loader, ascii=True, desc='train', dynamic_ncols=True)
        for batch_idx, temp in enumerate(tbar):
            data = temp["image"].cuda()
            # Convert labels on the GPU; avoids the old GPU->CPU->GPU bounce.
            mask = temp["label"].cuda().long()

            # Train only on samples that actually contain tumour voxels.
            if torch.max(mask) < 2:
                continue

            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, mask)
            loss.backward()
            optimizer.step()

            loss_list.append(loss.item())

            tbar.set_postfix(loss=f'{loss.item():.5f}')

        torch.save(model, "{}/epoll_{}.pkl".format(model_save_path, str(i).zfill(3)))
        print()
        print("train loss : {}".format(np.mean(np.array(loss_list))))
        print()

        scheduler.step(np.mean(np.array(loss_list)))





if __name__ == '__main__':
    # Entry point: runs only the second-round fusion fine-tuning; the other
    # training stages above are invoked manually as needed.
    trainfusion_v2()