import os
import torch
import fnmatch
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import argparse
import torch.utils.data.sampler as sampler
import shutil
from dataset.nyuv2ssl import *
from torch.autograd import Variable
from model.segnet_mtl import SegNet
from model.mapfns import Mapfns
from model.mapfns import ControlNet
from utils.evaluation import ConfMatrix, DepthMeter, NormalsMeter
import numpy as np
import pdb
from progress.bar import Bar as Bar
from utils import Logger, AverageMeter, accuracy, mkdir_p, savefig
from torch.autograd import Variable
import copy
from IPython import embed 
# import wandb 
import datetime
from os.path import join, split,exists
from loguru import logger as extra_logger 

from jupyters.utils import  * 
# compute parameter space
from jupyters.control_net_sampler import ControlNetSampler


from jupyters.control_net_sampler import  * 


def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total

from os.path import isdir, isfile, exists, join


def get_current_time():
    """Return the current local time formatted as 'YYYY-MM-DD_HH-MM-SS'."""
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%d_%H-%M-%S")

def make_dirs(path):
    """Create *path* (including parents) if it does not already exist.

    Uses ``exist_ok=True`` instead of the check-then-create pattern, which
    could raise FileExistsError if another process created the directory
    between the ``exists`` test and ``makedirs``.
    """
    os.makedirs(path, exist_ok=True)
        
        
def save_checkpoint(state, is_best, opt, filename='checkpoint.pth.tar'):
    """Serialize *state* into opt.out; copy to the best-model file when best.

    Args:
        state: dict of model/optimizer state and metadata for torch.save.
        is_best: when True, also copy the checkpoint to
            '<prefix>model_best.pth.tar'.
        opt: parsed options; uses opt.out (output dir) and opt.ssl_type,
            opt.rampup, opt.con_weight, opt.reg_weight for the name prefix.
        filename: checkpoint basename appended to the run-specific prefix.
    """
    checkpoint = opt.out
    # Build the run-identifying prefix once (was duplicated in two places,
    # inviting the two copies to drift apart).
    prefix = 'mtl_xtc_{}_{}_{}_{}_'.format(opt.ssl_type, opt.rampup, opt.con_weight, opt.reg_weight)
    filepath = os.path.join(checkpoint, prefix + filename)
    torch.save(state, filepath)
    if is_best:
        shutil.copyfile(filepath, os.path.join(checkpoint, prefix + 'model_best.pth.tar'))

class NYUTrainer:
    def __init__(self,opt):
        """Set up the trainer: resolve the run output directory, dump the
        config, record single-task reference scores, create the metric
        logger, and build the model and dataloaders.

        Args:
            opt: parsed argparse options; mutated in place (opt.out is
                rewritten to the run-specific results directory).
        """
        self.opt = opt
        # Fixed task order used everywhere: [segmentation, depth, surface normals].
        self.tasks = ['semantic', 'depth', 'normal']
        # Per-task map channels: 13 semantic classes, 1 depth channel, 3 normal components.
        self.input_channels = [13, 1, 3]

        if self.opt.resume :
            # Resuming: reuse the directory that holds the checkpoint file.
            self.opt.out = '/'.join(self.opt.resume.split('/')[:-1])
        else:
            #* results path: <out>_<timestamp>#<batch>#<lr>[#R]<suffix>/<ssl_type>/mtl
            self.opt.out = self.opt.out+"_" + get_current_time() + "#%d"%(self.opt.batch_size) + "#%.1e"%(self.opt.model_lr)
            if len(self.opt.load_pretrained) > 2:
                # '#R' marks runs started from a pretrained checkpoint
                # (len > 2 is a cheap "non-empty path" test).
                self.opt.out += "#R" 
            self.opt.out += self.opt.suffix
            
            paths = [self.opt.ssl_type, 'mtl']
            for i in range(len(paths)):
                self.opt.out = os.path.join(self.opt.out, paths[i])    
            make_dirs(self.opt.out)

        

        
        #* dump all options as "key: value" lines for reproducibility
        with open (join(self.opt.out,'config.txt'),'w')    as f :
            f.write('\n'.join(["%s: %s"%(k,str(v)) for k, v in opt.__dict__.items()]))

    


        #* single-task-learning reference scores, used by train_mtl to compute
        #* the relative multi-task performance delta (semantic is gained with
        #* '+', depth/normal with '-', i.e. lower error is better for those).
        self.stl_performance = {
            'full': {'semantic': 37.447399999999995, 'depth': 0.607902, 'normal': 25.938105}, 
            'onelabel': {'semantic': 26.1113, 'depth': 0.771502, 'normal': 30.073763}, 
            'randomlabels': {'semantic': 28.7153, 'depth': 0.754012, 'normal': 28.946388}
        }



        #* logger
        title = 'NYUv2'
        #* per-epoch metric logger writing one row per epoch to a txt file
        self.logger = Logger(os.path.join(self.opt.out, 'mtl_xtc_{}_{}_{}_{}_log.txt'.format(self.opt.ssl_type, self.opt.rampup, self.opt.con_weight, self.opt.reg_weight)), title=title)
        # Column names: T.* = train metrics, V.* = validation/test metrics,
        # then consistency loss and per-task loss weights.
        self.logger.set_names(['Epoch', 'T.Ls', 'T. mIoU', 'T. Pix', 'T.Ld', 'T.abs', 'T.rel', 'T.Ln', 'T.Mean', 'T.Med', 'T.11', 'T.22', 'T.30',
            'V.Ls', 'V. mIoU', 'V. Pix', 'V.Ld', 'V.abs', 'V.rel', 'V.Ln', 'V.Mean', 'V.Med', 'V.11', 'V.22', 'V.30', 'Con L', 'Ws', 'Wd', 'Wn'])


        # USE_CONTROLNET = True
        self.init_model()
        self.init_dataloader()

    def init_model(self,):
        """Build the main multi-task SegNet plus the auxiliary mapping network
        (Mapfns or ControlNet), their optimizers/schedulers, and optionally
        restore everything from a checkpoint.

        Side effects:
            Sets self.start_epoch; self.model / self.optimizer /
            self.scheduler (unless opt.gt_only); self.optimizer_film (and
            self.scheduler_film in the Mapfns path); self.controlnet_sampler
            (stage-2 ControlNet path); self.mapfns (unless opt.stage2).
        """
        #* define model, optimiser and scheduler
        #* main model, used to inference 
        self.start_epoch = 0
        if not self.opt.gt_only:
            model = SegNet(type_=self.opt.type, class_nb=13).cuda()
            #* init_optmizer
            params = []
            params += model.parameters()
            if not self.opt.use_controlnet:
                # The non-FiLM part of Mapfns is trained jointly with the main
                # model; the FiLM (gamma/beta) part gets its own optimizer below.
                mapfns = Mapfns(tasks=self.tasks, input_channels=self.input_channels).cuda()
                mapfns_params = [v for k, v in mapfns.named_parameters() if 'gamma' not in k and 'beta' not in k]
                params += mapfns_params
                print("%d params are optimized with main model "%(len(mapfns_params)))

            if self.opt.load_pretrained:
                loaded_ckpt = torch.load(self.opt.load_pretrained)
                # strict=False: pretrained checkpoint may lack some heads;
                # load_results[0] lists the missing keys.
                load_results = model.load_state_dict(loaded_ckpt['state_dict'], strict=False)
                print('model has been loaded from %s, \t but missing key: '%(self.opt.load_pretrained), '\t'.join(load_results[0]))

            self.optimizer = optim.Adam(params, lr=self.opt.model_lr)
            self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=100, gamma=0.5)
            self.model = model

        #* mapping function for multi-task; Mapfns is proposed by
        #* <Learning Multiple Dense Prediction Tasks from Partially Annotated Data>
        if self.opt.use_controlnet:
            if self.opt.stage2:
                # Stage 2: the ControlNet is already finetuned and only sampled from.
                assert self.opt.finetuned_controlnet_model is not None
                self.controlnet_sampler = ControlNetSampler(self.opt.finetuned_controlnet_model)
                extra_logger.info('finetuned controlnet loaded !')
            else:
                mapfns = ControlNet(out_dir = self.opt.out)

                #* only optimize control branch 
                """
                    np.unique(['.'.join(k.split('.')[:3]) for k,v in mapfns.model.named_parameters()])
                    model.diffusion_model.output_blocks
                """
                params_controlnet = list(mapfns.model.control_model.parameters())
                extra_logger.info('only train conditional branch of the controlnet')
                #* optionally also unfreeze the diffusion (UNet) decoder
                if self.opt.train_controlnet_decoder:
                    params_controlnet += list(mapfns.model.model.diffusion_model.output_blocks.parameters())
                    params_controlnet += list(mapfns.model.model.diffusion_model.out.parameters())
                    extra_logger.info('unfreeze the decoder of diffusion (Unet) as well.')
                optimizer_film = optim.Adam(params_controlnet, lr=1e-5)
                self.optimizer_film = optimizer_film
        else:
            # NOTE(review): `mapfns` is only bound above when opt.gt_only is
            # False — this branch assumes gt_only implies use_controlnet.
            params_film = [v for k, v in mapfns.named_parameters() if 'gamma' in k or 'beta' in k]
            # optimizer for the conditional auxiliary (FiLM) network
            optimizer_film = optim.Adam(params_film, lr=1e-3)
            scheduler_film = optim.lr_scheduler.StepLR(optimizer_film, step_size=30, gamma=0.5)
            self.optimizer_film = optimizer_film
            self.scheduler_film = scheduler_film

        if self.opt.resume: #* resume all
            #todo the self.opt.gt_only mode (local `model` is unbound when gt_only)
            checkpoint = torch.load(self.opt.resume)
            if checkpoint.get('state_dict') is not None :
                load_results = model.load_state_dict(checkpoint['state_dict'], strict=True)
                print('model has been loaded from %s, \t but missing key: '%(self.opt.resume), '\t'.join(load_results[0]))

            if checkpoint.get('epoch') is not None :
                self.start_epoch = checkpoint.get('epoch') if checkpoint.get('epoch')  is not None else 0

            if checkpoint.get('optimizer') is not None :
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            else:
                print('not optimier being resumed ')

            if checkpoint.get('mapfns') is not None:
                #* if using controlNet, the model structure is inconsistent and need further consideration
                mapfns.load_state_dict(checkpoint['mapfns'])

            if checkpoint.get('optimizer_film') is not None :
                self.optimizer_film.load_state_dict(checkpoint['optimizer_film'])

            # BUGFIX: was `opt.resume` — `opt` is not defined in this method's
            # scope, so resuming raised NameError; use self.opt.resume.
            print('=> checkpoint from {} loaded!'.format(self.opt.resume))

        if not self.opt.stage2:
            self.mapfns = mapfns

    def init_dataloader(self):
        """Build the NYUv2 train/test dataloaders and, for the partial-label
        modes, load the per-image task-label availability weights."""
        root = self.opt.dataroot
        # Per-image binary weights telling which tasks are annotated; the
        # fully-supervised mode ('full') needs none.
        if self.opt.ssl_type == 'onelabel':
            self.labels_weights = torch.load('{}onelabel.pth'.format(self.opt.labelroot))['labels_weights'].float().cuda()
        elif self.opt.ssl_type == 'randomlabels':
            self.labels_weights = torch.load('{}randomlabels.pth'.format(self.opt.labelroot))['labels_weights'].float().cuda()

        # Training set yields both an original and an augmented view (aug_twice).
        train_set = NYUv2_crop(root=root, train=True, augmentation=True, aug_twice=True)
        test_set = NYUv2(root=root, train=False)

        self.nyuv2_train_loader = torch.utils.data.DataLoader(
            dataset=train_set,
            batch_size=self.opt.batch_size,
            shuffle=True, num_workers=8, drop_last=True, persistent_workers=True)
        self.nyuv2_test_loader = torch.utils.data.DataLoader(
            dataset=test_set,
            batch_size=self.opt.batch_size,
            shuffle=False, num_workers=8, persistent_workers=True)


    def train_control_net(self):
        """Stage-1 loop: train only the ControlNet (self.mapfns) on the
        augmented GT task maps; only self.optimizer_film is stepped, so the
        main SegNet weights are never updated here. A checkpoint holding the
        mapfns and its optimizer state is written every epoch.
        """
        # define parameters
        train_batch = len(self.nyuv2_train_loader)
        best_performance = -100
        isbest=False  # never flips to True: this loop runs no evaluation

        for epoch in range(self.start_epoch, self.opt.total_epoch):
            index = epoch
            bar = Bar('Training', max=train_batch)
            if not self.opt.gt_only:
                self.model.train()
            self.mapfns.train()

            con_loss_ave = AverageMeter()  # NOTE(review): created but never updated below

            
            for k, (train_data, train_label, train_depth, train_normal, image_index, train_data1, train_label1, \
                train_depth1, train_normal1, trans_params) in enumerate(self.nyuv2_train_loader):
            # #* not augment
            # for k, (train_data1, train_label1, train_depth1, train_normal1, image_index) in enumerate(self.nyuv2_train_loader):
                """
                    image_index: unique id in dataset 
                    suffix with '1' indicates the data with augmentation
                    trans_params: image config, such as weight, height and so forth

                    draw_semantics(train_data1[0], train_label1[0], 0.5).save('a.jpg')
                """
                if not self.opt.gt_only:
                    train_data, train_label = train_data.cuda(), train_label.type(torch.LongTensor).cuda()
                    train_depth, train_normal = train_depth.cuda(), train_normal.cuda()
                train_data1, train_label1 = train_data1.cuda(), train_label1.type(torch.LongTensor).cuda()
                train_depth1, train_normal1 = train_depth1.cuda(), train_normal1.cuda()
                
                if not self.opt.gt_only:
                    # Forward original + augmented views as one batch, then split
                    # predictions/features back at batch_size.
                    train_data_ = torch.cat([train_data, train_data1], dim=0)
                    train_pred, logsigma, feat = self.model(train_data_)
                    feat_aug = feat[0][self.opt.batch_size:]
                    feat = feat[0][:self.opt.batch_size]
                    train_pred_aug = [train_pred[0][self.opt.batch_size:], train_pred[1][self.opt.batch_size:], train_pred[2][self.opt.batch_size:]]
                    train_pred = [train_pred[0][:self.opt.batch_size], train_pred[1][:self.opt.batch_size], train_pred[2][:self.opt.batch_size]]
                
                #* calculate loss                
                loss = 0
                for ind_ in range(len(image_index)):
                    # w[i] == 1 iff task i is annotated for this image.
                    if self.opt.ssl_type == 'full':
                        w = torch.ones(len(self.tasks)).float().cuda()
                    else:
                        w = self.labels_weights[image_index[ind_]].clone().float().cuda()

                    #* GT data prepare    
                    train_target_ind = [train_label1[ind_].unsqueeze(0), \
                        train_depth1[ind_].unsqueeze(0), train_normal1[ind_].unsqueeze(0)]
                    
                    #* prediction data prepare
                    if not self.opt.gt_only:
                        train_pred_seg = train_pred_aug[0][ind_][None,:,:,:]
                        train_pred_depth = train_pred_aug[1][ind_][None,:,:,:]
                        train_pred_normal = train_pred_aug[2][ind_][None,:,:,:]
                        train_pred_ind = [train_pred_seg, train_pred_depth, train_pred_normal]

                    #* compute the cross-task consistency loss
                    #* train diffusion
                    #!================ debug switch: restrict training to a single task map
                    if self.opt.task_debug == 0:
                        focus_task = torch.tensor([1,0,0]).cuda()
                    elif self.opt.task_debug == 1:
                        focus_task = torch.tensor([0,1,0]).cuda()
                    elif self.opt.task_debug == 2:
                        focus_task = torch.tensor([0,0,1]).cuda()
                    else:
                        # focus_task = copy.deepcopy(w)
                        focus_task = torch.tensor([1,1,1]).cuda()
                    #!================
                    """
                    draw_semantics(train_data1[0].cpu(), train_label1[0].cpu(), 0.5).save('a.jpg')
                    """
                    # ControlNet reconstruction loss on the GT task maps;
                    # step_index gives a globally increasing sample counter.
                    con_loss = self.mapfns.mapping_back_forward_one4all(train_target_ind, \
                         focus_task, step_index=k * len(image_index) + ind_, \
                                    original_rgb_img = train_data1[ind_])
                    
                    loss += con_loss / len(image_index)
                
                #* only optimizer the cross domain part 
                self.optimizer_film.zero_grad()
                loss.backward()
                self.optimizer_film.step()
                bar.next()
            
            bar.finish()

            #todo: save controlnet only 
            save_checkpoint({
                'epoch': epoch + 1,
                # 'state_dict': self.model.state_dict(),
                'mapfns': self.mapfns.state_dict(),
                # 'best_performance': best_performance,
                # 'optimizer': self.optimizer.state_dict(),
                'optimizer_film': self.optimizer_film.state_dict(),
                # 'avg_cost': None,
            }, isbest,self.opt)
            
     

    def get_diffusion_loss(self,w,train_target_ind, train_pred_ind):
        """Cross-task consistency loss computed through the finetuned
        ControlNet sampler.

        For each task, a conditioning hint is built from the GT map when
        available (w == 1) or from the prediction otherwise (w == 0); the
        hints are sampled as one batch and the loss pulls the UNet features
        of unlabeled tasks towards those of labeled tasks.

        Args:
            w: length-3 tensor; w[i] == 1 iff task i has a GT label.
            train_target_ind: [seg, depth, normal] GT maps, each batch dim 1.
            train_pred_ind: [seg, depth, normal] predictions, each batch dim 1.

        Returns:
            Scalar MSE consistency loss (plain float 0.0 when there is no
            labeled/unlabeled task pair to compare).
        """
        target_task_index = (w.data == 1).nonzero(as_tuple=False).view(-1) #* '1' indicates the label is available
        source_task_index = (w.data == 0).nonzero(as_tuple=False).view(-1) #* not available

        # Semantic map used as auxiliary conditioning for every task's hint.
        if w[0] == 1:
            semantic__ = train_target_ind[0] #* GT labels: -1..12, -1 marks invalid/noise pixels
        else:
            semantic__ = train_pred_ind[0].argmax(dim=1) #* predicted labels: 0..12

        prompts = []
        hint_imgs = []
        for idx in range(len(w)):
            if w[idx] == 1:
                prompt, hint_img  = self.controlnet_sampler.get_controlnet_input(train_target_ind[idx],\
                        semantic__,task_id = idx, is_gt = True)
            elif w[idx] == 0:
                prompt, hint_img  = self.controlnet_sampler.get_controlnet_input(train_pred_ind[idx],\
                        semantic__,task_id = idx, is_gt = False)
            #* predicted values can fall outside [0, 255] after conversion to an
            #* image; clamp in place
            hint_img.masked_fill_(hint_img > 255, 255)
            hint_img.masked_fill_(hint_img <0, 0)

            prompts.append(prompt)
            hint_imgs.append(hint_img)

        # BUGFIX: previously only the last iteration's `prompt` was passed and
        # the per-task `prompts` list was dead code; pass the full list so each
        # batched hint image gets its own prompt.
        x_samples, unet_feature = self.controlnet_sampler.samples(prompts,torch.cat(hint_imgs))

        con_loss = 0.0
        # Compare every unlabeled (source) task's features against every
        # labeled (target) task's features.
        for t_id in target_task_index.cpu().numpy().tolist():
            for s_id in source_task_index.cpu().numpy().tolist():
                con_loss += F.mse_loss(unet_feature[s_id],unet_feature[t_id])
                # con_loss += 1 - F.cosine_similarity(unet_feature[s_id],unet_feature[t_id]).mean()
        return con_loss


    def train_mtl(self):
        """Stage-2 loop: train the multi-task SegNet with supervised per-task
        losses plus the ControlNet cross-task consistency loss.

        Per image, tasks without a GT label (w[i] == 0) contribute no
        supervised loss; the consistency loss (get_diffusion_loss) instead
        couples their predictions to the labeled tasks. Depending on
        opt.eval_last20, evaluation runs every epoch or only in the last 20;
        relative MTL performance against self.stl_performance decides the
        best checkpoint. A checkpoint is saved every epoch.
        """
        # define parameters
        train_batch = len(self.nyuv2_train_loader)
        test_batch = len(self.nyuv2_test_loader)
        T = self.opt.temp  # kept for interface parity; unused in this loop
        # Rows: epoch; columns 0-11 are train metrics, 12-23 are test metrics.
        avg_cost = np.zeros([self.opt.total_epoch, 24], dtype=np.float32)
        ctl_cost = np.zeros([self.opt.total_epoch, 1], dtype=np.float32)
        lambda_weight = np.zeros([3, self.opt.total_epoch])  # logged only; never updated here
        best_performance = -100
        isbest = False

        for epoch in range(self.start_epoch, self.opt.total_epoch):
            index = epoch
            extra_logger.info('lr at {} th epoch is {} for optimizer'.format(index, self.optimizer.param_groups[0]['lr']))

            cost = np.zeros(24, dtype=np.float32)
            bar = Bar('Training', max=train_batch)

            # iteration for all batches
            self.model.train()

            con_loss_ave = AverageMeter()
            cost_seg = AverageMeter()
            cost_depth = AverageMeter()
            cost_normal = AverageMeter()
            for k, (train_data, train_label, train_depth, train_normal, image_index, train_data1, train_label1,
                    train_depth1, train_normal1, trans_params) in enumerate(self.nyuv2_train_loader):
                # image_index: unique id in dataset; '1' suffix marks the
                # augmented view; trans_params holds the augmentation geometry
                # (scale, crop offsets, output size).
                train_data, train_label = train_data.cuda(), train_label.type(torch.LongTensor).cuda()
                train_depth, train_normal = train_depth.cuda(), train_normal.cuda()
                train_data1, train_label1 = train_data1.cuda(), train_label1.type(torch.LongTensor).cuda()
                train_depth1, train_normal1 = train_depth1.cuda(), train_normal1.cuda()

                # Forward original + augmented views as one batch, then split.
                train_data_ = torch.cat([train_data, train_data1], dim=0)
                train_pred, logsigma, feat = self.model(train_data_)
                feat_aug = feat[0][self.opt.batch_size:]
                feat = feat[0][:self.opt.batch_size]
                train_pred_aug = [train_pred[0][self.opt.batch_size:], train_pred[1][self.opt.batch_size:], train_pred[2][self.opt.batch_size:]]
                train_pred = [train_pred[0][:self.opt.batch_size], train_pred[1][:self.opt.batch_size], train_pred[2][:self.opt.batch_size]]

                #* calculate loss
                loss = 0
                for ind_ in range(len(image_index)):
                    # w[i] == 1 iff task i is annotated for this image.
                    if self.opt.ssl_type == 'full':
                        w = torch.ones(len(self.tasks)).float().cuda()
                    else:
                        w = self.labels_weights[image_index[ind_]].clone().float().cuda()

                    train_pred_seg = train_pred_aug[0][ind_][None,:,:,:]
                    train_pred_depth = train_pred_aug[1][ind_][None,:,:,:]
                    train_pred_normal = train_pred_aug[2][ind_][None,:,:,:]
                    _sc, _h, _w, _i, _j, height, width = trans_params[ind_]
                    _h, _w, _i, _j, height, width = int(_h), int(_w), int(_i), int(_j), int(height), int(width)

                    train_target_ind = [train_label1[ind_].unsqueeze(0),
                        train_depth1[ind_].unsqueeze(0), train_normal1[ind_].unsqueeze(0)]

                    #* supervised per-task losses on the non-augmented view
                    train_loss_ind = self.model.model_fit(train_pred[0][ind_].unsqueeze(0), train_label[ind_].unsqueeze(0),
                        train_pred[1][ind_].unsqueeze(0), train_depth[ind_].unsqueeze(0), train_pred[2][ind_].unsqueeze(0),
                            train_normal[ind_].unsqueeze(0))

                    for i in range(len(self.tasks)):  #* drop losses of unlabeled tasks
                        if w[i] == 0:
                            train_loss_ind[i] = 0

                    train_pred_ind = [train_pred_seg, train_pred_depth, train_pred_normal]

                    #* cross-task consistency loss via the finetuned ControlNet
                    con_loss = self.get_diffusion_loss(w,train_target_ind, train_pred_ind)

                    # BUGFIX: was con_loss.item(), which raises AttributeError
                    # when get_diffusion_loss returns the plain float 0.0 (no
                    # labeled/unlabeled task pair, e.g. ssl_type == 'full').
                    con_loss_ave.update(float(con_loss), 1)

                    loss  += sum(train_loss_ind) / len(image_index) + con_loss * self.opt.con_weight / len(image_index)

                #* full-batch losses, only for recording
                train_loss = self.model.model_fit(train_pred[0], train_label, train_pred[1], train_depth, train_pred[2], train_normal)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                #* logger
                cost_seg.update(train_loss[0].item(), self.opt.batch_size)
                cost_depth.update(train_loss[1].item(), self.opt.batch_size)
                cost_normal.update(train_loss[2].item(), self.opt.batch_size)
                cost[0] = train_loss[0].item()
                cost[1] = self.model.compute_miou(train_pred[0], train_label).item()
                cost[2] = self.model.compute_iou(train_pred[0], train_label).item()
                cost[3] = train_loss[1].item()
                cost[4], cost[5] = self.model.depth_error(train_pred[1], train_depth)
                cost[6] = train_loss[2].item()
                cost[7], cost[8], cost[9], cost[10], cost[11] = self.model.normal_error(train_pred[2], train_normal)
                avg_cost[index, :12] += cost[:12] / train_batch
                # BUGFIX: convert to float before accumulating — con_loss may
                # be a CUDA tensor, which cannot be stored into a NumPy array.
                ctl_cost[index, 0] += float(con_loss) / train_batch
                bar.suffix  = '({batch}/{size}) | LossS: {loss_s:.4f} | LossD: {loss_d:.4f} | LossN: {loss_n:.4f} | Ws: {ws:.4f} | Wd: {wd:.4f}| Wn: {wn:.4f} | CTL: {ctl:.4f} | CW: {cw:.2f}'.format(
                            batch=k + 1,
                            size=train_batch,
                            loss_s=cost_seg.avg,
                            loss_d=cost_depth.avg,
                            loss_n=cost_normal.avg,
                            ws=w[0].data,
                            wd=w[1].data,
                            wn=w[2].data,
                            ctl=con_loss_ave.avg,
                            cw=1,
                            )
                bar.next()
            bar.finish()

            if self.opt.eval_last20 == 0:
                evaluate = True
            # BUGFIX: was the undefined bare name `total_epoch` (NameError
            # whenever eval_last20 was enabled).
            elif self.opt.eval_last20 and (epoch + 1) > (self.opt.total_epoch - 20):
                evaluate = True
            else:
                evaluate = False

            # evaluating test data
            if evaluate:
                self.model.eval()
                conf_mat = ConfMatrix(self.model.class_nb)
                depth_mat = DepthMeter()
                normal_mat = NormalsMeter()
                with torch.no_grad():  # operations inside don't track history
                    for test_data, test_label, test_depth, test_normal in self.nyuv2_test_loader:
                        test_data, test_label = test_data.cuda(),  test_label.type(torch.LongTensor).cuda()
                        test_depth, test_normal = test_depth.cuda(), test_normal.cuda()

                        test_pred, _, _ = self.model(test_data)
                        test_loss = self.model.model_fit(test_pred[0], test_label, test_pred[1], test_depth, test_pred[2], test_normal)

                        conf_mat.update(test_pred[0].argmax(1).flatten(), test_label.flatten())
                        depth_mat.update(test_pred[1], test_depth)
                        normal_mat.update(test_pred[2], test_normal)
                        cost[12] = test_loss[0].item()
                        cost[15] = test_loss[1].item()
                        cost[18] = test_loss[2].item()

                        avg_cost[index, 12:] += cost[12:] / test_batch
                    avg_cost[index, 13:15] = conf_mat.get_metrics()
                    depth_metric = depth_mat.get_score()
                    avg_cost[index, 16], avg_cost[index, 17] = depth_metric['l1'], depth_metric['rmse']
                    normal_metric = normal_mat.get_score()
                    avg_cost[index, 19], avg_cost[index, 20], avg_cost[index, 21], avg_cost[index, 22], \
                        avg_cost[index, 23] = normal_metric['mean'], normal_metric['rmse'], normal_metric['11.25'], \
                            normal_metric['22.5'], normal_metric['30']
                # NOTE(review): schedulers step only on evaluated epochs —
                # confirm this interaction with eval_last20 is intended.
                self.scheduler.step()
                if not self.opt.use_controlnet: #* controlNet dose not need to update the learning rate
                    self.scheduler_film.step()

                # Relative MTL gain vs. single-task baselines: semantic mIoU is
                # higher-better (+), depth/normal errors are lower-better (-).
                mtl_performance = 0.0
                mtl_performance += (avg_cost[index, 13]* 100 - self.stl_performance[self.opt.ssl_type]['semantic']) / self.stl_performance[self.opt.ssl_type]['semantic']
                mtl_performance -= (avg_cost[index, 16] - self.stl_performance[self.opt.ssl_type]['depth']) / self.stl_performance[self.opt.ssl_type]['depth']
                mtl_performance -= (avg_cost[index, 19] - self.stl_performance[self.opt.ssl_type]['normal']) / self.stl_performance[self.opt.ssl_type]['normal']
                mtl_performance = mtl_performance / len(self.tasks) * 100
                isbest = mtl_performance > best_performance
                print('current performance: {:.4f}, best performance: {:.4f}'.format(mtl_performance, best_performance))

                print('Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} '
                    'TEST: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'
                    .format(index, avg_cost[index, 0], avg_cost[index, 1], avg_cost[index, 2], avg_cost[index, 3],
                            avg_cost[index, 4], avg_cost[index, 5], avg_cost[index, 6], avg_cost[index, 7], avg_cost[index, 8], avg_cost[index, 9],
                            avg_cost[index, 10], avg_cost[index, 11], avg_cost[index, 12], avg_cost[index, 13],
                            avg_cost[index, 14], avg_cost[index, 15], avg_cost[index, 16], avg_cost[index, 17], avg_cost[index, 18],
                            avg_cost[index, 19], avg_cost[index, 20], avg_cost[index, 21], avg_cost[index, 22], avg_cost[index, 23]))

                extra_logger.info(
                'Epoch: {:04d} | \
                    TRAIN: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} '
                    'TEST: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'
                    .format(index,
                            avg_cost[index, 0], avg_cost[index, 1], avg_cost[index, 2],
                            avg_cost[index, 3],avg_cost[index, 4], avg_cost[index, 5],
                            avg_cost[index, 6], avg_cost[index, 7], avg_cost[index, 8], avg_cost[index, 9],avg_cost[index, 10], avg_cost[index, 11],

                            avg_cost[index, 12], avg_cost[index, 13],avg_cost[index, 14],
                            avg_cost[index, 15], avg_cost[index, 16], avg_cost[index, 17],
                            avg_cost[index, 18],avg_cost[index, 19], avg_cost[index, 20], avg_cost[index, 21], avg_cost[index, 22], avg_cost[index, 23]))

                self.logger.append([index, avg_cost[index, 0], avg_cost[index, 1], avg_cost[index, 2], avg_cost[index, 3],
                            avg_cost[index, 4], avg_cost[index, 5], avg_cost[index, 6], avg_cost[index, 7], avg_cost[index, 8], avg_cost[index, 9],
                            avg_cost[index, 10], avg_cost[index, 11], avg_cost[index, 12], avg_cost[index, 13],
                            avg_cost[index, 14], avg_cost[index, 15], avg_cost[index, 16], avg_cost[index, 17], avg_cost[index, 18],
                            avg_cost[index, 19], avg_cost[index, 20], avg_cost[index, 21], avg_cost[index, 22], avg_cost[index, 23], ctl_cost[index, 0],
                            lambda_weight[0, index], lambda_weight[1, index], lambda_weight[2, index]])

            if isbest:
                best_performance = mtl_performance
                print_index = index

            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': self.model.state_dict(),
                'best_performance': best_performance,
                'optimizer': self.optimizer.state_dict(),
                'avg_cost': avg_cost,
            }, isbest,self.opt)

        # Final summary of the last epoch's metrics.
        print_index = index
        print('Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} '
                'TEST: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'
                .format(print_index, avg_cost[print_index, 0], avg_cost[print_index, 1], avg_cost[print_index, 2], avg_cost[print_index, 3],
                        avg_cost[print_index, 4], avg_cost[print_index, 5], avg_cost[print_index, 6], avg_cost[print_index, 7], avg_cost[print_index, 8], avg_cost[print_index, 9],
                        avg_cost[print_index, 10], avg_cost[print_index, 11], avg_cost[print_index, 12], avg_cost[print_index, 13],
                        avg_cost[print_index, 14], avg_cost[print_index, 15], avg_cost[print_index, 16], avg_cost[print_index, 17], avg_cost[print_index, 18],
                        avg_cost[print_index, 19], avg_cost[print_index, 20], avg_cost[print_index, 21], avg_cost[print_index, 22], avg_cost[print_index, 23]))

        extra_logger.info(
            'Epoch: {:04d} | \
                TRAIN: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} '
                'TEST: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'
                .format(print_index,
                        avg_cost[print_index, 0], avg_cost[print_index, 1], avg_cost[print_index, 2],
                        avg_cost[print_index, 3],avg_cost[print_index, 4], avg_cost[print_index, 5],
                        avg_cost[print_index, 6], avg_cost[print_index, 7], avg_cost[print_index, 8], avg_cost[print_index, 9],avg_cost[print_index, 10], avg_cost[print_index, 11],

                        avg_cost[print_index, 12], avg_cost[print_index, 13],avg_cost[print_index, 14],
                        avg_cost[print_index, 15], avg_cost[print_index, 16], avg_cost[print_index, 17],
                        avg_cost[print_index, 18],avg_cost[print_index, 19], avg_cost[print_index, 20], avg_cost[print_index, 21], avg_cost[print_index, 22], avg_cost[print_index, 23]))


    def train(self):
        """Train the multi-task SegNet jointly with the cross-task mapping
        functions (``self.mapfns``) on NYUv2.

        Per epoch:
          1. optionally compute Dynamic Weight Average (DWA) task weights,
          2. iterate the train loader (original + augmented views), summing the
             supervised per-task losses and the cross-task consistency loss,
          3. optionally evaluate on the test split, track the relative
             multi-task performance metric, and checkpoint (best + latest).

        Relies on attributes built elsewhere in the trainer: the data loaders,
        ``self.model``, ``self.mapfns``, ``self.optimizer`` (may be absent when
        the SegNet is frozen), ``self.optimizer_film``, the LR schedulers,
        ``self.labels_weights`` and ``self.stl_performance``.
        """
        # define parameters
        train_batch = len(self.nyuv2_train_loader)
        test_batch = len(self.nyuv2_test_loader)
        T = self.opt.temp  # DWA temperature
        # avg_cost columns: 0-11 train metrics (seg loss/mIoU/pixacc, depth
        # loss/abs/rel, normal loss + 5 angular stats); 12-23 same for test.
        avg_cost = np.zeros([self.opt.total_epoch, 24], dtype=np.float32)
        # ctl_cost: per-epoch average of the cross-task consistency loss.
        ctl_cost = np.zeros([self.opt.total_epoch, 1], dtype=np.float32)
        lambda_weight = np.zeros([3, self.opt.total_epoch])
        best_performance = -100
        isbest = False

        for epoch in range(self.start_epoch, self.opt.total_epoch):
            index = epoch
            # Reset per epoch: otherwise a stale True from a previous epoch
            # would overwrite the "best" checkpoint with weights that were
            # never evaluated (and read a stale mtl_performance).
            isbest = False
            if hasattr(self, 'optimizer'):
                extra_logger.info('lr at {} th epoch is {} for optimizer and {} for cross domain loss model'.format(index, \
                    self.optimizer.param_groups[0]['lr'], self.optimizer_film.param_groups[0]['lr']))
            else:
                extra_logger.info('lr at {} th epoch is  {} for cross domain loss model'.format(index, \
                    self.optimizer_film.param_groups[0]['lr']))


            cost = np.zeros(24, dtype=np.float32)

            #? apply Dynamic Weight Average
            if self.opt.weight == 'dwa':
                if index == 0 or index == 1:
                    # Need two past epochs to form loss ratios; start uniform.
                    lambda_weight[:, index] = 1.0
                else:
                    w_1 = avg_cost[index - 1, 0] / avg_cost[index - 2, 0]
                    w_2 = avg_cost[index - 1, 3] / avg_cost[index - 2, 3]
                    w_3 = avg_cost[index - 1, 6] / avg_cost[index - 2, 6]
                    lambda_weight[0, index] = 3 * np.exp(w_1 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T) + np.exp(w_3 / T))
                    lambda_weight[1, index] = 3 * np.exp(w_2 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T) + np.exp(w_3 / T))
                    lambda_weight[2, index] = 3 * np.exp(w_3 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T) + np.exp(w_3 / T))

            bar = Bar('Training', max=train_batch)

            # iteration for all batches
            self.model.train()
            self.mapfns.train()

            con_loss_ave = AverageMeter()
            cost_seg = AverageMeter()
            cost_depth = AverageMeter()
            cost_normal = AverageMeter()
            k = -1
            for train_data, train_label, train_depth, train_normal, image_index, train_data1, train_label1, \
                train_depth1, train_normal1, trans_params in self.nyuv2_train_loader:
                """
                    image_index: unique id in dataset 
                    suffix with '1' indicates the data with augmentation
                    trans_params: image config, such as weight, height and so forth
                """
                k += 1
                train_data, train_label = train_data.cuda(), train_label.type(torch.LongTensor).cuda()
                train_depth, train_normal = train_depth.cuda(), train_normal.cuda()
                train_data1, train_label1 = train_data1.cuda(), train_label1.type(torch.LongTensor).cuda()
                train_depth1, train_normal1 = train_depth1.cuda(), train_normal1.cuda()

                # Run original and augmented views through the model in one
                # forward pass, then split predictions/features back apart.
                train_data_ = torch.cat([train_data, train_data1], dim=0)
                train_pred, logsigma, feat = self.model(train_data_)
                feat_aug = feat[0][self.opt.batch_size:]
                feat = feat[0][:self.opt.batch_size]
                train_pred_aug = [train_pred[0][self.opt.batch_size:], train_pred[1][self.opt.batch_size:], train_pred[2][self.opt.batch_size:]]
                train_pred = [train_pred[0][:self.opt.batch_size], train_pred[1][:self.opt.batch_size], train_pred[2][:self.opt.batch_size]]

                #* calculate loss                
                loss = 0
                for ind_ in range(len(image_index)):
                    # w: per-task 0/1 label availability for this sample
                    # (all ones in the fully supervised setting).
                    if self.opt.ssl_type == 'full':
                        w = torch.ones(len(self.tasks)).float().cuda()
                    else:
                        w = self.labels_weights[image_index[ind_]].clone().float().cuda()

                    train_pred_seg = train_pred_aug[0][ind_][None, :, :, :]
                    train_pred_depth = train_pred_aug[1][ind_][None, :, :, :]
                    train_pred_normal = train_pred_aug[2][ind_][None, :, :, :]
                    _sc, _h, _w, _i, _j, height, width = trans_params[ind_]
                    _h, _w, _i, _j, height, width = int(_h), int(_w), int(_i), int(_j), int(height), int(width)

                    train_target_ind = [train_label1[ind_].unsqueeze(0), \
                        train_depth1[ind_].unsqueeze(0), train_normal1[ind_].unsqueeze(0)]

                    if not self.opt.freeze_segnet:
                        train_loss_ind = self.model.model_fit(train_pred[0][ind_].unsqueeze(0), train_label[ind_].unsqueeze(0),\
                            train_pred[1][ind_].unsqueeze(0), train_depth[ind_].unsqueeze(0), train_pred[2][ind_].unsqueeze(0),\
                                train_normal[ind_].unsqueeze(0))

                        for i in range(len(self.tasks)):  #* compress the contribution of unlabeled prediction
                            if w[i] == 0:
                                train_loss_ind[i] = 0

                    train_pred_ind = [train_pred_seg, train_pred_depth, train_pred_normal]

                    #* compute the cross-task consistency loss
                    if self.opt.use_controlnet:
                        #* train diffusion 
                        con_loss = self.mapfns(train_pred_ind, train_target_ind, \
                            feat_aug[ind_].unsqueeze(0), copy.deepcopy(w), \
                                step_index=k * len(image_index) + ind_, \
                                    ssl_type=self.opt.ssl_type,\
                                        original_rgb_img = train_data1[ind_])
                    else:
                        con_loss = self.mapfns(train_pred_ind, train_target_ind, \
                            feat_aug[ind_].unsqueeze(0), copy.deepcopy(w), ssl_type=self.opt.ssl_type)

                    #? fixed by default: ramp the consistency weight linearly
                    # over the first 100 epochs when rampup == 'up'.
                    if self.opt.rampup == 'up':
                        if epoch > 99:
                            con_weight = 1
                        else:
                            con_weight = (k / train_batch + epoch) / 100
                    else:
                        con_weight = 1
                    con_weight *= self.opt.con_weight

                    con_loss_ave.update(con_loss.item(), 1)

                    if not self.opt.freeze_segnet:
                        loss = loss + sum(train_loss_ind) / len(image_index) + con_loss * con_weight / len(image_index)
                    else:
                        loss += con_loss * con_weight / len(image_index)

                #* only for recording 
                train_loss = self.model.model_fit(train_pred[0], train_label, train_pred[1], train_depth, train_pred[2], train_normal)
                if not self.opt.freeze_segnet:
                    self.optimizer.zero_grad()
                    self.optimizer_film.zero_grad()
                    loss.backward()
                    self.optimizer.step()
                    self.optimizer_film.step()
                else:
                    #* only optimizer the cross domain part 

                    self.optimizer_film.zero_grad()
                    loss.backward()
                    self.optimizer_film.step()


                #* logger 
                cost_seg.update(train_loss[0].item(), self.opt.batch_size)
                cost_depth.update(train_loss[1].item(), self.opt.batch_size)
                cost_normal.update(train_loss[2].item(), self.opt.batch_size)
                cost[0] = train_loss[0].item()
                cost[1] = self.model.compute_miou(train_pred[0], train_label).item()
                cost[2] = self.model.compute_iou(train_pred[0], train_label).item()
                cost[3] = train_loss[1].item()
                cost[4], cost[5] = self.model.depth_error(train_pred[1], train_depth)
                cost[6] = train_loss[2].item()
                cost[7], cost[8], cost[9], cost[10], cost[11] = self.model.normal_error(train_pred[2], train_normal)
                avg_cost[index, :12] += cost[:12] / train_batch
                # .item() detaches the scalar so the autograd graph of the
                # last sample is not kept alive inside the NumPy accumulator.
                ctl_cost[index, 0] += con_loss.item() / train_batch
                bar.suffix  = '({batch}/{size}) | LossS: {loss_s:.4f} | LossD: {loss_d:.4f} | LossN: {loss_n:.4f} | Ws: {ws:.4f} | Wd: {wd:.4f}| Wn: {wn:.4f} | CTL: {ctl:.4f} | CW: {cw:.2f}'.format(
                            batch=k + 1,
                            size=train_batch,
                            loss_s=cost_seg.avg,
                            loss_d=cost_depth.avg,
                            loss_n=cost_normal.avg,
                            ws=w[0].data,
                            wd=w[1].data,
                            wn=w[2].data,
                            ctl=con_loss_ave.avg,
                            cw=con_weight,
                            )
                bar.next()
            bar.finish()

            # Decide whether to run the test-split evaluation this epoch.
            if self.opt.freeze_segnet:
                evaluate = False
            elif self.opt.eval_last20 == 0:
                evaluate = True
            elif self.opt.eval_last20 and (epoch + 1) > (self.opt.total_epoch - 20):
                # BUGFIX: was bare `total_epoch` (undefined name -> NameError).
                evaluate = True
            else:
                evaluate = False

            # evaluating test data
            if evaluate:
                self.model.eval()
                conf_mat = ConfMatrix(self.model.class_nb)
                depth_mat = DepthMeter()
                normal_mat = NormalsMeter()
                with torch.no_grad():  # operations inside don't track history
                    k = -1
                    for test_data, test_label, test_depth, test_normal in iter(self.nyuv2_test_loader):
                        k += 1
                        test_data, test_label = test_data.cuda(),  test_label.type(torch.LongTensor).cuda()
                        test_depth, test_normal = test_depth.cuda(), test_normal.cuda()

                        test_pred, _, _ = self.model(test_data)
                        test_loss = self.model.model_fit(test_pred[0], test_label, test_pred[1], test_depth, test_pred[2], test_normal)

                        conf_mat.update(test_pred[0].argmax(1).flatten(), test_label.flatten())
                        depth_mat.update(test_pred[1], test_depth)
                        normal_mat.update(test_pred[2], test_normal)
                        cost[12] = test_loss[0].item()
                        cost[15] = test_loss[1].item()
                        cost[18] = test_loss[2].item()

                        avg_cost[index, 12:] += cost[12:] / test_batch
                    # Metric columns (13-14, 16-17, 19-23) are overwritten with
                    # the exact epoch-level meters; only the losses accumulate.
                    avg_cost[index, 13:15] = conf_mat.get_metrics()
                    depth_metric = depth_mat.get_score()
                    avg_cost[index, 16], avg_cost[index, 17] = depth_metric['l1'], depth_metric['rmse']
                    normal_metric = normal_mat.get_score()
                    avg_cost[index, 19], avg_cost[index, 20], avg_cost[index, 21], avg_cost[index, 22], \
                        avg_cost[index, 23] = normal_metric['mean'], normal_metric['rmse'], normal_metric['11.25'], \
                            normal_metric['22.5'], normal_metric['30']
                # NOTE(review): schedulers only step on evaluated epochs; with
                # eval_last20 the LR stays constant for most of training —
                # confirm this is intended.
                self.scheduler.step()
                if not self.opt.use_controlnet:  #* controlNet does not need to update the learning rate 
                    self.scheduler_film.step()

                # Relative multi-task gain (%) vs. single-task baselines:
                # higher mIoU is better (+), lower depth l1 / normal mean
                # angle error is better (-).
                mtl_performance = 0.0
                mtl_performance += (avg_cost[index, 13] * 100 - self.stl_performance[self.opt.ssl_type]['semantic']) / self.stl_performance[self.opt.ssl_type]['semantic']
                mtl_performance -= (avg_cost[index, 16] - self.stl_performance[self.opt.ssl_type]['depth']) / self.stl_performance[self.opt.ssl_type]['depth']
                mtl_performance -= (avg_cost[index, 19] - self.stl_performance[self.opt.ssl_type]['normal']) / self.stl_performance[self.opt.ssl_type]['normal']
                mtl_performance = mtl_performance / len(self.tasks) * 100
                isbest = mtl_performance > best_performance
                print('current performance: {:.4f}, best performance: {:.4f}'.format(mtl_performance, best_performance))

                print('Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} '
                    'TEST: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'
                    .format(index, avg_cost[index, 0], avg_cost[index, 1], avg_cost[index, 2], avg_cost[index, 3],
                            avg_cost[index, 4], avg_cost[index, 5], avg_cost[index, 6], avg_cost[index, 7], avg_cost[index, 8], avg_cost[index, 9],
                            avg_cost[index, 10], avg_cost[index, 11], avg_cost[index, 12], avg_cost[index, 13],
                            avg_cost[index, 14], avg_cost[index, 15], avg_cost[index, 16], avg_cost[index, 17], avg_cost[index, 18],
                            avg_cost[index, 19], avg_cost[index, 20], avg_cost[index, 21], avg_cost[index, 22], avg_cost[index, 23]))

                extra_logger.info(
                'Epoch: {:04d} | \
                    TRAIN: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} '
                    'TEST: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'
                    .format(index, 
                            avg_cost[index, 0], avg_cost[index, 1], avg_cost[index, 2], 
                            avg_cost[index, 3],avg_cost[index, 4], avg_cost[index, 5], 
                            avg_cost[index, 6], avg_cost[index, 7], avg_cost[index, 8], avg_cost[index, 9],avg_cost[index, 10], avg_cost[index, 11], 

                            avg_cost[index, 12], avg_cost[index, 13],avg_cost[index, 14], 
                            avg_cost[index, 15], avg_cost[index, 16], avg_cost[index, 17], 
                            avg_cost[index, 18],avg_cost[index, 19], avg_cost[index, 20], avg_cost[index, 21], avg_cost[index, 22], avg_cost[index, 23]))


                self.logger.append([index, avg_cost[index, 0], avg_cost[index, 1], avg_cost[index, 2], avg_cost[index, 3],
                            avg_cost[index, 4], avg_cost[index, 5], avg_cost[index, 6], avg_cost[index, 7], avg_cost[index, 8], avg_cost[index, 9],
                            avg_cost[index, 10], avg_cost[index, 11], avg_cost[index, 12], avg_cost[index, 13],
                            avg_cost[index, 14], avg_cost[index, 15], avg_cost[index, 16], avg_cost[index, 17], avg_cost[index, 18],
                            avg_cost[index, 19], avg_cost[index, 20], avg_cost[index, 21], avg_cost[index, 22], avg_cost[index, 23], ctl_cost[index, 0],
                            lambda_weight[0, index], lambda_weight[1, index], lambda_weight[2, index]])

            if isbest:
                best_performance = mtl_performance
                print_index = index

            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': self.model.state_dict(),
                'mapfns': self.mapfns.state_dict(),
                'best_performance': best_performance,
                # BUGFIX: self.optimizer does not exist on the freeze_segnet
                # path (see the hasattr guard above) — avoid AttributeError.
                'optimizer': self.optimizer.state_dict() if hasattr(self, 'optimizer') else None,
                'optimizer_film': self.optimizer_film.state_dict(),
                'avg_cost': avg_cost,
            }, isbest, self.opt)

        # NOTE(review): this unconditionally reports the LAST epoch, making the
        # best-epoch print_index set above dead — confirm that is intended.
        print_index = index
        print('Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} '
                'TEST: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'
                .format(print_index, avg_cost[print_index, 0], avg_cost[print_index, 1], avg_cost[print_index, 2], avg_cost[print_index, 3],
                        avg_cost[print_index, 4], avg_cost[print_index, 5], avg_cost[print_index, 6], avg_cost[print_index, 7], avg_cost[print_index, 8], avg_cost[print_index, 9],
                        avg_cost[print_index, 10], avg_cost[print_index, 11], avg_cost[print_index, 12], avg_cost[print_index, 13],
                        avg_cost[print_index, 14], avg_cost[print_index, 15], avg_cost[print_index, 16], avg_cost[print_index, 17], avg_cost[print_index, 18],
                        avg_cost[print_index, 19], avg_cost[print_index, 20], avg_cost[print_index, 21], avg_cost[print_index, 22], avg_cost[print_index, 23]))

        extra_logger.info(
            'Epoch: {:04d} | \
                TRAIN: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} '
                'TEST: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'
                .format(print_index, 
                        avg_cost[print_index, 0], avg_cost[print_index, 1], avg_cost[print_index, 2], 
                        avg_cost[print_index, 3],avg_cost[print_index, 4], avg_cost[print_index, 5], 
                        avg_cost[print_index, 6], avg_cost[print_index, 7], avg_cost[print_index, 8], avg_cost[print_index, 9],avg_cost[print_index, 10], avg_cost[print_index, 11], 

                        avg_cost[print_index, 12], avg_cost[print_index, 13],avg_cost[print_index, 14], 
                        avg_cost[print_index, 15], avg_cost[print_index, 16], avg_cost[print_index, 17], 
                        avg_cost[print_index, 18],avg_cost[print_index, 19], avg_cost[print_index, 20], avg_cost[print_index, 21], avg_cost[print_index, 22], avg_cost[print_index, 23]))


if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='Multi-task partially-supervised learning with cross-task consistency (SegNet)')
    parser.add_argument('--type', default='standard', type=str, help='split type: standard, wide, deep')
    parser.add_argument('--weight', default='uniform', type=str, help='multi-task weighting: uniform')
    parser.add_argument('--dataroot', default='./data/nyuv2', type=str, help='dataset root')
    parser.add_argument('--temp', default=1.0, type=float, help='temperature for DWA (must be positive)')
    parser.add_argument('--wlr', default=0.001, type=float, help='initial learning rate')
    parser.add_argument('--out', default='./results/nyuv2', help='Directory to output the result')
    parser.add_argument('--alpha', default=1.5, type=float, help='hyper params of GradNorm')
    parser.add_argument('--ssl-type', default='randomlabels', type=str, help='ssl type: onelabel, randomlabels, full')
    parser.add_argument('--labelroot', default='./data/nyuv2_settings/', type=str, help='partially setting root')
    parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
    parser.add_argument('--load_pretrained', default='', type=str, metavar='PATH', help='path to pretrained checkpoint (default: none)')
    parser.add_argument('--eval-last20', default=0, type=int, help='1 means we evaluate models in the last 20 epochs')
    parser.add_argument('--rampup', default='fixed', type=str, help='up for ramp-up loss weight of cross-task consistency loss, fixed use constant loss weight.')
    parser.add_argument('--con-weight', default=2.0, type=float, help='weight for cross-task consistency loss')
    # Help strings below were copy-pasted from --con-weight; corrected.
    parser.add_argument('--reg-weight', default=0.5, type=float, help='weight for the regularization loss')
    parser.add_argument('--batch_size', default=2, type=int, help='training batch size')
    parser.add_argument('--suffix', default='', type=str, help='the suffix of result directory')
    parser.add_argument('--model_lr', default=1e-4, type=float, help='multi-task model learning rate ')
    parser.add_argument('--total_epoch', default=200, type=int, help='total number of training epochs')
    parser.add_argument('--use_controlnet', action='store_true', help='use controlnet as cross domain control tool')
    parser.add_argument('--train_controlnet_decoder', action='store_true', help='also train the controlnet decoder')

    parser.add_argument('--freeze_segnet', action='store_true', help='freeze the MTPSL SegNet (train only the cross-domain module)')
    parser.add_argument('--gt_only', action='store_true', help='only use GT for controlNet training')
    parser.add_argument('--task_debug', default=0, type=int, help='debug only one task: 0 semantic, 1 depth, 2 normal')

    #* use controlnet for stage 2
    parser.add_argument('--stage2', action='store_true', help='run stage 2 (multi-task training with the finetuned controlnet)')
    parser.add_argument('--finetuned_controlnet_model', default=None, type=str, help='the finetuned controlnet path ')

    opt = parser.parse_args()
    trainer = NYUTrainer(opt)
    # Stage 2 fine-tunes the MTL model with a trained controlnet; otherwise
    # train the controlnet itself.
    if opt.stage2:
        trainer.train_mtl()
    else:
        trainer.train_control_net()