import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from torch.autograd import Variable
import torch.nn.init as init
import numpy as np
import model.config_task as config_task
import pdb
import einops

import pytorch_lightning as pl
from cldm.logger import ImageLogger
from cldm.model import create_model, load_state_dict
import matplotlib.pyplot as plt
# from torch.utils.tensorboard import SummaryWriter
from loguru import logger
import random

from pytorch_lightning import seed_everything
from os.path import join, split,exists, isfile


# import wandb 
import copy
import torchvision
import datetime
from jupyters.utils import  * 
from cldm.ddim_hacked import DDIMSampler
import torchvision.transforms as T
# Maps conditioning-mode names to the batch keys expected by the diffusion model.
__conditioning_keys__ = {'concat': 'c_concat',
                         'crossattn': 'c_crossattn',
                         'adm': 'y'}
import cv2
def cross_entropy_loss(logits, targets):
    """Mean cross-entropy between per-class ``logits`` and integer ``targets``."""
    class_log_probs = F.log_softmax(logits, dim=1)
    target_labels = targets.type(torch.long)
    return F.nll_loss(class_log_probs, target_labels, reduction='mean')



class conv_task(nn.Module):
    """Conv -> BN -> FiLM -> ReLU block whose FiLM (gamma/beta) modulation is
    selected by the task-pair one-hot stored globally in ``config_task.A_taskpair``.

    Bug fix: the ``stride``, ``kernel_size`` and ``padding`` constructor
    parameters were previously ignored (the conv was hard-coded to
    kernel_size=3, padding=1, stride=1). They are now honored; the defaults
    reproduce the old behavior, so existing callers are unaffected.
    """

    def __init__(self, in_planes, planes, stride=1, kernel_size=3, padding=1, num_tasks=2):
        super(conv_task, self).__init__()
        self.num_tasks = num_tasks
        self.conv = nn.Conv2d(in_channels=in_planes, out_channels=planes,
                              kernel_size=kernel_size, stride=stride, padding=padding)
        # One FiLM column per ordered (source, target) task pair: num_tasks*(num_tasks-1).
        self.gamma = nn.Parameter(torch.ones(planes, num_tasks*(num_tasks-1)))
        self.beta = nn.Parameter(torch.zeros(planes, num_tasks*(num_tasks-1)))
        self.bn = nn.BatchNorm2d(num_features=planes)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Task-pair one-hot row vector (1 x num_tasks*(num_tasks-1)), set
        # globally by the caller before this forward pass.
        A_taskpair = config_task.A_taskpair

        x = self.conv(x)

        # Select the task-pair-specific FiLM parameters.
        gamma = torch.mm(A_taskpair, self.gamma.t())
        beta = torch.mm(A_taskpair, self.beta.t())
        gamma = gamma.view(1, x.size(1), 1, 1)
        beta = beta.view(1, x.size(1), 1, 1)

        x = self.bn(x)

        # Task-pair-specific affine (FiLM) transformation.
        x = x * gamma + beta
        x = self.relu(x)

        return x

class SegNet_enc(nn.Module):
    """SegNet-style encoder: one task-specific input stem per task plus a shared
    trunk of ``conv_task`` blocks that are FiLM-conditioned on the current
    task pair (read globally from ``config_task.A_taskpair`` inside ``conv_task``).
    """
    def __init__(self, input_channels):
        # input_channels: per-task channel count of the map fed to this encoder;
        # its length defines the number of tasks.
        super(SegNet_enc, self).__init__()
        # initialise network parameters
        filter = [64, 128, 256, 512, 512]
        self.filter = filter
        self.num_tasks = len(input_channels)
        # Task-specific input layer
        self.pred_encoder_source = nn.ModuleList([self.pre_conv_layer([input_channels[0], filter[0]])])
        for i in range(1, len(input_channels)):
            self.pred_encoder_source.append(self.pre_conv_layer([input_channels[i], filter[0]]))

        # define shared mapping function, which is conditioned on the taskpair
        self.encoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
        for i in range(len(filter)-1):
            self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]]))

        # define convolution layer
        self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
        for i in range(len(filter)-1):
            if i == 0:
                self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
            else:
                # Deeper stages use two stacked conv blocks.
                self.conv_block_enc.append(nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]),
                                                         self.conv_layer([filter[i + 1], filter[i + 1]])))
        # define pooling and unpooling functions
        self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)

    def forward(self, x, input_task):
        """Encode ``x`` through the input stem selected by ``input_task`` and
        the shared trunk; return the deepest max-pooled feature map.
        """
        # Pre-allocate per-stage slots. g_decoder/g_upsampl are never used in
        # this encoder-only forward (kept for symmetry with a full SegNet).
        g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * len(self.filter) for _ in range(5))
        for i in range(len(self.filter)):
            g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2))
        
        # task-specific input layer
        # if input_task is not None:
        x = self.pred_encoder_source[input_task](x)

        # shared mapping function
        for i in range(len(self.filter)):
            if i == 0:
                g_encoder[i][0] = self.encoder_block[i](x)
                g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
                g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
            else:
                g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1])
                g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
                g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])

        return g_maxpool[-1]


    def conv_layer(self, channel):
        # Shared-trunk layer: FiLM-conditioned on the current task pair.
        return conv_task(in_planes=channel[0], planes=channel[1], num_tasks=self.num_tasks)

    def pre_conv_layer(self, channel):
        # Plain (unconditioned) conv -> BN -> ReLU input stem for one task.
        conv_block = nn.Sequential(
            nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
            nn.BatchNorm2d(num_features=channel[1]),
            nn.ReLU(inplace=True)
        )
        return conv_block


class Mapfns(nn.Module):
    """Cross-task consistency module.

    Maps a source-task prediction and a target-task ground truth through a
    shared, task-pair-conditioned encoder (``SegNet_enc``) and penalizes
    disagreement between the two mapped features (and, optionally, between
    each of them and the backbone feature ``feat``).
    """
    def __init__(self, tasks=['semantic', 'depth', 'normal'], input_channels=[13, 1, 3]):
        # NOTE(review): mutable default args are shared across instances; safe
        # only as long as callers never mutate them.
        super(Mapfns, self).__init__()
        # initialise network parameters
        assert len(tasks) == len(input_channels) 
        self.tasks = tasks 
        # Map task name -> channel count of its prediction/label map.
        self.input_channels = {}
        for t, task in enumerate(tasks):
            self.input_channels[task] = input_channels[t]

        self.mapfns = SegNet_enc(input_channels=input_channels)

        # Xavier init for conv/linear weights; identity-style init for BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight)
                nn.init.constant_(m.bias, 0)

    def forward(self, x_pred, gt, feat, w, ssl_type, reg_weight=0.5):
        """Accumulate the consistency loss over every ordered (source, target)
        task pair.

        x_pred: per-task predictions; gt: per-task labels; feat: backbone
        feature used for regularization; w: per-task label-availability flags
        (1 = labeled target, 0 = unlabeled source); ssl_type 'full' uses all
        tasks as both sources and targets.
        """
        if ssl_type == 'full':
            target_task_index = torch.arange(len(self.tasks)) 
            source_task_index = torch.arange(len(self.tasks))
        else:
            target_task_index = (w.data == 1).nonzero(as_tuple=False).view(-1)
            source_task_index = (w.data == 0).nonzero(as_tuple=False).view(-1)

        # loss = torch.tensor(0).to(feat.device)
        loss = 0
        if len(source_task_index) > 0:
            for source_task in source_task_index:
                for target_task in target_task_index:
                    if source_task != target_task:

                        source_pred = x_pred[source_task]
                        target_gt = gt[target_task]

                        source_pred = self.pre_process_pred(source_pred, task=self.tasks[source_task])
                        target_gt = self.pre_process_gt(target_gt, task=self.tasks[target_task])

                        # config_task.source_task = [source_task]
                        # config_task.target_task = [target_task]
                        # source_task, target_task = config_task.source_task[0], config_task.target_task[0]
                        A_taskpair = torch.zeros(len(self.tasks), len(self.tasks)).to(source_pred.device)
                        A_taskpair[source_task, target_task] = 1.0
                        n, m = A_taskpair.size()
                        # Drop the (always-zero) diagonal of the n x n one-hot,
                        # producing a 1 x n(n-1) task-pair row vector consumed
                        # by every conv_task block via the config_task global.
                        A_taskpair = A_taskpair.flatten()[:-1].view(n-1,n+1)[:,1:].flatten().view(1,-1)
                        config_task.A_taskpair = A_taskpair

                        mapout_source = self.mapfns(source_pred, input_task=source_task)
                        mapout_target = self.mapfns(target_gt, input_task=target_task)

                        loss = loss + self.compute_loss(mapout_source, mapout_target, feat, reg_weight=reg_weight)
                 

        return loss

    def pre_process_pred(self, pred, task):
        """Normalize a raw prediction into the value range expected by the encoder."""
        if task == 'semantic':
            # Hard (straight-through) one-hot sample; resample on the rare NaN.
            x_pred = F.gumbel_softmax(pred, dim=1, tau=1, hard=True)
            while torch.isnan(x_pred.sum()):
                x_pred = F.gumbel_softmax(pred, dim=1, tau=1, hard=True)
            pred = x_pred
        elif task == 'depth':
            x_pred = pred / (pred.max() + 1e-12) 
            pred = x_pred
        elif task == 'normal':
            # Normals are in [-1, 1]; shift to [0, 1].
            pred = (pred + 1.0) / 2.0
        return pred

    def pre_process_gt(self, gt, task):
        """Normalize ground truth: one-hot semantics (masking -1 = ignore),
        max-normalized depth, or [0, 1]-shifted normals.
        """
        if task == 'semantic':
            gt = gt.unsqueeze(0)
            binary_mask = (gt == -1).type(torch.FloatTensor).cuda()
            num_classes = self.input_channels[task]
            # Zero out ignore pixels before scatter, then re-mask the one-hot.
            gt_ = gt.float() * (1 - binary_mask)
            gt__ = torch.zeros(gt.size(0), num_classes, gt.size(2), gt.size(3)).scatter_(1, gt_.type(torch.LongTensor), 1).cuda().detach() * (1 - binary_mask)
        elif task == 'depth':
            gt__ = gt / (gt.max() + 1e-12)
        else:
            gt__ = (gt + 1.0) / 2.0
            # gt__ = gt
        return gt__

    def compute_loss(self, mapout_source, mapout_target, feat, reg_weight=0.5):
        """Cosine-distance consistency between the two mapped features, plus an
        optional regularization pulling both towards the (detached) backbone feature.
        """
        # cross-task consistency
        l_s_t = 1 - F.cosine_similarity(mapout_source, mapout_target, dim=1, eps=1e-12).mean()
        # regularization
        l_s_f = 1 - F.cosine_similarity(mapout_source, feat.detach(), dim=1, eps=1e-12).mean()
        l_t_f = 1 - F.cosine_similarity(mapout_target, feat.detach(), dim=1, eps=1e-12).mean()

        if reg_weight > 0:
            loss = l_s_t + reg_weight * (l_s_f + l_t_f)
        else:
            loss = l_s_t
        return loss

class ControlNet(nn.Module):
    """Wraps a pretrained ControlNet/Stable-Diffusion model and uses it to score
    task maps (segmentation / depth / normal) rendered as RGB "hint" images with
    a Painter-style base-b color encoding; also handles periodic DDIM-sampled
    visualization and file logging.
    """
    def __init__(self, controlnet_path='results/control_sd15_ini.ckpt', sd_locked=True,
                 only_mid_control=False, tasks=['semantic', 'depth', 'normal'], input_channels=[13, 1, 3],out_dir=None):
        # NOTE(review): mutable default args (tasks/input_channels) are shared
        # across instances; safe only if never mutated by callers.
        super(ControlNet, self).__init__()
        model = create_model('./models/cldm_v15.yaml').cpu()
        model.load_state_dict(load_state_dict(controlnet_path, location='cpu'))
        
        
        # model.learning_rate = learning_rate
        model.sd_locked = sd_locked
        model.only_mid_control = only_mid_control
        self.model = model.cuda()
        self.tasks = tasks

        # Map task name -> channel count of its prediction/label map.
        self.input_channels = {}
        for t, task in enumerate(tasks):
            self.input_channels[task] = input_channels[t]
        
    
        #!=================================================================
        #* for  tensorboard
        # log_dir = './tb_logs/' 
        # self.writer = SummaryWriter(log_dir=log_dir)
        #* for logger 
        if out_dir is not None:
            log_dir = join(out_dir , datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+ ".txt" )# get the current time
        else:
            log_dir = './tb_logs/' + datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+ ".txt"# get the current time
        
        logger.remove(handler_id=None)  # clear previously configured sinks
        logger.add(sink=log_dir, enqueue=True)  
        
        #!=================================================================
        # Human-readable task names used when building text prompts.
        self.full_task_names = copy.deepcopy(tasks)
        for i in range(len(self.full_task_names)):
            if self.full_task_names[i] == 'semantic':
                self.full_task_names[i] = 'semantic segmentation'
            elif self.full_task_names[i] == 'depth':
                self.full_task_names[i] = 'depth estimation'
            elif self.full_task_names[i] == 'normal':
                self.full_task_names[i] = 'surface normal estimation'

        self.text_prompt = "from"
        self.text_prompt_2 = 'to'
        #* inference  through controlNet 
        self.ddim_sampler = DDIMSampler(self.model)

        # NYUv2-13 class names, 1-indexed (label 0 in the maps corresponds to key 1).
        self.nyud2_class_name_mapping = {
            1: 'bed',
            2: 'books',
            3: 'ceiling',
            4: 'chair',
            5: 'floor',
            6: 'furniture',
            7: 'objects',
            8: 'picture',
            9: 'sofa',
            10: 'table',
            11: 'tv',
            12: 'wall',
            13: 'window'
        }

    def process_predicted_segmentation(self, input_tensor):
        """
            mapping the [B,13,W,H] prediction into  [B,3,W,H] image for diffusion model processing
        """
        # b is the per-channel base of the class-id -> RGB encoding: smallest
        # integer with b**3 >= num_classes; m spreads the digits over [0, 255].
        L = torch.tensor(input_tensor.shape[1], dtype=torch.float)
        b = int(L ** (1 / 3)) + 1  # 13
        m = 256 // b
        """
        upgrade the argmax into soft argmax
        l = input_tensor.argmax(dim=1)
        """
        
        B,C,H,W = input_tensor.shape
        # Differentiable (soft) argmax: a huge temperature makes softmax
        # effectively one-hot while keeping gradients flowing.
        l = (input_tensor * 1e7 ) .softmax(1)
        l = (torch.arange(0,13).unsqueeze(0).unsqueeze(-1).unsqueeze(-1).repeat(B,1,H,W).cuda() * l).sum(1)

        
        

        # Decompose the class id into three base-b digits (one per channel).
        num_seq_r = l // b ** 2
        num_seq_g = (l % b ** 2) // b
        num_seq_b = l % b

        # Note: B is re-bound from batch size to the blue channel here.
        R = 255 - num_seq_r * m
        G = 255 - num_seq_g * m
        B = 255 - num_seq_b * m

        return torch.stack([R, G, B]).permute(1, 2, 3, 0).float()

      




    def process_gt_segmentation(self, input_tensor, num_classes):
        """Encode a [B, H, W] integer label map as an RGB image using the same
        base-b digit scheme as the prediction path; -1 (ignore) pixels map to black.
        """
        L = num_classes
        b = int(L ** (1 / 3)) + 1  # 19
        m = 256 // b

        l = input_tensor
        binary_mask = (l == -1).type(torch.FloatTensor).to(l.device)
        binary_mask = binary_mask.unsqueeze(-1).repeat(1, 1, 1, 3)

        num_seq_r = l // b ** 2
        num_seq_g = (l % b ** 2) // b
        num_seq_b = l % b

        R = 255 - num_seq_r * m
        G = 255 - num_seq_g * m
        B = 255 - num_seq_b * m

        mapped_segmentation = torch.stack([R, G, B]).permute(1, 2, 3, 0)
        result = mapped_segmentation * (1 - binary_mask)

        return result

    def forward(self, x_pred, gt, feat, w, ssl_type, step_index, reg_weight=0.5,original_rgb_img = None):
        """
            vis  the jpg and hint images together 

            Computes the diffusion loss of the RGB image conditioned on the
            target-task ground-truth hint. NOTE(review): the source/target
            task indices are hard-coded to (1, 2) (depth -> normal) below,
            overriding the indices derived from w/ssl_type — looks like an
            experiment setting; confirm before reuse.
        """

        if ssl_type == 'full':
            target_task_index = torch.arange(len(self.tasks))
            source_task_index = torch.arange(len(self.tasks))
        else:
            target_task_index = (w.data == 1).nonzero(as_tuple=False).view(-1) #* '1' indicates the label is available
            source_task_index = (w.data == 0).nonzero(as_tuple=False).view(-1) #* not available

        loss = torch.tensor(0.0).cuda()

        #*=====================================================================
        assert original_rgb_img is not None 
        original_rgb_img = original_rgb_img.unsqueeze(0).permute([0,2,3,1])
        is_resized_gt = False    
        cross_domain_loss = 0
        # Hard-coded task pair: source = depth (1), target = normal (2).
        source_task_index = torch.tensor([1])
        target_task_index = torch.tensor([2])
        # Build the prompt's object list from the classes present in the scene.
        class_in_scene = gt[0].unique().tolist()
        class_in_scene = [self.nyud2_class_name_mapping[x+1] for x in class_in_scene if x != -1] #* the index in nyud2_class_name_mapping starts from 1 
        # print(class_in_scene, 'are in the given scene')
        #*=====================================================================
        if len(source_task_index) > 0:
            for source_task in source_task_index:
                for target_task in target_task_index:
                    if source_task != target_task and source_task == 1 and target_task == 2 :
                        # source_pred = x_pred[source_task]
                        source_pred = gt[source_task]
                        target_gt = gt[target_task]
                        string_source = self.full_task_names[source_task.item()]
                        string_target = self.full_task_names[target_task.item()]
                        # A_taskpair = self.text_prompt + ' ' + string_source + ' ' + self.text_prompt_2 + ' ' + string_target
                        # A_taskpair = "a realistic and lifelike natural image that portrays scenes and details consistent with the provided depth image"
                        src_taskpair = "an authentic indoor scene, including {}, the details consistent with the provided {}".format( ','.join(class_in_scene), string_source)
                        tgt_taskpair = "an authentic indoor scene, including {}, the details consistent with the provided {}".format( ','.join(class_in_scene), string_target)
                        
                        # source_pred = process_prediction(source_pred,source_task)
                        # NOTE(review): process_gt comes from the star import of
                        # jupyters.utils — verify its contract there.
                        source_pred = process_gt(source_pred,source_task)
                        target_gt = process_gt(target_gt,target_task)

                        #* Randomly crop to get square images
                        dim_1_num_pixels, dim_2_num_pixels = target_gt.shape[1], target_gt.shape[2]
                        difference = np.abs(dim_2_num_pixels - dim_1_num_pixels)
                        start = np.random.randint(low=0, high=difference + 1)
                        if dim_1_num_pixels < dim_2_num_pixels:
                            # dimension 1 shorter, crop dimension 2
                            end = start + dim_1_num_pixels
                            source_pred = source_pred[:, :, start:end, :]
                            target_gt = target_gt[:, :, start:end, :]
                            #!=======================================================
                            # Crop/resize the RGB image once, with the same offset.
                            if not is_resized_gt:
                                original_rgb_img = original_rgb_img[:,:,start:end, :]
                                original_rgb_img = F.interpolate(original_rgb_img.unsqueeze(0).type(torch.float32), size=(512, 512, 3), mode='nearest').squeeze(0)
                                original_rgb_img = (original_rgb_img * 2 ) -1 
                                is_resized_gt = True
                            #!=======================================================
                        else:
                            # dimension 2 shorter, crop dimension 1
                            end = start + dim_2_num_pixels
                            source_pred = source_pred[:, start:end, :, :]
                            target_gt = target_gt[:, start:end, :, :]
                            #!=======================================================
                            if not is_resized_gt:
                                original_rgb_img = original_rgb_img[:, start:end, :, :]
                                original_rgb_img = F.interpolate(original_rgb_img.unsqueeze(0).type(torch.float32), size=(512, 512, 3), mode='nearest').squeeze(0)
                                original_rgb_img = (original_rgb_img * 2 ) -1 
                                is_resized_gt = True
                            #!=======================================================

                        # Interpolate square images to (512 * 512)
                        source_pred = F.interpolate(source_pred.unsqueeze(0).type(torch.float32), size=(512, 512, 3), mode='nearest').squeeze(0)
                        target_gt = F.interpolate(target_gt.unsqueeze(0).type(torch.float32), size=(512, 512, 3), mode='nearest').squeeze(0)


                        #* value range problem , by daniel
                        #* to range [-1,1]
                        #todo how does the  jpg image map into [-1,1]
                        target_gt = (target_gt/255)
                        source_pred = (source_pred/255)

                        # loss += get_diffusion_loss(original_rgb_img,source_pred,src_taskpair)
                        loss += self.get_diffusion_loss(original_rgb_img,target_gt,tgt_taskpair)
                        
                        
                        
                        """
                            vis(target_gt,'%s.jpg'%(string_target.replace(' ','#')))
                            vis(source_pred,'%s.jpg'%(string_source.replace(' ','#')))
                            vis(original_rgb_img,'original_rgb_img1.jpg')
                            gen_merged_img(original_rgb_img, target_gt, '%s#merge_into_rgb.jpg'%(string_target.replace(' ','#')))
                            gen_merged_img(original_rgb_img, source_pred,'%s#merge_into_rgb.jpg'%(string_source.replace(' ','#')))

                            #* inference diffusion
                            #todo 
                            # mapped_source_images = self.inference(source_pred,A_taskpair)
                            # cd_loss = torch.nn.functional.mse_loss(target_gt, mapped_source_images,reduction = 'mean')
                            # cross_domain_loss += cd_loss
                        """

                        
                        # Periodic visualization: sample from the model and dump
                        # prompt/hint/prediction images next to the log file.
                        if step_index == 30: #* 720 
                            
                            mapped_tgt_images = self.inference(target_gt,tgt_taskpair,ddim_steps = 30)
                            # Recover the log directory from loguru's private sink path.
                            save_dir_name = '/'.join(logger._core.handlers[1]._sink._path.split('/')[:-1])
                            
                            # with open (join(save_dir_name,datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'#prompt.txt'),'w')  as f :
                            #     f.write('\n'.join([src_taskpair,tgt_taskpair]))
                            with open (join(save_dir_name,datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'#prompt.txt'),'w')  as f :
                                f.write('\n'.join([tgt_taskpair]))
                            # with open (join(save_dir_name,datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'#prompt.txt'),'w')  as f :
                                # f.write('\n'.join([src_taskpair]))

                            # vis(source_pred,
                            #     join(save_dir_name, 
                            #          datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'#src_hint.jpg'))

                            # vis(mapped_src_images,
                            #     join(save_dir_name, 
                            #          datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'#src_prediction.jpg'))
                            # gen_merged_img(original_rgb_img, source_pred,
                            #             join(save_dir_name, 
                            #          datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'%s#merge_into_rgb.jpg'%(string_source.replace(' ','#'))))

                            vis(target_gt,
                                join(save_dir_name, 
                                     datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'#tgt_hint.jpg'))
                            vis(mapped_tgt_images,
                                join(save_dir_name, 
                                     datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'#tgt_prediction.jpg'))

                            gen_merged_img(original_rgb_img, target_gt,  join(save_dir_name, 
                                        datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'%s#merge_into_rgb.jpg'%(string_target.replace(' ','#'))))
                            

                            vis(original_rgb_img,
                                join(save_dir_name, 
                                     datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'#original_rgb_img.jpg'))
                        
        logger.info('loss: %f'%(loss.item()) + '\t step_index: %d'%(step_index))
        # return loss + cross_domain_loss
        return loss 

    """
    mapping [X,W,H] into [3,W,H] using painter
    #!painter 
    #* Process prediction
    """
    def painter_process(self,map, task_id,num_classes, is_gt = False):
        """Render a raw task map into a [B, H, W, 3] image in [0, 255]-ish range;
        task_id: 0 = semantic, 1 = depth, 2 = normal.
        """
        # Semantic Segmentation
        if task_id == 0:
            if is_gt:
                map = self.process_gt_segmentation(map, num_classes)
            else:
                map = self.process_predicted_segmentation(map)
        # Depth Estimation
        elif task_id == 1:
            # NOTE(review): assumes depth values lie in [0, 10] — confirm against the dataset.
            map = map / 10 * 255
            map = map.squeeze(1).unsqueeze(-1)
            map = map.repeat(1, 1, 1, 3)
            
        elif task_id == 2:
            # Normals in [-1, 1] -> [0, 255].
            map = ((map + 1) / 2) * 255
            map = map.permute(0, 2, 3, 1)
            
        return map
    def get_diffusion_loss(self, jpg_ing, hint_img, txt_prompt):
        """Assemble a (jpg, hint, txt) batch and return the wrapped model's
        diffusion training loss for it.
        """
        batch = {}
        batch['jpg'] = jpg_ing
        batch['hint'] = hint_img

        if isinstance(txt_prompt, list):
            batch['txt'] = txt_prompt
        else:
            batch['txt'] = [txt_prompt]
        x, c = self.model.get_input(batch, 'jpg')
        loss_value, loss_dict = self.model(x, c)
        return loss_value

    def mapping_back_forward_one4all(self, maps, w, step_index, original_rgb_img = None):
        """
            maps: these are used for mapping back, there are semantics, depth and normal sequentially

            Batched variant of mapping_back_forward: all labeled task maps are
            rendered, stacked and scored against the RGB image in one diffusion
            loss call; every 300 steps the hints/predictions are visualized.
        """        
        
        original_rgb_img = original_rgb_img.unsqueeze(0).permute([0,2,3,1])
        original_rgb_img_resized = resize2square(original_rgb_img).unsqueeze(0).type(torch.float32)
        original_rgb_img_resized_interpolated = F.interpolate(original_rgb_img_resized ,\
            size=(512, 512, 3), mode='nearest').squeeze(0)
        original_rgb_img_resized_interpolated = (original_rgb_img_resized_interpolated * 2 ) -1 
        

        class_in_scene = maps[0].unique().tolist()
        class_in_scene = [self.nyud2_class_name_mapping[x+1] for x in class_in_scene if x != -1] #* the index in nyud2_class_name_mapping starts from 1 
        #* all or labeled? 
        #* todo 
        all_task = np.arange(len(self.tasks)).tolist()
        labeled_task_index = (w.data == 1).nonzero(as_tuple=False).view(-1).cpu().numpy() #* '1' indicates the label is available


        B = len(labeled_task_index)
        cur_map_in_img = [self.painter_process( maps[i], i, self.input_channels[self.tasks[i]], is_gt = True)  for i in labeled_task_index]
        text_prompt = ["an authentic indoor scene, including {}, the details consistent with the provided {}".format(','.join(class_in_scene), self.full_task_names[i]) for i in labeled_task_index ]
         


        #!========================================================================
        #* Randomly crop to get square images

        cur_map_in_img_resized = resize2square(torch.cat(cur_map_in_img))
        cur_map_in_img_resized_interpolated = F.interpolate(cur_map_in_img_resized.unsqueeze(0).type(torch.float32),\
            size=(512, 512, 3), mode='nearest').squeeze(0)

        cur_map_in_img_resized_interpolated_normalized = (cur_map_in_img_resized_interpolated/255)
        
        # gen_merged_img(original_rgb_img_resized_interpolated, cur_map_in_img_resized_interpolated_normalized[0],  'a.jpg', 0.7)

        loss = self.get_diffusion_loss(original_rgb_img_resized_interpolated.repeat([B,1,1,1]),cur_map_in_img_resized_interpolated_normalized,text_prompt)
        
        if step_index % 300 == 0 : #* 720 
            controlnet_out = self.inference(cur_map_in_img_resized_interpolated_normalized,text_prompt,ddim_steps = 30)
            # Recover the log directory from loguru's private sink path.
            save_dir_name = '/'.join(logger._core.handlers[1]._sink._path.split('/')[:-1])
            
            with open (join(save_dir_name,datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'#prompt.txt'),'w')  as f :
                f.write('\n'.join(text_prompt))
            
            if B > 1:
                for i in labeled_task_index:
                    vis(cur_map_in_img_resized_interpolated_normalized[i].unsqueeze(0),
                            join(save_dir_name, 
                            datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'#hint#%s.jpg'%(self.full_task_names[i].replace(' ','#'))))

                    vis(controlnet_out[i].unsqueeze(0),
                        join(save_dir_name, 
                                datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'#controlnet_out#%s.jpg'%(self.full_task_names[i].replace(' ','#'))))

                    gen_merged_img(original_rgb_img_resized_interpolated, cur_map_in_img_resized_interpolated_normalized[i].unsqueeze(0),  join(save_dir_name, 
                                datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'merge_into_rgb#%s.jpg'%(self.full_task_names[i].replace(' ','#'))))
            
            vis(original_rgb_img_resized_interpolated,
                join(save_dir_name, 
                        datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'#original_rgb_img.jpg'))
                
                    
        logger.info('loss: %f'%(loss.item()) + '\t step_index: %d'%(step_index))
        return loss 

    def mapping_back_forward(self, maps, w, step_index, original_rgb_img = None):
        """
            maps: these are used for mapping back, there are semantics, depth and normal sequentially

            Per-task variant: each labeled task map is rendered and scored
            against the RGB image one at a time, accumulating the diffusion loss.
        """
        

        loss = torch.tensor(0.0).cuda()
        
        original_rgb_img = original_rgb_img.unsqueeze(0).permute([0,2,3,1])
        original_rgb_img_resized = resize2square(original_rgb_img).unsqueeze(0).type(torch.float32)
        original_rgb_img_resized_interpolated = F.interpolate(original_rgb_img_resized ,\
            size=(512, 512, 3), mode='nearest').squeeze(0)
        original_rgb_img_resized_interpolated = (original_rgb_img_resized_interpolated * 2 ) -1 
        

        class_in_scene = maps[0].unique().tolist()
        class_in_scene = [self.nyud2_class_name_mapping[x+1] for x in class_in_scene if x != -1] #* the index in nyud2_class_name_mapping starts from 1 
        #* all or labeled? 
        #* todo 
        all_task = np.arange(len(self.tasks)).tolist()
        labeled_task_index = (w.data == 1).nonzero(as_tuple=False).view(-1).cpu().numpy() #* '1' indicates the label is available        
        for i in labeled_task_index:
            cur_map = maps[i]
            """

            draw_semantics(original_rgb_img.squeeze().permute(2,0,1).cpu(), cur_map.squeeze().cpu(), 0.6).save('a.jpg')  

            draw_painter_semantics(original_rgb_img.squeeze().permute(2,0,1).cpu(), cur_map_in_img.squeeze().cpu().numpy().astype(np.uint8), 0.6).save('b.jpg')  
            
            
            draw_painter_semantics(original_rgb_img_resized.squeeze().permute(2,0,1).cpu(), cur_map_in_img_resized.squeeze().cpu().numpy().astype(np.uint8), 0.6).save('c.jpg')  

            """
            text_prompt = "an authentic indoor scene, including {}, the details consistent with the provided {}".format(\
                    ','.join(class_in_scene), self.full_task_names[i])
            
            cur_map_in_img = self.painter_process(cur_map, i, self.input_channels[self.tasks[i]], is_gt = True)
            #* Randomly crop to get square images

            cur_map_in_img_resized = resize2square(cur_map_in_img)
            cur_map_in_img_resized_interpolated = F.interpolate(cur_map_in_img_resized.unsqueeze(0).type(torch.float32),\
                size=(512, 512, 3), mode='nearest').squeeze(0)

            cur_map_in_img_resized_interpolated_normalized = (cur_map_in_img_resized_interpolated/255)
            # NOTE(review): writes a debug overlay to 'a.jpg' in the CWD on every
            # iteration — looks like leftover debugging.
            gen_merged_img(original_rgb_img_resized_interpolated, cur_map_in_img_resized_interpolated_normalized,  'a.jpg', 0.7)            
            loss += self.get_diffusion_loss(original_rgb_img_resized_interpolated,\
                cur_map_in_img_resized_interpolated_normalized,text_prompt)
            
            if step_index % 300 == 0 : #* 720 
                controlnet_out = self.inference(cur_map_in_img_resized_interpolated_normalized,text_prompt,ddim_steps = 50)
                save_dir_name = '/'.join(logger._core.handlers[1]._sink._path.split('/')[:-1])
                
                with open (join(save_dir_name,datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'#prompt.txt'),'w')  as f :
                    f.write('\n'.join([text_prompt]))
                

                vis(cur_map_in_img_resized_interpolated_normalized,
                    join(save_dir_name, 
                            datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'#hint.jpg'))
                
                vis(controlnet_out,
                    join(save_dir_name, 
                            datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'#controlnet_out.jpg'))

                gen_merged_img(original_rgb_img_resized_interpolated, cur_map_in_img_resized_interpolated_normalized,  join(save_dir_name, 
                            datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'%s#merge_into_rgb.jpg'%(self.full_task_names[i].replace(' ','#'))))
                
                vis(original_rgb_img_resized_interpolated,
                    join(save_dir_name, 
                            datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'#original_rgb_img.jpg'))
                
                    
        logger.info('loss: %f'%(loss.item()) + '\t step_index: %d'%(step_index))
        return loss 

    """
    ddim_steps: #! key for the speed 
    """
    def inference(self,detected_map, prompt, ddim_steps = 30):
        """Sample images from the ControlNet via DDIM, conditioned on the hint
        ``detected_map`` (values in [0, 1], shape [B, W, H, 3]) and ``prompt``;
        returns decoded samples scaled to [0, 255] in [B, H, W, C] layout.
        """
        # todo: sync with huan-ang 
        
         
        guess_mode =  False
        strength = 1
        scale = 5#* original parameter is 9 
        seed = -1 #* fixed seed 
        eta = 0
        B,W,H,_ = detected_map.shape
        num_samples = B #* key for the number of generated image 
        
        with torch.no_grad():
            """
                # input_image = HWC3(input_image)
                # detected_map = apply_uniformer(resize_image(input_image, detect_resolution)) #* segmentation map 
                # img = resize_image(input_image, image_resolution)
                # H, W, C = img.shape
                # detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_NEAREST)
            """ 
            assert not torch.any(detected_map > 1) and not torch.any(detected_map < 0)   #* equal to torch.all(detected_map <= 1) and torch.all(detected_map >= 0)  
            control = detected_map.clone().squeeze() #* ensure the value range of detected_map is between 0 and 1
            if len(detected_map.shape) <4: #* only one sample is feed in 
                control = torch.stack([control for _ in range(num_samples)], dim=0)#* add the batch channel
            control = einops.rearrange(control, 'b h w c -> b c h w').clone()

            if seed == -1:
                seed = random.randint(0, 65535)
            seed_everything(seed)
            if isinstance(prompt, list):
                cond = {"c_concat": [control], "c_crossattn": [self.model.get_learned_conditioning(prompt )]}
            else:
                cond = {"c_concat": [control], "c_crossattn": [self.model.get_learned_conditioning([prompt ] * num_samples)]}

            un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [self.model.get_learned_conditioning([""] * num_samples)]}
            shape = (4, H // 8, W // 8)

            #* Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01
            self.model.control_scales = [strength * (0.825 ** float(12 - i)) \
                for i in range(13)] if guess_mode else ([strength] * 13) 
            
            samples, intermediates = self.ddim_sampler.sample(ddim_steps, num_samples,
                                                        shape, cond, verbose=False, eta=eta,
                                                        unconditional_guidance_scale=scale,
                                                        unconditional_conditioning=un_cond)

            
            x_samples = self.model.decode_first_stage(samples) #* from [B,4,96,64] -->  [B,3,768,512]
            # x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
            # results = [x_samples[i] for i in range(num_samples)]
            x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).clip(0, 255)
        return x_samples




def gen_merged_img(jpg_in_tensor, hint_in_tensor,save_name, jpg_ratio= 0.6):
    """Alpha-blend the RGB image (values in [-1, 1]) with the hint image
    (values in [0, 1]) and write the result to ``save_name``."""
    rgb_array = (jpg_in_tensor.squeeze().cpu().numpy() + 1) / 2 * 255
    hint_array = hint_in_tensor.squeeze().cpu().numpy() * 255
    blended = cv2.addWeighted(rgb_array, jpg_ratio, hint_array, 1 - jpg_ratio, 0)
    cv2.imwrite(save_name, blended)




def resize2square(img):
    """Crop a [B, H, W, C] tensor to a square along its longer spatial dimension.

    The crop offset is fixed at 1 (it replaced a previously random offset) but
    is clamped so it never exceeds the size difference. Bug fix: the original
    code used start=1 unconditionally, so an already-square input fell into the
    else-branch and was sliced to (H-1, W) — no longer square. Square inputs
    are now returned unchanged; non-square inputs behave exactly as before.
    """
    dim_1_num_pixels, dim_2_num_pixels = img.shape[1], img.shape[2]
    difference = np.abs(dim_2_num_pixels - dim_1_num_pixels)
    if difference == 0:
        # Already square: nothing to crop.
        return img
    # start = np.random.randint(low=0, high=difference + 1)
    start = min(1, difference)
    if dim_1_num_pixels < dim_2_num_pixels:
        # dimension 1 shorter, crop dimension 2
        end = start + dim_1_num_pixels
        img = img[:, :, start:end, :]
    else:
        # dimension 2 shorter, crop dimension 1
        end = start + dim_2_num_pixels
        img = img[:, start:end, :, :]
    return img

"""
vis the source_pred image in tensor format and save it as "%s.jpg"%(save_name)
the format is in [B, W,H,3]
"""
def vis(source_pred,save_name = None):
    """Min-max normalize a [B, W, H, 3] tensor to [0, 1] and save it as an image.

    Bug fix: a constant-valued tensor made ``max() - min()`` zero and the
    normalization divided by zero (NaN image); a tiny epsilon now guards the
    denominator without noticeably changing the output for normal inputs.
    """
    transform = T.ToPILImage()
    value_range = source_pred.max() - source_pred.min()
    tmp = (source_pred - source_pred.min()) / (value_range + 1e-12)
    transform(tmp.permute(0,3,1,2).squeeze()).save(save_name)
