import datetime
import os
import shutil
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union

import cv2
import einops
import imageio
import ipdb
import numpy as np
import pyrallis
import torch
import torch.nn.functional as F
from loguru import logger
from matplotlib import cm
from PIL import Image
from torch import autocast, nn
from torch.autograd import Variable
from torch.cuda.amp import GradScaler
from torch.utils.data import DataLoader
from tqdm import tqdm

from src import utils
from src.configs.train_config import TrainConfig
from src.models.textured_mesh import TexturedMeshModel
from src.stable_diffusion_depth import StableDiffusion
from src.training.views_dataset import ViewsDataset, MultiviewDataset
from src.utils import make_path, tensor2numpy, calculate_weight_map


class TEXTure:
    def __init__(self, cfg: TrainConfig):
        """Set up experiment folders, logging, the diffusion guide, the mesh model and dataloaders.

        Args:
            cfg: full training configuration (optim / guide / render / log sections).
        """
        self.cfg = cfg
        self.view_idx = 0
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        utils.seed_everything(self.cfg.optim.seed)

        # BUGFIX: compute the run timestamp once. The original called
        # datetime.now() twice (for rmtree and for make_path); if the minute
        # ticked over between the calls, the folder removed and the folder
        # created were different directories.
        run_stamp = datetime.datetime.now().strftime("%m-%d_%H-%M")

        # Remove the old experiment folder if it exists
        shutil.rmtree(self.cfg.log.exp_dir / run_stamp, ignore_errors=True)

        # Make view_dirs
        self.exp_path = make_path(self.cfg.log.exp_dir / run_stamp)
        self.ckpt_path = make_path(self.exp_path / 'checkpoints')
        self.train_renders_path = make_path(self.exp_path / 'vis' / 'train')
        self.eval_renders_path = make_path(self.exp_path / 'vis' / 'eval')
        self.final_renders_path = make_path(self.exp_path / 'results')

        self.init_logger()
        pyrallis.dump(self.cfg, (self.exp_path / 'config.yaml').open('w'))

        self.view_dirs = ['front', 'left', 'back', 'right', 'overhead', 'bottom']
        self.diffusion = self.init_diffusion()
        self.mesh_model = self.init_mesh_model()
        self.text_z, self.text_string = self.calc_text_embeddings()
        self.dataloaders = self.init_dataloaders()
        # Background image as a (3, H, W) float tensor in [0, 1].
        self.back_im = torch.Tensor(np.array(Image.open(self.cfg.guide.background_img).convert('RGB'))).to(
            self.device).permute(2, 0,
                                 1) / 255.0
        self.debug_view_num = self.cfg.render.debug_view_num
        self.update_step = self.cfg.optim.update_step
        self.latent_mode = self.cfg.guide.latent_mode
        logger.info(f'Successfully initialized {self.cfg.log.exp_name}')

    def init_mesh_model(self) -> nn.Module:
        """Build the textured mesh model on the target device and log its size."""
        mesh_cache_dir = Path('cache') / Path(self.cfg.guide.shape_path).stem
        mesh_cache_dir.mkdir(parents=True, exist_ok=True)

        mesh_model = TexturedMeshModel(
            self.cfg.guide,
            device=self.device,
            render_grid_size=self.cfg.render.train_grid_size,
            cache_path=mesh_cache_dir,
            texture_resolution=self.cfg.guide.texture_resolution,
            augmentations=False,
        ).to(self.device)

        trainable = sum([p.numel() for p in mesh_model.parameters() if p.requires_grad])
        logger.info(f'Loaded Mesh, #parameters: {trainable}')
        logger.info(mesh_model)
        return mesh_model

    def init_diffusion(self) -> Any:
        """Instantiate the depth-conditioned Stable Diffusion guide with frozen weights."""
        model = StableDiffusion(
            self.device,
            model_name=self.cfg.guide.diffusion_name,
            concept_name=self.cfg.guide.concept_name,
            concept_path=self.cfg.guide.concept_path,
            latent_mode=False,
            min_timestep=self.cfg.optim.min_timestep,
            max_timestep=self.cfg.optim.max_timestep,
            no_noise=self.cfg.optim.no_noise,
            use_inpaint=True,
            scheduler_name=self.cfg.optim.scheduler_name,
        )

        # The guidance network is never trained here — freeze everything.
        for param in model.parameters():
            param.requires_grad = False
        # Hand the diffusion wrapper this trainer's image logger so it can
        # dump intermediate visualizations.
        model.log_overlap_train_image = self.log_overlap_train_image
        return model

    def calc_text_embeddings(self) -> Tuple[Union[torch.Tensor, List[torch.Tensor]], Union[str, List[str]]]:
        """Encode the guidance prompt(s) with the diffusion text encoder.

        Returns:
            A ``(text_z, text_string)`` pair. When ``append_direction`` is set,
            both are per-view-direction lists (the prompt template is formatted
            with each entry of ``self.view_dirs``); otherwise a single embedding
            and the raw prompt string.

        Note: the original annotation (``Union[torch.Tensor, List[torch.Tensor]]``)
        was wrong — this method always returns a 2-tuple.
        """
        ref_text = self.cfg.guide.text
        if not self.cfg.guide.append_direction:
            text_z = self.diffusion.get_text_embeds([ref_text])
            text_string = ref_text
        else:
            text_z = []
            text_string = []
            for d in self.view_dirs:
                # Prompt template is expected to contain a '{}' slot for the direction.
                text = ref_text.format(d)
                text_string.append(text)
                logger.info(text)
                negative_prompt = None
                logger.info(negative_prompt)
                text_z.append(self.diffusion.get_text_embeds([text], negative_prompt=negative_prompt))
        return text_z, text_string

    def init_dataloaders(self) -> Dict[str, DataLoader]:
        """Create the camera-viewpoint dataloaders used for painting and evaluation."""
        train_loader = MultiviewDataset(self.cfg.render, device=self.device).dataloader()
        eval_loader = ViewsDataset(self.cfg.render, device=self.device,
                                   size=self.cfg.log.eval_size).dataloader()
        # Will be used for creating the final video
        full_eval_loader = ViewsDataset(self.cfg.render, device=self.device,
                                        size=self.cfg.log.full_eval_size).dataloader()
        return {
            'train': train_loader,
            'val': eval_loader,
            'val_large': full_eval_loader,
        }

    def init_logger(self):
        """Route loguru through tqdm (keeps progress bars intact) and into log.txt."""
        fmt = "<green>{time:YYYY-MM-DD HH:mm:ss}</green> <level>{message}</level>"
        logger.remove()  # drop loguru's default stderr sink
        logger.add(lambda msg: tqdm.write(msg, end=""), colorize=True, format=fmt)
        logger.add(self.exp_path / 'log.txt', colorize=False, format=fmt)

    def paint(self):
        """Sequentially paint the texture from each training viewpoint, evaluating after each."""
        logger.info('Starting training ^_^')
        # Evaluate the initialization
        self.evaluate(self.dataloaders['val'], self.eval_renders_path)
        self.mesh_model.train()

        train_loader = self.dataloaders['train']
        progress = tqdm(
            total=len(train_loader),
            initial=self.view_idx,
            bar_format='{desc}: {percentage:3.0f}% painting step {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]',
        )

        for view_data in train_loader:
            self.view_idx += 1
            progress.update(1)
            self.paint_viewpoint(view_data)
            # Re-evaluate after every painted view, then switch back to train mode.
            self.evaluate(self.dataloaders['val'], self.eval_renders_path)
            self.mesh_model.train()

        self.mesh_model.change_default_to_median()
        logger.info('Finished Painting ^_^')
        logger.info('Saving the last result...')
        self.full_eval()
        logger.info('\tDone!')

    def evaluate(self, dataloader: DataLoader, save_path: Path, save_as_video: bool = False):
        """Render every view in `dataloader` and save the results under `save_path`.

        Per view saves an RGB image and a colormapped normals image (plus raw
        depths on painting iteration 0); the shared texture map is saved once.
        With ``save_as_video`` the RGB frames are assembled into an mp4 instead.

        Args:
            dataloader: views to render.
            save_path: output directory (created if missing).
            save_as_video: write one video instead of per-view images.
        """
        logger.info(f'Evaluating and saving model, painting iteration #{self.view_idx}...')
        self.mesh_model.eval()
        save_path.mkdir(exist_ok=True)

        if save_as_video:
            all_preds = []
        textures = None  # holds the last render's textures; stays None on an empty loader
        for i, data in enumerate(dataloader):
            preds, textures, depths, normals = self.eval_render(data)

            pred = tensor2numpy(preds[0])

            if save_as_video:
                all_preds.append(pred)
            else:
                Image.fromarray(pred).save(save_path / f"step_{self.view_idx:05d}_{i:04d}_rgb.jpg")
                # Seismic colormap makes the normals cache easy to inspect visually.
                Image.fromarray((cm.seismic(normals[0, 0].cpu().numpy())[:, :, :3] * 255).astype(np.uint8)).save(
                    save_path / f'{self.view_idx:04d}_{i:04d}_normals_cache.jpg')
                if self.view_idx == 0:
                    # Also save depths for debugging
                    torch.save(depths[0], save_path / f"{i:04d}_depth.pt")

        # BUGFIX: the original raised NameError on `textures` when the
        # dataloader was empty; bail out gracefully instead.
        if textures is None:
            logger.info('Evaluation dataloader was empty; nothing to save.')
            return

        # Texture map is the same for every view, so just take the last result
        texture = tensor2numpy(textures[0])
        Image.fromarray(texture).save(save_path / f"step_{self.view_idx:05d}_texture.png")

        if save_as_video:
            all_preds = np.stack(all_preds, axis=0)

            dump_vid = lambda video, name: imageio.mimsave(save_path / f"step_{self.view_idx:05d}_{name}.mp4", video,
                                                           fps=25,
                                                           quality=8, macro_block_size=1)

            dump_vid(all_preds, 'rgb')
        logger.info('Done!')

    def full_eval(self, output_dir: Path = None):
        """Render the large eval set as a video and, if configured, export the mesh."""
        target_dir = self.final_renders_path if output_dir is None else output_dir
        self.evaluate(self.dataloaders['val_large'], target_dir, save_as_video=True)

        if self.cfg.log.save_mesh:
            mesh_dir = make_path(self.exp_path / 'mesh')
            logger.info(f"Saving mesh to {mesh_dir}")
            self.mesh_model.export_mesh(mesh_dir)
            logger.info(f"\tDone!")

    def update_latent_uvmap(self, object_mask, render_cache, background, latent, uvmap_mask, uvmap_all):
        """Splat this view's latent into the UV accumulator and bump per-texel hit counts.

        Returns the updated ``(uvmap_mask, uvmap_all)`` pair (modified in place).
        """
        # 'hw_corrdinates' (sic — key spelling comes from the renderer) maps
        # screen pixels to texel coordinates.
        coords = render_cache['hw_corrdinates']
        rows = coords[0, :, :, 0]
        cols = coords[0, :, :, 1]
        uvmap_all[:, :, rows, cols] += latent
        uvmap_mask[:, :, rows, cols] += 1

        return uvmap_mask, uvmap_all

    def update_latent(self, latent, ref_img, update_mask, step=10, loss_on_x0=False, text_z=None, cropped_depth_render=None, t=None):
        """Optimize `latent` so its decoded image matches `ref_img` inside `update_mask`.

        Args:
            latent: latent tensor to optimize (detached copy is used).
            ref_img: target image; only the masked region contributes to the loss.
            update_mask: binary mask selecting the region to match.
            step: number of Adam steps.
            loss_on_x0: if True, denoise one step to x0 first and compute the
                loss on the decoded x0 instead of the decoded latent.
            text_z, cropped_depth_render, t: only used when ``loss_on_x0`` is True.

        Returns:
            The optimized latent, cast back to half precision.

        Raises:
            ValueError: if ``cfg.optim.loss_type`` is neither 'mse' nor 'l1'
            (the original code crashed with NameError on an unbound ``loss``).
        """
        with torch.enable_grad():
            ref_img = ref_img.detach().requires_grad_(False) * update_mask
            scaler = GradScaler()
            logger.info(f'--- Update latent ---')
            latent = torch.nn.Parameter(latent.detach().data.float(), requires_grad=True)
            # Stored on self so the rest of the trainer can inspect the last optimizer used.
            self.optimizer = torch.optim.Adam([latent], lr=1e-2)

            with tqdm(total=step, desc=f'update latent step {step}') as pbar:
                for _ in range(step):
                    self.optimizer.zero_grad()

                    with autocast("cuda", dtype=torch.float16):
                        if loss_on_x0:
                            # Predict x0 from the current latent, then decode that.
                            latent_origin = self.diffusion.img2img_single_step(text_z, latent, cropped_depth_render, t, guidance_scale=self.cfg.guide.guidance_scale, func= 'get_x_origin')
                            decoded_img = self.diffusion.decode_latents(latent_origin, require_grad= True) * update_mask
                        else:
                            decoded_img = self.diffusion.decode_latents(latent, require_grad= True) * update_mask
                        if self.cfg.optim.loss_type == 'mse':
                            loss = torch.nn.functional.mse_loss(decoded_img, ref_img)
                        elif self.cfg.optim.loss_type == 'l1':
                            loss = torch.nn.functional.l1_loss(decoded_img, ref_img)
                        else:
                            # Fail loudly instead of NameError on unset `loss`.
                            raise ValueError(f'Unknown loss type: {self.cfg.optim.loss_type}')

                    scaler.scale(loss).backward()
                    scaler.step(self.optimizer)
                    scaler.update()

                    pbar.set_postfix({'loss': '%.2f'%loss.item()})
                    pbar.update(1)

        self.log_overlap_train_image(decoded_img * update_mask, f'decoded_img_after_update')
        return latent.data.half()  # Return the updated latent
    
    def export_texture_map(self, rgb_output, render_cache, background, object_mask, update_mask):
        """Project `rgb_output` into UV space and return the resulting texture map.

        The fit is non-destructive: the model's texture map is snapshotted
        before projection and restored afterwards.
        """
        # Snapshot so the projection below can be rolled back.
        saved_texture = self.mesh_model.texture_img.data.clone().detach()
        # Fit the uvmap against this view's render.
        self.project_back_overlap(render_cache=render_cache, background=background, rgb_output=rgb_output,
                                  update_mask=update_mask, object_mask=object_mask)
        projected_uvmap = self.mesh_model.texture_img.clone().detach()
        # Roll back: leave the model's texture exactly as we found it.
        self.mesh_model.texture_img.data = saved_texture
        return projected_uvmap

    def init_render_cache(self):
        """Render each training view once and cache everything the painting loops reuse.

        Fills ``self.render_cache`` with per-view render outputs, masks, crop bounds
        and text embeddings, initializes the texture map with noise, and — in
        non-latent mode — builds ``self.normal_weight_map`` from the per-view meta
        texture maps.
        """
        self.denoise_step = 'init'
        self.img_idx = 0
        # Set background image
        if self.cfg.guide.use_background_color:
            background = torch.Tensor([0, 0.8, 0]).to(self.device)
        else:
            background = F.interpolate(self.back_im.unsqueeze(0), (512, 512),mode='bilinear', align_corners=False)
            # In latent mode the background must live in latent space as well.
            background = self.diffusion.encode_imgs(background).detach() if self.latent_mode else background

        # One cache slot per view; the fixed capacity of 10 is assumed to cover
        # debug_view_num + 1 views — TODO confirm against config limits.
        self.render_cache = [{'render_cache': None, 'object_mask': None, 'text_z': None, 'text_string': None, 'update_mask': None, 'crop_bound': None, 'cropped_rgb_render': None, 'cropped_update_mask': None, 'cropped_depth_render': None, 'meta_texture_map': None, 'background': background} for _ in range(10)]

        # Initialize the texture map with noise (decoded latent noise when
        # painting in RGB space, raw noise when painting latents).
        if not self.latent_mode:
            original_latent = torch.randn(1, 4, 128, 128).to(self.device)
            self.mesh_model.texture_img.data = self.diffusion.decode_latents(original_latent).detach()
            self.log_overlap_train_image(self.mesh_model.texture_img, name='init_uvmap')
        else:
            self.mesh_model.texture_img.data = torch.randn_like(self.mesh_model.texture_img.data).detach()

        for view_idx, data in enumerate(self.dataloaders['train']):
            # Only the first debug_view_num + 1 views are cached.
            if view_idx > self.debug_view_num:
                break

            logger.info(f'--- Init Painting step #{view_idx} ---')
            theta, phi, radius = data['theta'], data['phi'], data['radius']
            # If offset of phi was set from code
            phi = phi - np.deg2rad(self.cfg.render.front_offset)
            phi = float(phi + 2 * np.pi if phi < 0 else phi)
            logger.info(f'Painting from theta: {theta}, phi: {phi}, radius: {radius}')

            # Render from viewpoint (fresh rasterization; caches geometry in render_cache)
            outputs = self.mesh_model.render(theta=theta, phi=phi, radius=radius, background=background, render_cache = None)
            render_cache = outputs['render_cache']
            depth_render = outputs['depth']
            # Render again with the median value to use as rgb, we shouldn't have color leakage, but just in case
            outputs = self.mesh_model.render(background=background,
                                            render_cache=render_cache, use_median=False)
            rgb_render = outputs['image']
            object_mask = outputs['mask']
            # ipdb.set_trace() # check the size of the mask
            self.render_cache[view_idx]['background'] = background
            self.render_cache[view_idx]['render_cache'] = render_cache
            self.render_cache[view_idx]['object_mask'] = object_mask
            self.render_cache[view_idx]['rgb_render'] = rgb_render
            # Text embeddings: pick the direction-specific prompt when enabled.
            if self.cfg.guide.append_direction:
                dirs = data['dir']  # [B,]
                text_z = self.text_z[dirs]
                text_string = self.text_string[dirs]
            else:
                text_z = self.text_z
                text_string = self.text_string
            logger.info(f'text: {text_string}')

            self.render_cache[view_idx]['text_z'] = text_z
            self.render_cache[view_idx]['text_string'] = text_string

            update_mask = self.cal_update_mask(rgb_render_raw=rgb_render,
                                                depth_render=depth_render,
                                                mask=outputs['mask'])

            self.render_cache[view_idx]['update_mask'] = update_mask

            # Crop to inner region based on object mask
            min_h, min_w, max_h, max_w = utils.get_nonzero_region(outputs['mask'][0, 0])
            crop = lambda x: x[:, :, min_h:max_h, min_w:max_w]
            cropped_rgb_render = crop(rgb_render)
            cropped_update_mask = crop(update_mask)
            cropped_depth_render = crop(depth_render)

            self.render_cache[view_idx]['crop_bound'] = (min_h, min_w, max_h, max_w)
            self.render_cache[view_idx]['cropped_rgb_render'] = cropped_rgb_render
            self.render_cache[view_idx]['cropped_update_mask'] = cropped_update_mask
            self.render_cache[view_idx]['cropped_depth_render'] = cropped_depth_render

            # Fit the uvmap: project this view into the meta texture, cache its
            # first channel, then reset the meta texture for the next view.
            if not self.latent_mode:
                _ = self.project_back_meta_texture(render_cache=render_cache)
                self.render_cache[view_idx]['meta_texture_map'] = self.mesh_model.meta_texture_img.clone().detach()[:, :1]

                self.log_overlap_train_image(self.render_cache[view_idx]['meta_texture_map'][0,0], name=f'view_{view_idx}_meta_texture_map', colormap=True)
                self.mesh_model.meta_texture_img = nn.Parameter(torch.zeros_like(self.mesh_model.texture_img))

        if not self.latent_mode:
            # Calculate the normal weight map across all cached views.
            self.normal_weight_map = calculate_weight_map(torch.cat([self.render_cache[i]['meta_texture_map'] for i in range(self.debug_view_num + 1)], dim=0))
            self.log_overlap_train_image(self.normal_weight_map[0,0], name=f'view_{0}_normal_weight_map', colormap=True)
            # self.log_overlap_train_image(self.normal_weight_map[1,0], name=f'view_{1}_normal_weight_map', colormap=True)

    def load_render_cache(self, view_idx):
        """Unpack one view's render-cache entry into the tuple the painting loops expect."""
        entry = self.render_cache[view_idx]
        min_h, min_w, max_h, max_w = entry['crop_bound']
        return (entry['text_z'],
                entry['cropped_rgb_render'],
                entry['cropped_update_mask'],
                entry['cropped_depth_render'],
                entry['render_cache'],
                entry['background'],
                min_h, min_w, max_h, max_w,
                entry['object_mask'],
                entry['update_mask'],
                entry['rgb_render'])
    
    def paint_overlap_imgx0(self):
        """Paint the texture by denoising all views in lockstep in image space.

        At each diffusion timestep every view predicts its x0 image; while the
        step index is below ``cfg.optim.overlap_range`` those predictions are
        fused into a single normal-weighted UV map, which is re-rendered per view
        and used as the optimization target for the per-view latents. After the
        loop, the final latents are decoded and projected into the texture once more.
        """
        self.init_render_cache()
        self.mesh_model.train()

        num_inference_steps = 50
        self.diffusion.scheduler.set_timesteps(num_inference_steps)
        timesteps, num_inference_steps = self.diffusion.get_timesteps(num_inference_steps, 1.0)
        # Per-view denoising state: current latent x_t and its predicted x0.
        self.latent_cache = [{'latent': None, 'latent_origin': None} for _ in range(self.debug_view_num + 1)]

        for i, t in tqdm(enumerate(timesteps)):
            # Cross-view fusion only happens during the first `overlap_range` steps.
            is_overlap = (i < self.cfg.optim.overlap_range)
            uvmapx0_list = []
            self.denoise_step = t
            self.img_idx = 0

            # Pass 1: predict x0 per view and fuse the decoded images into the uvmap.
            for self.view_idx, data in enumerate(self.dataloaders['train']):

                # Only paint the first debug_view_num + 1 views.
                if self.view_idx > self.debug_view_num:
                    self.view_idx -= 1
                    break

                logger.info(f'--- Load the render cache! ---')
                text_z, cropped_rgb_render, cropped_update_mask, cropped_depth_render, render_cache, background, min_h, min_w, max_h, max_w, object_mask, update_mask, rgb_render = self.load_render_cache(self.view_idx)

                # First timestep only: seed the latent from the cropped render.
                if self.latent_cache[self.view_idx]['latent'] is None:
                    rand_input = F.interpolate(cropped_rgb_render, (512, 512), mode='bilinear',
                                         align_corners=False)
                    self.log_overlap_train_image(rand_input, name='rand_input_for_init_latent')
                    self.latent_cache[self.view_idx]['latent'] = self.diffusion.encode_imgs(rand_input)

                # Predict this view's x0 from the current latent (single denoising step).
                latent_origin = self.diffusion.img2img_single_step(text_z, self.latent_cache[self.view_idx]['latent'], cropped_depth_render, t, guidance_scale=self.cfg.guide.guidance_scale, func= 'get_x_origin')
                self.latent_cache[self.view_idx]['latent_origin'] = latent_origin

                if is_overlap:
                    cropped_img_origin = self.diffusion.decode_latents(latent_origin)

                    # Extend rgb_output to full image size
                    rgb_output = rgb_render.clone()
                    rgb_output[:, :, min_h:max_h, min_w:max_w] = F.interpolate(cropped_img_origin,
                                                    (cropped_rgb_render.shape[2], cropped_rgb_render.shape[3]),
                                                    mode='bilinear', align_corners=False)
                    self.log_overlap_train_image(rgb_output, name='output_origin_full_size')

                    uvmapx0_list.append(self.export_texture_map(rgb_output, render_cache, background, object_mask, update_mask))

            if is_overlap:
                # BUGFIX: the original reused `i` here, shadowing the outer
                # timestep index; renamed to `view_i`.
                for view_i in range(self.debug_view_num + 1):
                    self.log_overlap_train_image(uvmapx0_list[view_i] * self.normal_weight_map[view_i], name=f'view_{view_i}_uvmap_weighted')

                # Normal-weighted sum of the per-view uvmaps.
                weighted_uvmapx0 = torch.mul(torch.cat(uvmapx0_list, dim=0), self.normal_weight_map[0:self.debug_view_num + 1]).sum(dim=0, keepdim=True)

                # update the uvmap
                self.mesh_model.texture_img.data = weighted_uvmapx0
                self.log_overlap_train_image(self.mesh_model.texture_img, name='uvmapx_after_change')

            # Pass 2: re-render each view from the fused uvmap and pull the
            # per-view latents toward that fused target.
            for self.view_idx, data in enumerate(self.dataloaders['train']):

                # Only paint the first debug_view_num + 1 views.
                if self.view_idx > self.debug_view_num:
                    self.view_idx -= 1
                    break

                logger.info(f'--- Update original img, view: #{self.view_idx} ---')
                theta, phi, radius = data['theta'], data['phi'], data['radius']
                # If offset of phi was set from code
                phi = phi - np.deg2rad(self.cfg.render.front_offset)
                phi = float(phi + 2 * np.pi if phi < 0 else phi)
                logger.info(f'Painting from theta: {theta}, phi: {phi}, radius: {radius}')

                # Render from viewpoint, reusing this view's cached rasterization.
                outputs = self.mesh_model.render(theta=theta, phi=phi, radius=radius, background=background, 
                                                render_cache = self.render_cache[self.view_idx]['render_cache'], use_median=False)

                rgb_render = outputs['image']
                depth_render = outputs['depth']
                update_mask = self.cal_update_mask(rgb_render_raw=rgb_render,
                                                    depth_render=depth_render,
                                                    mask=outputs['mask'])

                # Crop to inner region based on object mask
                min_h, min_w, max_h, max_w = utils.get_nonzero_region(outputs['mask'][0, 0])
                crop = lambda x: x[:, :, min_h:max_h, min_w:max_w]
                cropped_rgb_render = crop(rgb_render)
                cropped_depth_render = crop(depth_render)
                cropped_update_mask = crop(update_mask)
                cropped_update_mask = F.interpolate(cropped_update_mask, (512, 512))
                ref_img = F.interpolate(cropped_rgb_render, (512, 512), mode='bilinear', align_corners=False)
                if is_overlap:
                    self.log_overlap_train_image(ref_img * cropped_update_mask , f'update_img: ref_img')

                if self.cfg.optim.update_target == 'x0':
                    # Optimize the x0 latent toward the fused render, then step x_t from it.
                    if self.debug_view_num > 0 and is_overlap:
                        latent_origin = self.update_latent(self.latent_cache[self.view_idx]['latent_origin'], ref_img, update_mask=cropped_update_mask, step=self.update_step)
                    else:
                        latent_origin = self.latent_cache[self.view_idx]['latent_origin']

                    # according to latent_origin get latent 
                    self.latent_cache[self.view_idx]['latent'] = self.diffusion.step_from_origin_ddpm(self.latent_cache[self.view_idx]['latent'], latent_origin, t)
                elif self.cfg.optim.update_target == 'xt':
                    # Optimize x_t directly, with the loss computed on its predicted x0.
                    logger.info('Update the latent xt by the loss of original img x0')
                    if self.debug_view_num > 0 and is_overlap:
                        latent = self.update_latent(self.latent_cache[self.view_idx]['latent'], ref_img, update_mask=cropped_update_mask, step=self.update_step, loss_on_x0 = True, text_z = self.render_cache[self.view_idx]['text_z'], cropped_depth_render = cropped_depth_render, t = t)
                    else:
                        # BUGFIX: the original read the loop-leftover `latent_origin`
                        # from pass 1's LAST view here; use this view's cached x0
                        # prediction instead.
                        latent = self.diffusion.step_from_origin_ddpm(self.latent_cache[self.view_idx]['latent'], self.latent_cache[self.view_idx]['latent_origin'], t)

                    self.latent_cache[self.view_idx]['latent'] = latent

        # Final pass: decode each view's latent and project back into one weighted uvmap.
        uvmap_list = []
        self.denoise_step = 'Final'

        for self.view_idx, data in enumerate(self.dataloaders['train']):

            # Only paint the first debug_view_num + 1 views.
            if self.view_idx > self.debug_view_num:
                self.view_idx -= 1
                break

            logger.info(f'--- Painting step #{self.view_idx} ---')
            text_z, cropped_rgb_render, cropped_update_mask, cropped_depth_render, render_cache, background, min_h, min_w, max_h, max_w, object_mask, update_mask, rgb_render = self.load_render_cache(self.view_idx)

            decode_img = self.diffusion.decode_latents(self.latent_cache[self.view_idx]['latent'].float())

            rgb_output = rgb_render.clone()
            rgb_output[:, :, min_h:max_h, min_w:max_w] = F.interpolate(decode_img,
                                                (cropped_rgb_render.shape[2], cropped_rgb_render.shape[3]),
                                                mode='bilinear', align_corners=False)

            uvmap_list.append(self.export_texture_map(rgb_output, render_cache, background, object_mask, update_mask))

        weighted_uvmap = torch.mul(torch.cat(uvmap_list, dim=0), self.normal_weight_map[0:self.debug_view_num + 1]).sum(dim=0, keepdim=True)
        self.mesh_model.texture_img.data = weighted_uvmap
        self.log_overlap_train_image(self.mesh_model.texture_img, name='final_uvmap_after_change')

    def paint_overlap_imgxt(self):
        """
        paint by overlapping at image space 
        """
        self.init_render_cache()
        self.mesh_model.train()
        
        num_inference_steps = 50
        self.diffusion.scheduler.set_timesteps(num_inference_steps)
        timesteps, num_inference_steps = self.diffusion.get_timesteps(num_inference_steps, 1.0)
        self.latent_cache = [{'latent': None,'latent_origin': None,'meta_texture_map': None} for _ in range(10)]

        for i, t in tqdm(enumerate(timesteps)):
            is_overlap = (0 < i < self.cfg.optim.overlap_range)
            uvmapxt_list = []
            self.denoise_step = t
            self.img_idx = 0

            # original img fusion into the uvmap
            for self.view_idx, data in enumerate(self.dataloaders['train']):

                # break the loop if the paint_step is larger than 1
                if self.view_idx > self.debug_view_num:
                    self.view_idx -=1
                    break

                logger.info(f'--- Load the render cache! ---')
                text_z, cropped_rgb_render, cropped_update_mask, cropped_depth_render, render_cache, background, min_h, min_w, max_h, max_w, object_mask, update_mask, rgb_render = self.load_render_cache(self.view_idx)
                

                latent = self.diffusion.img2img_single_step(text_z, self.latent_cache[self.view_idx]['latent'], cropped_depth_render, t, guidance_scale=self.cfg.guide.guidance_scale)
                if "PNDM" in self.diffusion.scheduler.__class__.__name__:
                    self.diffusion.scheduler.counter -= 1
                self.latent_cache[self.view_idx]['latent'] = latent

                if is_overlap:
                    cropped_img_xt = self.diffusion.decode_latents(latent)
                    
                    # Extend rgb_output to full image size
                    rgb_output = rgb_render.clone()
                    rgb_output[:, :, min_h:max_h, min_w:max_w] = F.interpolate(cropped_img_xt,
                                                    (cropped_rgb_render.shape[2], cropped_rgb_render.shape[3]),
                                                    mode='bilinear', align_corners=False)
                    self.log_overlap_train_image(rgb_output, name='output_origin_full_size')

                    uvmapxt_list.append(self.export_texture_map(rgb_output, render_cache, background, object_mask, update_mask))
            
            if "PNDM" in self.diffusion.scheduler.__class__.__name__:
                self.diffusion.scheduler.counter += 1
            
            if is_overlap:

                # log weighted uvmap
                for i in range(self.view_idx + 1):
                    self.log_overlap_train_image(uvmapxt_list[i] * self.normal_weight_map[i], name=f'view_{i}_uvmap_weighted')
                
                weighted_uvmapxt = torch.mul(torch.cat(uvmapxt_list, dim=0), self.normal_weight_map[0:self.view_idx + 1]).sum(dim=0, keepdim=True)

                # update the uvmap
                self.mesh_model.texture_img.data = weighted_uvmapxt
                self.log_overlap_train_image(self.mesh_model.texture_img, name='uvmapxt_after_fusuion')
                
                # begin update the latent
                for self.view_idx, data in enumerate(self.dataloaders['train']):

                    # break the loop if the paint_step is larger than 1
                    if self.view_idx > self.debug_view_num:
                        self.view_idx -=1
                        break

                    logger.info(f'--- Update original img, view: #{self.view_idx} ---')
                    theta, phi, radius = data['theta'], data['phi'], data['radius']
                    # If offset of phi was set from code
                    phi = phi - np.deg2rad(self.cfg.render.front_offset)
                    phi = float(phi + 2 * np.pi if phi < 0 else phi)
                    logger.info(f'Painting from theta: {theta}, phi: {phi}, radius: {radius}')

                    # Render from viewpoint
                    outputs = self.mesh_model.render(theta=theta, phi=phi, radius=radius, background=background, 
                                                    render_cache = self.render_cache[self.view_idx]['render_cache'], use_median=False)

                    rgb_render = outputs['image']
                    depth_render = outputs['depth']
                    # self.log_overlap_train_image(depth_render[0, 0], 'depth', colormap=True)
                    update_mask = self.cal_update_mask(rgb_render_raw=rgb_render,
                                                        depth_render=depth_render,
                                                        mask=outputs['mask'])
                    
                    # Crop to inner region based on object mask
                    min_h, min_w, max_h, max_w = utils.get_nonzero_region(outputs['mask'][0, 0])
                    crop = lambda x: x[:, :, min_h:max_h, min_w:max_w]
                    cropped_rgb_render = crop(rgb_render)
                    cropped_update_mask = crop(update_mask)
                    cropped_update_mask = F.interpolate(cropped_update_mask, (512, 512))
                    ref_img = F.interpolate(cropped_rgb_render, (512, 512), mode='bilinear', align_corners=False)
                    self.log_overlap_train_image(ref_img * cropped_update_mask , f'update_img: ref_img')

                    # update latent  
                    if self.debug_view_num > 0:
                        latent = self.update_latent(self.latent_cache[self.view_idx]['latent'], ref_img, update_mask=cropped_update_mask, step=self.update_step)
                    else:
                        latent = self.latent_cache[self.view_idx]['latent']

                    self.latent_cache[self.view_idx]['latent'] = latent
       
        # project back latent img to get the final result
        uvmap_list = []
        self.denoise_step = 'Final'

        for self.view_idx, data in enumerate(self.dataloaders['train']):

            # break the loop if the paint_step is larger than 1
            if self.view_idx > self.debug_view_num:
                self.view_idx -=1
                break

            logger.info(f'--- Painting step #{self.view_idx} ---')
            text_z, cropped_rgb_render, cropped_update_mask, cropped_depth_render, render_cache, background, min_h, min_w, max_h, max_w, object_mask, update_mask, rgb_render = self.load_render_cache(self.view_idx)
                
            decode_img = self.diffusion.decode_latents(self.latent_cache[self.view_idx]['latent'].float())

            rgb_output = rgb_render.clone()
            rgb_output[:, :, min_h:max_h, min_w:max_w] = F.interpolate(decode_img,
                                                (cropped_rgb_render.shape[2], cropped_rgb_render.shape[3]),
                                                mode='bilinear', align_corners=False)
            self.log_overlap_train_image(rgb_output, name='output_origin_full_size(need_verify)') 
            
            uvmap_list.append(self.export_texture_map(rgb_output, render_cache, background, object_mask, update_mask))

        weighted_uvmap = torch.mul(torch.cat(uvmap_list, dim=0), self.normal_weight_map[0:self.debug_view_num + 1]).sum(dim=0, keepdim=True)
        self.mesh_model.texture_img.data = weighted_uvmap
        self.log_overlap_train_image(self.mesh_model.texture_img, name='final_uvmap_after_change')

    def paint_overlap_latent(self):
        """Jointly denoise all training views in latent space while fusing the texture.

        For every diffusion timestep, each view performs one overlap-aware
        latent denoising step (resuming from its cached latent), the result is
        projected back onto the UV texture map, and the per-texel
        contributions from all views are averaged so overlapping surface
        regions stay consistent across views.
        """
        self.init_render_cache()
        num_inference_steps = 50
        self.diffusion.scheduler.set_timesteps(num_inference_steps)
        # strength 1.0 -> keep the full timestep schedule
        timesteps, num_inference_steps = self.diffusion.get_timesteps(num_inference_steps, 1.0)
        # Per-view latent state; 10 slots assumed to bound the number of views -- TODO confirm
        self.latent_cache = [{'latent': None,'latent_origin': None} for _ in range(10)]

        for i, t in tqdm(enumerate(timesteps)):
            # init the uvmap mask and all changed uvmap
            uvmap_mask = torch.zeros_like(self.mesh_model.texture_img.data)  # accumulated per-texel weights (from update_latent_uvmap)
            uvmap_all = torch.zeros_like(self.mesh_model.texture_img.data)   # accumulated per-texel values
            self.denoise_step = t
            self.img_idx = 0

            # get the latent origin and fusion them into the uvmap
            for self.view_idx, data in enumerate(self.dataloaders['train']):

                # only process views up to the configured debug limit
                if self.view_idx > self.debug_view_num:
                    break

                logger.info(f'--- Painting step #{self.view_idx} ---')
                theta, phi, radius = data['theta'], data['phi'], data['radius']
                # If offset of phi was set from code
                phi = phi - np.deg2rad(self.cfg.render.front_offset)
                phi = float(phi + 2 * np.pi if phi < 0 else phi)  # wrap phi into [0, 2*pi)
                logger.info(f'Painting from theta: {theta}, phi: {phi}, radius: {radius}')

                # Encode the background image so rendering happens in latent space
                background = F.interpolate(self.back_im.unsqueeze(0),
                                    (512, 512),
                                    mode='bilinear', align_corners=False)
                background = self.diffusion.encode_imgs(background)
                self.mesh_model.background = background

                # Render from viewpoint
                outputs = self.mesh_model.render(theta=theta, phi=phi, radius=radius, background=background, render_cache = self.render_cache[self.view_idx]['render_cache'])
                render_cache = outputs['render_cache']
                self.render_cache[self.view_idx]['render_cache'] = render_cache
                depth_render = outputs['depth']

                # Render again with the median value to use as rgb, we shouldn't have color leakage, but just in case
                outputs = self.mesh_model.render(background=background,
                                                render_cache=render_cache, use_median=False)
                rgb_render = outputs['image']

                self.log_overlap_train_image(rgb_render[:,:3,:,:], 'rendered_input')
                # self.log_overlap_train_image(self.diffusion.decode_latents(rgb_render), 'rendered_input')
                self.log_overlap_train_image(depth_render[0, 0], 'depth', colormap=True)

                # text embeddings
                text_z, update_mask = self.render_cache[self.view_idx]['text_z'], self.render_cache[self.view_idx]['update_mask']

                # input the cropped_rgb_render and output the cropped_rgb_output
                latent = self.diffusion.overlap_latent_step(text_z, 
                                                        rgb_render.detach() ,
                                                        depth_render.detach(),
                                                        guidance_scale=self.cfg.guide.guidance_scale,
                                                        i_num=i,
                                                        denoise_step=t,
                                                        update_mask=update_mask,
                                                        fixed_seed=self.cfg.optim.seed,
                                                        intermediate_vis=self.cfg.log.vis_diffusion_steps,
                                                        prev_latent=self.latent_cache[self.view_idx]['latent'])
                self.latent_cache[self.view_idx]['latent'] = latent
                # Rewind the PNDM step counter so every view is denoised at the same
                # scheduler step -- NOTE(review): relies on scheduler internals, confirm
                if "PNDM" in self.diffusion.scheduler.__class__.__name__:
                    self.diffusion.scheduler.counter -= 1

                self.log_overlap_train_image(self.diffusion.decode_latents(latent), 'direct_output')

                # project back to get the updated uvmap
                uvmap_mask, uvmap_all = self.update_latent_uvmap(object_mask=update_mask,
                                                                render_cache=render_cache,
                                                                background=background,
                                                                latent=latent,
                                                                uvmap_mask=uvmap_mask,
                                                                uvmap_all=uvmap_all)

            # Advance the PNDM counter once per timestep for the whole view set
            if "PNDM" in self.diffusion.scheduler.__class__.__name__:
                self.diffusion.scheduler.counter += 1

            # Average the accumulated texels; texels untouched by any view get weight 1
            uvmap_mask = torch.where(uvmap_mask == 0, torch.tensor(1.0).to(uvmap_mask.device), uvmap_mask) # avoid the zero division
            mean_uvmap = torch.div(uvmap_all, uvmap_mask)
            self.log_overlap_train_image(mean_uvmap[:,:3,:,:], name='mean_uvmap_change')
            self.log_overlap_train_image((uvmap_mask/uvmap_mask.max())[:,:3,:,:], name='uvmap_mask_in_all')
            self.mesh_model.texture_img.data = mean_uvmap
            self.log_overlap_train_image(self.mesh_model.texture_img[:,:3,:,:], name='uvmap_after_change')

    def paint_viewpoint(self, data: Dict[str, Any]):
        """Paint the texture from a single viewpoint (sequential painting step).

        Renders the current texture from the view described by ``data``,
        builds a generate/refine/keep trimap, runs a depth-conditioned img2img
        diffusion step on the cropped object region, and projects the result
        back onto the texture and meta-texture maps.

        Args:
            data: one dataloader item holding 'theta', 'phi', 'radius' and,
                when direction-conditioning is enabled, 'dir'.
        """
        logger.info(f'--- Painting step #{self.view_idx} ---')
        theta, phi, radius = data['theta'], data['phi'], data['radius']
        # If offset of phi was set from code
        phi = phi - np.deg2rad(self.cfg.render.front_offset)
        phi = float(phi + 2 * np.pi if phi < 0 else phi)  # wrap phi into [0, 2*pi)
        logger.info(f'Painting from theta: {theta}, phi: {phi}, radius: {radius}')

        # Set background image
        if self.cfg.guide.use_background_color:
            background = torch.Tensor([0, 0.8, 0]).to(self.device)
        else:
            background = F.interpolate(self.back_im.unsqueeze(0),
                                       (self.cfg.render.train_grid_size, self.cfg.render.train_grid_size),
                                       mode='bilinear', align_corners=False)

        # Render from viewpoint
        outputs = self.mesh_model.render(theta=theta, phi=phi, radius=radius, background=background)
        render_cache = outputs['render_cache']
        rgb_render_raw = outputs['image']  # Render where missing values have special color
        depth_render = outputs['depth']
        # Render again with the median value to use as rgb, we shouldn't have color leakage, but just in case
        outputs = self.mesh_model.render(background=background,
                                         render_cache=render_cache, use_median=self.view_idx > 1)
        rgb_render = outputs['image']
        # Render meta texture map
        meta_output = self.mesh_model.render(background=torch.Tensor([0, 0, 0]).to(self.device),
                                             use_meta_texture=True, render_cache=render_cache)

        # z-component of the rendered normals, plus the cached values from earlier views
        z_normals = outputs['normals'][:, -1:, :, :].clamp(0, 1)
        z_normals_cache = meta_output['image'].clamp(0, 1)
        edited_mask = meta_output['image'].clamp(0, 1)[:, 1:2]

        self.log_train_image(rgb_render, 'rendered_input')
        self.log_train_image(depth_render[0, 0], 'depth', colormap=True)
        self.log_train_image(z_normals[0, 0], 'z_normals', colormap=True)
        self.log_train_image(z_normals_cache[0, 0], 'z_normals_cache', colormap=True)

        # text embeddings
        if self.cfg.guide.append_direction:
            dirs = data['dir']  # [B,]
            text_z = self.text_z[dirs]
            text_string = self.text_string[dirs]
        else:
            text_z = self.text_z
            text_string = self.text_string
        logger.info(f'text: {text_string}')

        # Trimap: where to generate from scratch, refine, or keep untouched
        update_mask, generate_mask, refine_mask = self.calculate_trimap(rgb_render_raw=rgb_render_raw,
                                                                        depth_render=depth_render,
                                                                        z_normals=z_normals,
                                                                        z_normals_cache=z_normals_cache,
                                                                        edited_mask=edited_mask,
                                                                        mask=outputs['mask'])

        # Skip near-empty editing steps when editing against a reference texture
        update_ratio = float(update_mask.sum() / (update_mask.shape[2] * update_mask.shape[3]))
        if self.cfg.guide.reference_texture is not None and update_ratio < 0.01:
            logger.info(f'Update ratio {update_ratio:.5f} is small for an editing step, skipping')
            return

        self.log_train_image(rgb_render * (1 - update_mask), name='masked_input')
        self.log_train_image(rgb_render * refine_mask, name='refine_regions')

        # Crop to inner region based on object mask
        min_h, min_w, max_h, max_w = utils.get_nonzero_region(outputs['mask'][0, 0])
        crop = lambda x: x[:, :, min_h:max_h, min_w:max_w]
        cropped_rgb_render = crop(rgb_render)
        cropped_depth_render = crop(depth_render)
        cropped_update_mask = crop(update_mask)
        self.log_train_image(cropped_rgb_render, name='cropped_input')

        # From the third view on, dither refine-only regions with a checkerboard
        checker_mask = None
        if self.view_idx > 1:
            checker_mask = self.generate_checkerboard(crop(update_mask), crop(refine_mask),
                                                      crop(generate_mask))
            self.log_train_image(F.interpolate(cropped_rgb_render, (512, 512)) * (1 - checker_mask),
                                 'checkerboard_input')
        self.diffusion.use_inpaint = self.cfg.guide.use_inpainting and self.view_idx > 1

        # Depth-conditioned img2img diffusion on the cropped view
        cropped_rgb_output, steps_vis = self.diffusion.img2img_step(text_z, cropped_rgb_render.detach(),
                                                                    cropped_depth_render.detach(),
                                                                    guidance_scale=self.cfg.guide.guidance_scale,
                                                                    strength=1.0, update_mask=cropped_update_mask,
                                                                    fixed_seed=self.cfg.optim.seed,
                                                                    check_mask=checker_mask,
                                                                    intermediate_vis=self.cfg.log.vis_diffusion_steps)
        self.log_train_image(cropped_rgb_output, name='direct_output')
        self.log_diffusion_steps(steps_vis)

        cropped_rgb_output = F.interpolate(cropped_rgb_output,
                                           (cropped_rgb_render.shape[2], cropped_rgb_render.shape[3]),
                                           mode='bilinear', align_corners=False)

        # Extend rgb_output to full image size
        rgb_output = rgb_render.clone()
        rgb_output[:, :, min_h:max_h, min_w:max_w] = cropped_rgb_output
        self.log_train_image(rgb_output, name='full_output')

        # Project back
        object_mask = outputs['mask']
        fitted_pred_rgb, _ = self.project_back(render_cache=render_cache, background=background, rgb_output=rgb_output,
                                               object_mask=object_mask, update_mask=update_mask, z_normals=z_normals,
                                               z_normals_cache=z_normals_cache)
        self.log_train_image(fitted_pred_rgb, name='fitted')
        self.log_train_image(self.mesh_model.texture_img, name='uvmap')
        return

    def eval_render(self, data):
        """Render one evaluation view.

        Returns HWC-layout tensors: shaded RGB (unpainted areas rendered as
        light gray), the median texture map, the depth map, and the predicted
        z-normals from the meta texture.
        """
        theta, radius = data['theta'], data['radius']
        # Apply the configured front offset and wrap phi into [0, 2*pi)
        phi = data['phi'] - np.deg2rad(self.cfg.render.front_offset)
        if phi < 0:
            phi = phi + 2 * np.pi
        phi = float(phi)
        dim = self.cfg.render.eval_grid_size

        outputs = self.mesh_model.render(theta=theta, phi=phi, radius=radius,
                                         dims=(dim, dim), background='white')
        z_normals = outputs['normals'][:, -1:, :, :].clamp(0, 1)
        rgb_render = outputs['image']

        # Pixels still close to the default texture color are considered unpainted
        default_color = torch.tensor(self.mesh_model.default_color).view(1, 3, 1, 1).to(self.device)
        diff = (rgb_render.detach() - default_color).abs().sum(axis=1)
        uncolored_mask = (diff < 0.1).float().unsqueeze(0)
        # Replace unpainted pixels with a light-gray, normal-shaded color
        gray_shade = utils.color_with_shade([0.85, 0.85, 0.85], z_normals=z_normals,
                                            light_coef=0.3)
        rgb_render = rgb_render * (1 - uncolored_mask) + gray_shade * uncolored_mask

        outputs_with_median = self.mesh_model.render(theta=theta, phi=phi, radius=radius,
                                                     dims=(dim, dim), use_median=True,
                                                     render_cache=outputs['render_cache'])

        meta_output = self.mesh_model.render(theta=theta, phi=phi, radius=radius,
                                             background=torch.Tensor([0, 0, 0]).to(self.device),
                                             use_meta_texture=True, render_cache=outputs['render_cache'])
        pred_z_normals = meta_output['image'][:, :1].detach()

        # Convert NCHW -> NHWC for the returned visualizations
        to_hwc = lambda t: t.permute(0, 2, 3, 1).contiguous()
        rgb_render = to_hwc(rgb_render).clamp(0, 1).detach()
        texture_rgb = to_hwc(outputs_with_median['texture_map']).clamp(0, 1).detach()
        depth_render = to_hwc(outputs['depth']).detach()

        return rgb_render, texture_rgb, depth_render, pred_z_normals

    def cal_update_mask(self, rgb_render_raw: torch.Tensor,
                         depth_render: torch.Tensor,
                         mask: torch.Tensor) -> torch.Tensor:
        """Compute the region eligible for texture updates in this view.

        The update region is simply the object silhouette: every pixel whose
        rendered depth is non-zero. ``rgb_render_raw`` is accepted for
        interface compatibility but currently unused.

        Args:
            rgb_render_raw: rendered RGB image (unused).
            depth_render: rendered depth map; zero marks background.
            mask: render mask whose shape and dtype the result mirrors.

        Returns:
            Tensor shaped like ``mask`` with 1 inside the object and 0 where
            the depth is zero.
        """
        object_mask = torch.ones_like(mask)
        object_mask[depth_render == 0] = 0
        return object_mask
                        
    def calculate_trimap(self, rgb_render_raw: torch.Tensor,
                         depth_render: torch.Tensor,
                         z_normals: torch.Tensor, z_normals_cache: torch.Tensor, edited_mask: torch.Tensor,
                         mask: torch.Tensor):
        """Partition the rendered view into generate / refine / keep regions.

        Returns:
            (update_mask, generate_mask, refine_mask): ``generate_mask`` marks
            (dilated) pixels still showing the default texture color,
            ``refine_mask`` marks already-painted pixels now seen under a
            better z-normal, and ``update_mask`` is their union restricted to
            the object silhouette.
        """
        # Pixels still close to the default texture color were never painted
        diff = (rgb_render_raw.detach() - torch.tensor(self.mesh_model.default_color).view(1, 3, 1, 1).to(
            self.device)).abs().sum(axis=1)
        exact_generate_mask = (diff < 0.1).float().unsqueeze(0)

        # Extend mask
        generate_mask = torch.from_numpy(
            cv2.dilate(exact_generate_mask[0, 0].detach().cpu().numpy(), np.ones((19, 19), np.uint8))).to(
            exact_generate_mask.device).unsqueeze(0).unsqueeze(0)

        update_mask = generate_mask.clone()

        # Object silhouette from depth, eroded to stay clear of boundary pixels
        object_mask = torch.ones_like(update_mask)
        object_mask[depth_render == 0] = 0
        object_mask = torch.from_numpy(
            cv2.erode(object_mask[0, 0].detach().cpu().numpy(), np.ones((7, 7), np.uint8))).to(
            object_mask.device).unsqueeze(0).unsqueeze(0)

        # Generate the refine mask based on the z normals, and the edited mask

        # Refine where this view sees a larger z-normal than the cached best
        refine_mask = torch.zeros_like(update_mask)
        refine_mask[z_normals > z_normals_cache[:, :1, :, :] + self.cfg.guide.z_update_thr] = 1
        if self.cfg.guide.initial_texture is None:
            # Never painted before -> nothing to refine there
            refine_mask[z_normals_cache[:, :1, :, :] == 0] = 0
        elif self.cfg.guide.reference_texture is not None:
            # Editing mode: refine only inside the (dilated) edited region
            refine_mask[edited_mask == 0] = 0
            refine_mask = torch.from_numpy(
                cv2.dilate(refine_mask[0, 0].detach().cpu().numpy(), np.ones((31, 31), np.uint8))).to(
                mask.device).unsqueeze(0).unsqueeze(0)
            refine_mask[mask == 0] = 0
            # Don't use bad angles here
            refine_mask[z_normals < 0.4] = 0
        else:
            # Update all regions inside the object
            refine_mask[mask == 0] = 0

        # Morphological open (erode then dilate) to drop isolated refine pixels
        refine_mask = torch.from_numpy(
            cv2.erode(refine_mask[0, 0].detach().cpu().numpy(), np.ones((5, 5), np.uint8))).to(
            mask.device).unsqueeze(0).unsqueeze(0)
        refine_mask = torch.from_numpy(
            cv2.dilate(refine_mask[0, 0].detach().cpu().numpy(), np.ones((5, 5), np.uint8))).to(
            mask.device).unsqueeze(0).unsqueeze(0)
        update_mask[refine_mask == 1] = 1

        # Outside the object silhouette, keep only freshly generated pixels
        update_mask[torch.bitwise_and(object_mask == 0, generate_mask == 0)] = 0

        # Visualize trimap
        if self.cfg.log.log_images:
            trimap_vis = utils.color_with_shade(color=[112 / 255.0, 173 / 255.0, 71 / 255.0], z_normals=z_normals)
            trimap_vis[mask.repeat(1, 3, 1, 1) == 0] = 1
            trimap_vis = trimap_vis * (1 - exact_generate_mask) + utils.color_with_shade(
                [255 / 255.0, 22 / 255.0, 67 / 255.0],
                z_normals=z_normals,
                light_coef=0.7) * exact_generate_mask

            shaded_rgb_vis = rgb_render_raw.detach()
            shaded_rgb_vis = shaded_rgb_vis * (1 - exact_generate_mask) + utils.color_with_shade([0.85, 0.85, 0.85],
                                                                                                 z_normals=z_normals,
                                                                                                 light_coef=0.7) * exact_generate_mask

            if self.view_idx > 1 or self.cfg.guide.initial_texture is not None:
                refinement_color_shaded = utils.color_with_shade(color=[91 / 255.0, 155 / 255.0, 213 / 255.0],
                                                                 z_normals=z_normals)
                only_old_mask_for_vis = torch.bitwise_and(refine_mask == 1, exact_generate_mask == 0).float().detach()
                trimap_vis = trimap_vis * 0 + 1.0 * (trimap_vis * (
                        1 - only_old_mask_for_vis) + refinement_color_shaded * only_old_mask_for_vis)
            self.log_train_image(shaded_rgb_vis, 'shaded_input')
            self.log_train_image(trimap_vis, 'trimap')

        return update_mask, generate_mask, refine_mask

    def generate_checkerboard(self, update_mask_inner, improve_z_mask_inner, update_mask_base_inner):
        """Build a 512x512 mask that dithers refine-only regions with a checkerboard."""
        # Coarse 32x32 alternating pattern, upsampled (nearest) to working resolution
        board = torch.ones((1, 1, 32, 32)).to(self.device)
        board[:, :, 0::2, 0::2] = 0
        board[:, :, 1::2, 1::2] = 0
        board = F.interpolate(board, (512, 512))

        checker_mask = F.interpolate(update_mask_inner, (512, 512))
        # Pixels needing refinement (better z-normal) that are not in the base update region
        stale_only = torch.bitwise_and(improve_z_mask_inner == 1,
                                       update_mask_base_inner == 0).float()
        stale_only = F.interpolate(stale_only, (512, 512))
        checker_mask[stale_only == 1] = board[stale_only == 1]
        return checker_mask
    
    def project_back_meta_texture(self, render_cache: Dict[str, Any]):
        """Fit the meta texture map so its render matches the view's z-normals.

        Runs 200 Adam steps on the mesh model's parameters, minimizing the L2
        distance between the first meta-texture channel and the rendered
        z-normal component inside the view mask. Always returns None.
        """
        optimizer = torch.optim.Adam(self.mesh_model.get_params(), lr=self.cfg.optim.lr, betas=(0.9, 0.99),
                                     eps=1e-15)
        for _ in tqdm(range(200), desc='Fitting meta texture map!'):
            optimizer.zero_grad()
            # update the z_normals
            meta_outputs = self.mesh_model.render(background=torch.Tensor([0, 0, 0]).to(self.device),
                                                use_meta_texture=True, render_cache=render_cache)
            current_z_normals = meta_outputs['image']
            # Optimization target: z-component of the rendered normals for this view
            z_normals_cache = meta_outputs['normals'][:, -1:, :, :].clamp(0, 1)

            # Compare only pixels inside the render mask, first channel only
            current_z_mask = meta_outputs['mask'].flatten()
            masked_current_z_normals = current_z_normals.reshape(1, current_z_normals.shape[1], -1)[:, :,
                                    current_z_mask == 1][:, :1]
            masked_last_z_normals = z_normals_cache.reshape(1, z_normals_cache.shape[1], -1)[:, :,
                                    current_z_mask == 1][:, :1]
            loss = (masked_current_z_normals - masked_last_z_normals.detach()).pow(2).mean()

            loss.backward()
            optimizer.step()

        return None
    
    def project_back_overlap(self, render_cache: Dict[str, Any], background: Any, rgb_output: torch.Tensor, update_mask: torch.Tensor,
                     object_mask: torch.Tensor, z_normals_cache = None):
        """Bake ``rgb_output`` into the texture map for one view (overlap variant).

        Like ``project_back`` but without silhouette erosion, strict
        projection, or the meta-texture loss term. ``z_normals_cache`` is
        accepted for interface compatibility and unused.

        Returns:
            The rendered image from the final optimization step.
        """
        # object_mask = torch.from_numpy(
        #     cv2.erode(object_mask[0, 0].detach().cpu().numpy(), np.ones((5, 5), np.uint8))).to(
        #     object_mask.device).unsqueeze(0).unsqueeze(0)
        render_update_mask = object_mask.clone()

        render_update_mask[update_mask == 0] = 0

        # Dilate + gaussian blur to soften the edges of the fitted region
        blurred_render_update_mask = torch.from_numpy(
            cv2.dilate(render_update_mask[0, 0].detach().cpu().numpy(), np.ones((25, 25), np.uint8))).to(
            render_update_mask.device).unsqueeze(0).unsqueeze(0)
        blurred_render_update_mask = utils.gaussian_blur(blurred_render_update_mask, 21, 16)

        # Do not get out of the object
        blurred_render_update_mask[object_mask == 0] = 0

        render_update_mask = blurred_render_update_mask
        self.log_overlap_train_image(rgb_output * render_update_mask, 'project_back_input')

        optimizer = torch.optim.Adam(self.mesh_model.get_params(), lr=self.cfg.optim.lr, betas=(0.9, 0.99),
                                     eps=1e-15)
        for _ in tqdm(range(200), desc='fitting mesh colors'):
            optimizer.zero_grad()
            outputs = self.mesh_model.render(background=background,
                                             render_cache=render_cache)
            rgb_render = outputs['image']

            # Masked L2 between render and target; the second term evaluates to
            # zero and contributes no gradient -- NOTE(review): appears to be a no-op
            mask = render_update_mask.flatten()
            masked_pred = rgb_render.reshape(1, rgb_render.shape[1], -1)[:, :, mask > 0]
            masked_target = rgb_output.reshape(1, rgb_output.shape[1], -1)[:, :, mask > 0]
            masked_mask = mask[mask > 0]
            loss = ((masked_pred - masked_target.detach()).pow(2) * masked_mask).mean() + (
                    (masked_pred - masked_pred.detach()).pow(2) * (1 - masked_mask)).mean()

            loss.backward()
            optimizer.step()

        return rgb_render
    
    def project_back(self, render_cache: Dict[str, Any], background: Any, rgb_output: torch.Tensor,
                     object_mask: torch.Tensor, update_mask: torch.Tensor, z_normals: torch.Tensor,
                     z_normals_cache: torch.Tensor):
        """Bake ``rgb_output`` into the texture map by optimizing texture parameters.

        Fits the mesh texture for 200 Adam steps so that rendering through
        ``render_cache`` reproduces ``rgb_output`` inside a blurred update
        region, while also fitting the meta texture to the updated
        best-z-normal cache. Mutates ``z_normals_cache`` in place.

        Returns:
            (rgb_render, current_z_normals) from the final optimization step.
        """
        # Erode the silhouette to keep optimization away from boundary pixels
        object_mask = torch.from_numpy(
            cv2.erode(object_mask[0, 0].detach().cpu().numpy(), np.ones((5, 5), np.uint8))).to(
            object_mask.device).unsqueeze(0).unsqueeze(0)
        render_update_mask = object_mask.clone()

        render_update_mask[update_mask == 0] = 0

        # Dilate + gaussian blur to soften the edges of the fitted region
        blurred_render_update_mask = torch.from_numpy(
            cv2.dilate(render_update_mask[0, 0].detach().cpu().numpy(), np.ones((25, 25), np.uint8))).to(
            render_update_mask.device).unsqueeze(0).unsqueeze(0)
        blurred_render_update_mask = utils.gaussian_blur(blurred_render_update_mask, 21, 16)

        # Do not get out of the object
        blurred_render_update_mask[object_mask == 0] = 0

        if self.cfg.guide.strict_projection:
            # Binarize the soft mask and drop pixels previously seen under a better angle
            blurred_render_update_mask[blurred_render_update_mask < 0.5] = 0
            # Do not use bad normals
            z_was_better = z_normals + self.cfg.guide.z_update_thr < z_normals_cache[:, :1, :, :]
            blurred_render_update_mask[z_was_better] = 0

        render_update_mask = blurred_render_update_mask
        self.log_train_image(rgb_output * render_update_mask, 'project_back_input')

        # Update the normals
        z_normals_cache[:, 0, :, :] = torch.max(z_normals_cache[:, 0, :, :], z_normals[:, 0, :, :])

        optimizer = torch.optim.Adam(self.mesh_model.get_params(), lr=self.cfg.optim.lr, betas=(0.9, 0.99),
                                     eps=1e-15)
        for _ in tqdm(range(200), desc='fitting mesh colors'):
            optimizer.zero_grad()
            outputs = self.mesh_model.render(background=background,
                                             render_cache=render_cache)
            rgb_render = outputs['image']

            # Masked L2 between render and target; the second term evaluates to
            # zero and contributes no gradient -- NOTE(review): appears to be a no-op
            mask = render_update_mask.flatten()
            masked_pred = rgb_render.reshape(1, rgb_render.shape[1], -1)[:, :, mask > 0]
            masked_target = rgb_output.reshape(1, rgb_output.shape[1], -1)[:, :, mask > 0]
            masked_mask = mask[mask > 0]
            loss = ((masked_pred - masked_target.detach()).pow(2) * masked_mask).mean() + (
                    (masked_pred - masked_pred.detach()).pow(2) * (1 - masked_mask)).mean()

            # Additionally fit the meta texture to the updated z-normal cache
            meta_outputs = self.mesh_model.render(background=torch.Tensor([0, 0, 0]).to(self.device),
                                                  use_meta_texture=True, render_cache=render_cache)
            current_z_normals = meta_outputs['image']
            current_z_mask = meta_outputs['mask'].flatten()
            masked_current_z_normals = current_z_normals.reshape(1, current_z_normals.shape[1], -1)[:, :,
                                       current_z_mask == 1][:, :1]
            masked_last_z_normals = z_normals_cache.reshape(1, z_normals_cache.shape[1], -1)[:, :,
                                    current_z_mask == 1][:, :1]
            loss += (masked_current_z_normals - masked_last_z_normals.detach()).pow(2).mean()
            loss.backward()
            optimizer.step()

        return rgb_render, current_z_normals

    def log_overlap_train_image(self, tensor: torch.Tensor, name: str, colormap=False):
        """Save a per-denoise-step visualization image and bump the image counter.

        Images go to ``vis/train/denoise_step:<step>/`` named by view index,
        running image index and ``name``. No-op when image logging is disabled.

        Args:
            tensor: a (1, C, H, W) image tensor, or a 2-D map when
                ``colormap`` is True (rendered through the seismic colormap).
            name: suffix used in the output filename.
            colormap: apply ``cm.seismic`` instead of treating the input as an
                image tensor.
        """
        if self.cfg.log.log_images:
            if colormap:
                tensor = cm.seismic(tensor.detach().cpu().numpy())[:, :, :3]
            else:
                # (1, C, H, W) -> (H, W, C); equivalent to the previous einops rearrange
                tensor = tensor.squeeze(0).permute(1, 2, 0).detach().cpu().numpy()
            # NOTE(review): ':' in the folder name is invalid on Windows -- confirm Linux-only use
            step_dir = self.train_renders_path / f'denoise_step:{self.denoise_step}'
            step_dir.mkdir(parents=True, exist_ok=True)
            Image.fromarray((tensor * 255).astype(np.uint8)).save(
                step_dir / f'{self.view_idx:03d}_{self.img_idx}_{name}.jpg')
            self.img_idx += 1

    def log_train_image(self, tensor: torch.Tensor, name: str, colormap=False):
        """Save a training visualization image named by the current view index.

        No-op when image logging is disabled.

        Args:
            tensor: a (1, C, H, W) image tensor, or a 2-D map when
                ``colormap`` is True (rendered through the seismic colormap).
            name: suffix used in the output filename.
            colormap: apply ``cm.seismic`` instead of treating the input as an
                image tensor.
        """
        if self.cfg.log.log_images:
            if colormap:
                tensor = cm.seismic(tensor.detach().cpu().numpy())[:, :, :3]
            else:
                # (1, C, H, W) -> (H, W, C); equivalent to the previous einops rearrange
                tensor = tensor.squeeze(0).permute(1, 2, 0).detach().cpu().numpy()
            Image.fromarray((tensor * 255).astype(np.uint8)).save(
                self.train_renders_path / f'{self.view_idx:04d}_{name}.jpg')

    def log_diffusion_steps(self, intermediate_vis: List[Image.Image]):
        """Save intermediate diffusion images for the current view, one file per step."""
        if not intermediate_vis:
            return
        step_folder = self.train_renders_path / f'{self.view_idx:04d}_diffusion_steps'
        step_folder.mkdir(exist_ok=True)
        for step_idx, snapshot in enumerate(intermediate_vis):
            snapshot.save(step_folder / f'{step_idx:02d}_diffusion_step.jpg')

    def save_image(self, tensor: torch.Tensor, path: Path):
        """Save a (1, C, H, W) image tensor to ``path`` as an 8-bit image.

        No-op when image logging is disabled.

        Args:
            tensor: (1, C, H, W) image tensor with values in [0, 1].
            path: destination file path.
        """
        if self.cfg.log.log_images:
            # (1, C, H, W) -> (H, W, C); equivalent to the previous einops rearrange
            array = tensor.squeeze(0).permute(1, 2, 0).detach().cpu().numpy()
            Image.fromarray((array * 255).astype(np.uint8)).save(path)
