import json
import cv2
import numpy as np
import os
from torch.utils.data import Dataset
import random
import pdb
import PIL.Image as Image
import torchvision.transforms as T
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as TF
import math

from os.path import join, exists
from tqdm import tqdm 
from loguru import logger


# from daniel_tools.img_utils import colorize_depth_map

from daniel_tools.img_utils import *

def get_sketch(image_tensor):
    """Derive a sketch (inverted, blurred Canny edges) and a grayscale
    rendition from an RGB image tensor.

    Args:
        image_tensor (torch.Tensor): RGB image of shape [3, H, W] with
            values in [0, 1].

    Returns:
        tuple: (sketch_tensor, grey_tensor), each of shape [3, H, W] with
        values in [0, 1].
    """
    if image_tensor.dim() != 3:
        raise ValueError("Expected input tensor of shape [3,H,W]")
    # [3, H, W] float in [0, 1] -> [H, W, 3] uint8 for OpenCV.
    img_np = (image_tensor.permute(1, 2, 0).numpy() * 255).astype(np.uint8)

    grey_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY)

    # Edge map: Canny, soften with a light blur, then invert so edges
    # appear dark on a white background.
    edge_np = cv2.Canny(image=grey_np, threshold1=5, threshold2=180)
    edge_np = cv2.GaussianBlur(edge_np, (3, 3), sigmaX=0, sigmaY=0)
    edge_np = cv2.bitwise_not(edge_np)

    def _to_three_channels(arr):
        # [H, W] uint8 -> [3, H, W] float tensor in [0, 1].
        t = torch.from_numpy(arr).float() / 255.0
        return t.unsqueeze(0).repeat(3, 1, 1)

    return _to_three_channels(edge_np), _to_three_channels(grey_np)





def resize(sample, new_H, new_W):
    """Bilinearly resize a (C, H, W) tensor to (C, new_H, new_W).

    Antialiasing is enabled so downscaling does not alias.  The batch
    dimension is added and removed around F.interpolate, which expects
    (N, C, H, W) input.
    """
    # The original unpacked sample.shape into unused locals; removed.
    return F.interpolate(
        sample.unsqueeze(0),
        size=(new_H, new_W),
        mode='bilinear',
        align_corners=False,
        antialias=True,
    ).squeeze(0)

def pad(sample, lrtb):
    """Zero-pad a (C, H, W) tensor.

    Args:
        sample: tensor of shape (C, H, W).
        lrtb: (left, right, top, bottom) pad amounts in pixels.

    Returns:
        Padded tensor of shape (C, H + top + bottom, W + left + right).
    """
    left, right, top, bottom = lrtb
    return F.pad(sample, (left, right, top, bottom), mode="constant", value=0)


def crop(sample, y, H, x, W):
    """Return the H-by-W window of a (C, H, W) tensor whose top-left
    corner is at row y, column x (a view, not a copy)."""
    return sample[:, y:y + H, x:x + W]

class RandomIntrins():
    """Simulate varying camera intrinsics by randomly resizing, padding,
    and cropping an image to a randomly chosen (H, W) aspect ratio.

    The input sample is a torch tensor of shape (C, H, W) with values
    normalized to [0, 1]; the output has shape (C, crop_H, crop_W) for
    the randomly selected aspect ratio.
    """
    def __init__(self, target_size=512):
        # Candidate output (H, W) shapes; includes both landscape and
        # portrait ratios.
        self.aspect_ratios = [
                    (320, 960),
                    (384, 800),
                    (448, 672),
                    (512, 608),
                    (576, 544),
                    (640, 480),
                    (704, 448),
                    (768, 416),
                    (832, 384),
                    (896, 352),
                    (960, 320),
                    (1280, 720),
                    (720, 1280),
                    (960, 540), 
                ]
        # NOTE(review): target_size is stored but never read in this class
        # — presumably consumed by callers; confirm before removing.
        self.target_size=target_size

    def __call__(self, sample, ):
        # Pick the output shape at random.
        crop_H, crop_W = random.choice(self.aspect_ratios)
        # NOTE(review): info is built but never used or returned.
        info = {
            'crop_H': crop_H,
            'crop_W': crop_W,
        }

        # height-based resizing: pick a new height between the original
        # and the target crop height, scaling width to keep aspect ratio.
        _, orig_H, orig_W = sample.shape
        new_H = random.randrange(min(orig_H, crop_H), max(orig_H, crop_H)+1)
        new_W = round((new_H / orig_H) * orig_W)
        sample = resize(sample, new_H=new_H, new_W=new_W)

        # pad if necessary so the crop window fits (pads both sides by the
        # full deficit, which over-pads; the excess is cropped away below)
        orig_H, orig_W = sample.shape[1], sample.shape[2]
        l, r, t, b = 0, 0, 0, 0
        if crop_H > orig_H:
            t = b = crop_H - orig_H
        if crop_W > orig_W:
            l = r = crop_W - orig_W
        sample = pad(sample, (l, r, t, b))

        # crop a random window of the chosen shape
        assert sample.shape[1] >= crop_H
        assert sample.shape[2] >= crop_W
        x = random.randint(0, sample.shape[2] - crop_W)
        y = random.randint(0, sample.shape[1] - crop_H)
        sample = crop(sample, y=y, H=crop_H, x=x, W=crop_W)

        return sample

class Outpainting():
    """Simulate an outpainting setup: keep a random sub-window of the
    first three channels and fill the rest of those channels with ones
    (white); any channels beyond the third are passed through unchanged.

    The input sample is a torch tensor of shape (C, H, W), C >= 3,
    normalized to [0, 1].  Output shape equals input shape.
    """
    def __init__(self, target_size=512):
        # Candidate (H, W) aspect ratios used to shape the kept window.
        self.aspect_ratios = [
                    (320, 960),
                    (384, 800),
                    (448, 672),
                    (512, 608),
                    (576, 544),
                    (640, 480),
                    (704, 448),
                    (768, 416),
                    (832, 384),
                    (896, 352),
                    (960, 320),
                ]
        self.target_size=target_size

    def __call__(self, sample, ):
        win_H, win_W = random.choice(self.aspect_ratios)

        # Fit the chosen aspect ratio inside the image, keeping the
        # longer image side fixed.
        _, full_H, full_W = sample.shape
        if win_H > win_W:
            keep_H, keep_W = full_H, int(win_W * full_W / win_H)
        else:
            keep_H, keep_W = int(win_H * full_H / win_W), full_W

        assert sample.shape[1] >= keep_H
        assert sample.shape[2] >= keep_W
        # Random placement of the kept window.
        left = random.randint(0, sample.shape[2] - keep_W)
        top = random.randint(0, sample.shape[1] - keep_H)

        result = torch.ones_like(sample)
        result[:3, top:top + keep_H, left:left + keep_W] = \
            sample[:3, top:top + keep_H, left:left + keep_W]
        result[3:] = sample[3:]
        return result

class SquarePad:
    """Zero-pad a (C, H, W) tensor so that H == W (roughly centered).

    Fixes an off-by-one in the previous version: when the difference
    between the sides was odd, padding both sides by ``diff // 2`` left
    the output one pixel short of square.  The extra pixel now goes on
    the bottom/right.
    """
    def __call__(self, sample):
        orig_H, orig_W = sample.shape[1], sample.shape[2]
        side = max(orig_H, orig_W)
        # Split each deficit between the two sides; give the odd pixel
        # to the bottom/right so the result is exactly side x side.
        t = (side - orig_H) // 2
        b = (side - orig_H) - t
        l = (side - orig_W) // 2
        r = (side - orig_W) - l
        return F.pad(sample, (l, r, t, b), mode="constant", value=0)


from torchvision import transforms
class ObjectronImageDatasetAug(Dataset):
    """Paired image dataset for object-centric translation tasks.

    Each jsonl line describes a (source, target) image pair; an instance
    mask (path derived from the target path) selects the object.  Items
    are returned as HWC tensors in [-1, 1]:
      jpg  -- target with a foreground-mask channel appended
      hint -- masked source (background forced to white)
      ref  -- unmasked source
    """

    def __init__(self, json_files=['./data/DIODE_normal/train.jsonl'],
                 source_aug=None, source_aug_prob=0.1, resolution=768, task='normal'):
        """
        Args:
            json_files: jsonl files; each line holds 'source'/'target'
                paths relative to the jsonl's directory.
            source_aug: optional transform applied to the source tensor.
            source_aug_prob: probability of applying source_aug.
            resolution: square output resolution.
            task: 'normal' selects normal-map-specific background handling.
        """
        self.data = []
        for json_file in json_files:
            with open(json_file, "r") as f:
                data_dir = os.path.dirname(json_file)
                for line in f:
                    data_term = json.loads(line)
                    data_term['data_dir'] = data_dir
                    self.data.append(data_term)
        self.to_Tensor = T.ToTensor()
        self.resolution = resolution
        # NEAREST keeps the discrete mask labels (0/1/2) intact through
        # the joint resize below.
        self.resize_transform = T.Resize((self.resolution, self.resolution), interpolation=Image.NEAREST)
        self.source_aug = source_aug
        self.source_aug_prob = source_aug_prob
        self.task = task

    def preprocess_image(self, input_image: Image.Image, mask: Image.Image = None):
        """Crop the image (and mask) to a padded square around the
        mask == 1 region.

        Returns:
            tuple: (input_image, mask), both cropped when the mask is
            non-empty; otherwise unchanged.
        """
        input_np = np.array(input_image)

        if mask is not None:
            mask_np = np.array(mask)
            binary_mask = (mask_np == 1).astype(np.float32)

            # Bounding box of the masked region (argwhere yields (row, col)).
            coords = np.argwhere(binary_mask > 0)
            if len(coords) > 0:
                bbox = np.min(coords[:, 1]), np.min(coords[:, 0]), np.max(coords[:, 1]), np.max(coords[:, 0])
                center = (bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2
                size = max(bbox[2] - bbox[0], bbox[3] - bbox[1])
                size = int(size * 1.2)  # add 20% padding around the object

                # Square crop centered on the object...
                bbox = (
                    int(center[0] - size // 2),
                    int(center[1] - size // 2),
                    int(center[0] + size // 2),
                    int(center[1] + size // 2)
                )
                # ...clamped to the image bounds.
                bbox = (
                    max(0, bbox[0]),
                    max(0, bbox[1]),
                    min(input_np.shape[1], bbox[2]),
                    min(input_np.shape[0], bbox[3])
                )
                input_image = input_image.crop(bbox)
                mask = mask.crop(bbox)

        return input_image, mask

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # Retry with random indices until an item loads successfully
        # (tolerates corrupt/missing files).
        while True:
            try:
                return self._load_item(idx)
            except Exception as e:
                print(f"Error loading item {idx}: {e}")
                idx = random.randint(0, len(self.data) - 1)

    def _load_item(self, idx):
        item = self.data[idx]

        data_dir = item['data_dir']
        source_path = os.path.join(data_dir, item['source'])
        target_path = os.path.join(data_dir, item['target'])

        # The instance mask lives next to the target with a fixed suffix.
        mask_path = os.path.join(data_dir, item['target']).replace("_nocs.png", "_instances.png")
        source_image = Image.open(source_path)
        source_mask = Image.open(mask_path).resize(source_image.size)
        target_image = Image.open(target_path).resize(source_image.size)

        # Crop all images to the object region.
        source_image, _ = self.preprocess_image(source_image, source_mask)
        target_image, source_mask = self.preprocess_image(target_image, source_mask)

        # Mask the source: object pixels kept, background forced to white.
        source_tensor = self.to_Tensor(source_image.convert('RGB'))
        mask = (self.to_Tensor(source_mask) == 1).float()
        source = source_tensor * mask + (1 - mask)
        reference = source_tensor

        target = self.to_Tensor(target_image.convert('RGB'))
        target_mask = self.to_Tensor(target_image.convert('L')).bool()

        mask[target_mask] = 2.  # zero is background, one is object, two is target
        # Resize everything jointly so images and labels stay aligned.
        cat_data = torch.cat((source, target, reference, mask), 0)
        cat_data = self.resize_transform(cat_data)

        source = cat_data[:3]
        target = cat_data[3:6]
        reference = cat_data[6:9]
        mask = cat_data[9]  # 2D (H, W)

        # Fix: apply the augmentation to the tensor that is actually
        # returned (previously it ran before `source` was overwritten
        # from cat_data, so it had no effect).
        if self.source_aug is not None and random.random() < self.source_aug_prob:
            source = self.source_aug(source)

        source = (source * 2.0) - 1.0
        target = (target * 2.0) - 1.0
        reference = (reference * 2.0) - 1.0

        # Blank out the target background; for normals only the z channel
        # is forced so x/y keep their encoding.
        invalid_mask = mask != 2.
        if self.task == 'normal':
            # Fix: index the z channel with the full 2D mask
            # (invalid_mask[0] was only the first ROW of the mask).
            target[2, invalid_mask] = 1.
        else:
            target[invalid_mask.repeat(3, 1, 1)] = 1.

        fg_mask = mask != 0.
        target = torch.cat((target, fg_mask.unsqueeze(0)), 0)

        # CHW -> HWC
        source = source.permute(1, 2, 0)
        target = target.permute(1, 2, 0)
        reference = reference.permute(1, 2, 0)
        return dict(jpg=target, hint=source, ref=reference)

from torchvision import transforms

class NormalEnhancementDatasetAug(Dataset):
    """Normal-map enhancement dataset.

    Each jsonl line points at a coarse source normal map and a target;
    the foreground mask is derived from the source normals (pixels whose
    decoded vector has non-negligible length).  Items are returned as
    HWC tensors in [-1, 1]:
      jpg  -- target with a foreground-mask channel appended
      hint -- masked source (white background; x flipped for 'normal')
      ref  -- unmasked source
    """

    def __init__(self, json_files=['./data/DIODE_normal/train.jsonl'],
                 source_aug=None, source_aug_prob=0.1, resolution=768, task='normal'):
        """
        Args:
            json_files: jsonl files whose lines hold 'source'/'target'
                paths relative to the jsonl's directory.
            source_aug: optional transform applied to the source tensor.
            source_aug_prob: probability of applying source_aug.
            resolution: square output resolution.
            task: 'normal' enables normal-map-specific handling.
        """
        self.data = []
        for json_file in json_files:
            with open(json_file, "r") as f:
                data_dir = os.path.dirname(json_file)
                for line in f:
                    data_term = json.loads(line)
                    data_term['data_dir'] = data_dir
                    self.data.append(data_term)
        self.to_Tensor = T.ToTensor()
        self.resolution = resolution
        # NEAREST keeps the discrete mask labels intact through resizing.
        self.resize_transform = T.Resize((self.resolution, self.resolution), interpolation=Image.NEAREST)
        self.source_aug = source_aug
        self.source_aug_prob = source_aug_prob
        self.task = task

    def preprocess_image(self, input_image: Image.Image, mask: Image.Image = None):
        """Crop the image (and mask) to a padded square around the
        mask == 1 region.

        Returns:
            tuple: (input_image, mask), both cropped when the mask is
            non-empty; otherwise unchanged.
        """
        input_np = np.array(input_image)

        if mask is not None:
            mask_np = np.array(mask)
            binary_mask = (mask_np == 1).astype(np.float32)

            # Bounding box of the masked region (argwhere yields (row, col)).
            coords = np.argwhere(binary_mask > 0)
            if len(coords) > 0:
                bbox = np.min(coords[:, 1]), np.min(coords[:, 0]), np.max(coords[:, 1]), np.max(coords[:, 0])
                center = (bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2
                size = max(bbox[2] - bbox[0], bbox[3] - bbox[1])
                size = int(size * 1.2)  # add 20% padding around the object

                # Square crop centered on the object...
                bbox = (
                    int(center[0] - size // 2),
                    int(center[1] - size // 2),
                    int(center[0] + size // 2),
                    int(center[1] + size // 2)
                )
                # ...clamped to the image bounds.
                bbox = (
                    max(0, bbox[0]),
                    max(0, bbox[1]),
                    min(input_np.shape[1], bbox[2]),
                    min(input_np.shape[0], bbox[3])
                )
                input_image = input_image.crop(bbox)
                mask = mask.crop(bbox)

        return input_image, mask

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        try:
            return self._load_item(idx)
        except Exception as e:
            # Single retry with a random index; a second failure propagates.
            random_idx = random.randint(0, len(self.data) - 1)
            return self._load_item(random_idx)

    def _load_item(self, idx):
        item = self.data[idx]

        data_dir = item['data_dir']
        source_path = os.path.join(data_dir, item['source'])
        target_path = os.path.join(data_dir, item['target'])

        source_image = Image.open(source_path)

        # Foreground = pixels whose decoded normal vector has
        # non-negligible length (flat background decodes to ~zero).
        source_normal = np.array(source_image) / 255 * 2 - 1
        source_mask = np.linalg.norm(np.array(source_normal), axis=2) > 0.5
        source_mask = Image.fromarray(source_mask.astype(np.uint8)*255)
        target_image = Image.open(target_path).resize(source_image.size)

        # Crop both images to the object region.
        source_image, _ = self.preprocess_image(source_image, source_mask)
        target_image, source_mask = self.preprocess_image(target_image, source_mask)

        # Mask the source: object pixels kept, background forced to white.
        source_tensor = self.to_Tensor(source_image.convert('RGB'))
        mask = (self.to_Tensor(source_mask) == 1).float()
        source = source_tensor * mask + (1 - mask)
        reference = source_tensor

        target = self.to_Tensor(target_image.convert('RGB'))
        target_mask = self.to_Tensor(source_mask).bool()

        mask[target_mask] = 2.  # zero is background, one is object, two is target
        # Resize everything jointly so images and labels stay aligned.
        cat_data = torch.cat((source, target, reference, mask), 0)
        cat_data = self.resize_transform(cat_data)

        source = cat_data[:3]
        target = cat_data[3:6]
        reference = cat_data[6:9]
        mask = cat_data[9]  # 2D (H, W)

        # Fix: apply the augmentation to the tensor that is actually
        # returned (previously it ran before `source` was overwritten
        # from cat_data, so it had no effect).
        if self.source_aug is not None and random.random() < self.source_aug_prob:
            source = self.source_aug(source)

        source = (source * 2.0) - 1.0
        target = (target * 2.0) - 1.0
        reference = (reference * 2.0) - 1.0

        invalid_mask = mask != 2.
        if self.task == 'normal':
            # Flip x to convert between normal-map conventions.
            source[0] = -source[0]
            target[0] = -target[0]
            # Fix: index the z channel with the full 2D mask
            # (invalid_mask[0] was only the first ROW of the mask).
            target[2, invalid_mask] = 1.
            source[invalid_mask.repeat(3, 1, 1)] = 1.
        else:
            target[invalid_mask.repeat(3, 1, 1)] = 1.

        fg_mask = mask == 2.
        target = torch.cat((target, fg_mask.unsqueeze(0)), 0)

        # CHW -> HWC
        source = source.permute(1, 2, 0)
        target = target.permute(1, 2, 0)
        reference = reference.permute(1, 2, 0)
        return dict(jpg=target, hint=source, ref=reference)
    
class NpzDatasetAug(Dataset):
    """Dataset over .npz archives of pre-rendered frame sequences.

    Each jsonl line points at an npz file whose arrays ('color',
    'normal', 'nocs', ...) are aligned per-frame; one frame is sampled
    per item.  Items are returned as HWC tensors in [-1, 1]:
      jpg  -- task target with a validity-mask channel appended
      hint -- color image (or the reference, 10% of the time)
      ref  -- nocs map for the 'normal' task, normal map otherwise
    """
    def __init__(self, json_files=None,
                 source_aug=None, source_aug_prob=0.1, resolution=1024, task='normal'):
        """
        Args:
            json_files: jsonl files whose lines hold 'source_image' npz
                paths relative to the jsonl's directory.
            source_aug: optional transform applied to the source tensor.
            source_aug_prob: probability of applying source_aug.
            resolution: target size of the shorter image side.
            task: npz key of the target modality (e.g. 'normal').
        """
        self.data = []
        for json_file in json_files:
            with open(json_file, "r") as f:
                data_dir = os.path.dirname(json_file)
                for line in f:
                    data_term = json.loads(line)
                    data_term['data_dir'] = data_dir
                    self.data.append(data_term)
        self.to_Tensor = T.ToTensor()
        self.source_aug = source_aug
        self.source_aug_prob = source_aug_prob
        self.task = task
        self.resolution = resolution
        # Resizes the shorter side to `resolution`, preserving aspect.
        self.resize_transform = T.Resize(self.resolution)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        try:
            return self._load_item(idx)
        except Exception as e:
            # Single retry with a random index; a second failure propagates.
            random_idx = random.randint(0, len(self.data) - 1)
            return self._load_item(random_idx)

    def _load_item(self, idx):
        item = self.data[idx]
        data_dir = item['data_dir']
        npz_path = os.path.join(data_dir, item['source_image'])
        _data = np.load(npz_path)
        _data_len = len(_data['color'])

        # Fix: sample a random frame.  A debug override had pinned this
        # to index 16, which also crashes on sequences shorter than 17.
        random_idx = random.randint(0, _data_len - 1)
        prompt = ""

        # Load and convert images to tensors
        source = self.to_Tensor(_data['color'][random_idx])
        target = self.to_Tensor(_data[self.task][random_idx])
        if self.task == 'normal':
            reference = self.to_Tensor(_data['nocs'][random_idx])
        else:
            reference = self.to_Tensor(_data['normal'][random_idx])

        # Apply source augmentation if specified
        if self.source_aug is not None and random.random() < self.source_aug_prob:
            source = self.source_aug(source)

        # Normalize to [-1, 1]
        source = (source * 2.0) - 1.0
        target = (target * 2.0) - 1.0
        reference = (reference * 2.0) - 1.0

        # Resize jointly so all modalities stay pixel-aligned.
        cat_data = torch.cat((source, target, reference), 0)
        cat_data = self.resize_transform(cat_data)
        source = cat_data[:3]
        target = cat_data[3:6]
        reference = cat_data[6:9]

        # Valid pixels are where the encoded vector has roughly unit
        # length after the [-1, 1] mapping.
        if self.task == 'normal':
            target_norm = torch.norm(target, p=2, dim=0, keepdim=True)
            valid_mask = (target_norm > 0.5) & (target_norm < 1.5)
        else:
            reference_norm = torch.norm(reference, p=2, dim=0, keepdim=True)
            valid_mask = (reference_norm > 0.5) & (reference_norm < 1.5)

        invalid_mask = ~valid_mask
        if self.task == 'normal':
            # Only force the z channel so x/y keep their encoding.
            target[2, invalid_mask[0]] = 1.
        else:
            target[invalid_mask.repeat(3, 1, 1)] = 1.
        source[invalid_mask.repeat(3, 1, 1)] = 1.
        reference[invalid_mask.repeat(3, 1, 1)] = 1.

        target = torch.cat((target, valid_mask), 0)
        # Permute dimensions for output (CHW -> HWC)
        source = source.permute(1, 2, 0)
        target = target.permute(1, 2, 0)
        reference = reference.permute(1, 2, 0)

        # 10% of the time condition on the reference instead of the color.
        if random.random() < 0.1:
            return dict(jpg=target, txt=prompt, hint=reference, ref=reference)
        else:
            return dict(jpg=target, txt=prompt, hint=source, ref=reference)

class NpzDatasetAug_plus_RandomIntrins(Dataset):
    """Npz-sequence dataset with an additional random-aspect-ratio crop.

    Like NpzDatasetAug, but the sampled frame is cropped to a random
    aspect ratio (the same window for all modalities) before being
    resized to a square `resolution` output.  Items are HWC tensors in
    [-1, 1] returned as dict(jpg, txt, hint, ref).
    """
    def __init__(self, json_files=None,
                 source_aug=None, source_aug_prob=0.1, 
                 resolution=1024, task='normal'):
        # One record per jsonl line; remember the jsonl's directory so
        # relative npz paths can be resolved later.
        self.data = []
        for json_file in json_files:
            with open(json_file, "r") as f:
                data_dir = os.path.dirname(json_file)
                for line in f:
                    data_term = json.loads(line)
                    data_term['data_dir'] = data_dir
                    self.data += [data_term]
        self.to_Tensor = T.ToTensor()
        self.source_aug = source_aug
        self.source_aug_prob = source_aug_prob
        self.task = task
        self.resolution = resolution
        self.resize_transform = T.Resize((self.resolution, self.resolution), antialias=True)
        
        # Candidate (H, W) crop shapes, landscape and portrait.
        self.aspect_ratios = [
            (320, 960), (384, 800), (448, 672),
            (512, 608), (576, 544), (640, 480),
            (704, 448), (768, 416), (832, 384),
            (896, 352), (960, 320),
        ]
    
    def random_aspect_crop(self, img):
        """Apply random aspect ratio crop based on Outpainting ratios.

        Returns the cropped (C, new_H, new_W) tensor plus the crop
        parameters (x, y, new_W, new_H) so the same window can be
        applied to the other modalities.
        """
        crop_H, crop_W = random.choice(self.aspect_ratios)
        _, orig_H, orig_W = img.shape
        
        # Fit the chosen ratio inside the image, keeping the longer side.
        if crop_H > crop_W:
            new_H = orig_H
            new_W = int(crop_W * orig_W / crop_H)
        else:
            new_H = int(crop_H * orig_H / crop_W)
            new_W = orig_W
            
        # Ensure we have enough space for cropping
        # NOTE(review): defensive — new_H/new_W never exceed the original
        # by construction; if this branch ever ran, the returned crop
        # params would no longer align with the caller's uncropped tensors.
        if orig_H < new_H or orig_W < new_W:
            scale = max(new_H/orig_H, new_W/orig_W)
            img = TF.resize(img, (int(orig_H*scale), int(orig_W*scale)), antialias=True)
            orig_H, orig_W = int(orig_H*scale), int(orig_W*scale)
            
        # Random crop position
        x = random.randint(0, orig_W - new_W)
        y = random.randint(0, orig_H - new_H)
        
        return img[:, y:y+new_H, x:x+new_W], (x, y, new_W, new_H)
        
    def rotate_normal_map_90(self, normal_map):
        """Rotate normal map 90 degrees clockwise and adjust vectors.

        NOTE(review): assumes the map is encoded in [0, 1] — components
        are decoded to [-1, 1], swapped/negated, then re-encoded; confirm
        the encoding with callers.  Currently unused (the rotation branch
        in _load_item is commented out).
        """
        # Rotate image 90 degrees clockwise
        rotated_image = torch.rot90(normal_map, k=1, dims=[1, 2])  # Changed to k=1 for proper orientation
        rotated_image = (rotated_image * 2.0) - 1.0
        
        # Swap x and y components and negate appropriately for 90-degree rotation
        x, y, z = rotated_image[0], rotated_image[1], rotated_image[2]
        # rotated_image = torch.stack([y, -x, z], dim=0)
        rotated_image = torch.stack([-y, -x, z], dim=0)
        rotated_image = (rotated_image + 1) / 2
    
        return rotated_image

    def __len__(self):
        return len(self.data)
        
    def __getitem__(self, idx):
        try:
            return self._load_item(idx)
        except Exception as e:
            # If failed, try a random index (single retry; a second
            # failure propagates)
            random_idx = random.randint(0, len(self.data) - 1)
            return self._load_item(random_idx)

    def _load_item(self, idx):
        """Load one random frame from the idx-th npz sequence."""
        item = self.data[idx]
        data_dir = item['data_dir']
        npz_path = os.path.join(data_dir, item['source_image'])
        _data = np.load(npz_path)
        _data_len = len(_data['color'])
        
        random_idx = random.randint(0, _data_len - 1)
        prompt = ""
        
        # Load and convert images to tensors
        source = self.to_Tensor(_data['color'][random_idx])
        target = self.to_Tensor(_data[self.task][random_idx])
        if self.task == 'normal':
            reference = self.to_Tensor(_data['nocs'][random_idx])
        else:
            reference = self.to_Tensor(_data['normal'][random_idx])
        
        # Random aspect ratio crop - use same crop parameters for all
        source, crop_params = self.random_aspect_crop(source)
        x, y, new_W, new_H = crop_params
        target = target[:, y:y+new_H, x:x+new_W]
        reference = reference[:, y:y+new_H, x:x+new_W]
        is_vertical = new_H > new_W
        
        # Rotation of vertical crops is currently disabled.
        # if is_vertical:
        #     source = torch.rot90(source, k=1, dims=[1, 2])
        #     if self.task == 'normal':
        #         target = self.rotate_normal_map_90(target)
        #         reference = torch.rot90(reference, k=1, dims=[1, 2])
        #     else:
        #         target = torch.rot90(target, k=1, dims=[1, 2])
        #         reference = self.rotate_normal_map_90(reference)

        # Apply source augmentation if specified
        if self.source_aug is not None and random.random() < self.source_aug_prob:
            source = self.source_aug(source)

        # Normalize to [-1, 1]
        source = (source * 2.0) - 1.0
        target = (target * 2.0) - 1.0
        reference = (reference * 2.0) - 1.0
        
        # Resize jointly so all modalities stay pixel-aligned.
        cat_data = torch.cat((source, target, reference),0)
        cat_data = self.resize_transform(cat_data)
        source = cat_data[:3]
        target = cat_data[3:6]
        reference = cat_data[6:9]
        
        # Handle normal map processing: valid pixels have roughly
        # unit-length encoded vectors after the [-1, 1] mapping.
        if self.task == 'normal':
            target_norm = torch.norm(target, p=2, dim=0, keepdim=True)
            valid_mask = (target_norm > 0.5) & (target_norm < 1.5)
        else:
            reference_norm = torch.norm(reference, p=2, dim=0, keepdim=True)
            valid_mask = (reference_norm > 0.5) & (reference_norm < 1.5)
        
        invalid_mask = ~valid_mask
        if self.task == 'normal':
            # Only the z channel is forced so x/y keep their encoding.
            target[2, invalid_mask[0]] = 1.
        else:
            target[invalid_mask.repeat(3, 1, 1)] = 1.
        source[invalid_mask.repeat(3, 1, 1)] = 1.
        reference[invalid_mask.repeat(3, 1, 1)] = 1.

        target = torch.cat((target, valid_mask), 0)
        
        # Permute dimensions for output (CHW -> HWC)
        source = source.permute(1, 2, 0)
        target = target.permute(1, 2, 0)
        reference = reference.permute(1, 2, 0)
        
        
        # Return with random condition: 10% of the time the hint is the
        # reference instead of the color image.
        if random.random() < 0.1:
            return dict(jpg=target, txt=prompt, hint=reference, ref=reference)
        else:
            return dict(jpg=target, txt=prompt, hint=source, ref=reference)
    
import pyexr

def blender_world_normal_2_camera(normals_world: np.ndarray, c2w: np.ndarray) -> np.ndarray:
    """Map world-space normals (Blender convention) into a camera-space
    normal image encoded in [0, 1].

    Args:
        normals_world: (H, W, 3) or (H, W, 4) array; a 4th channel is
            treated as alpha and copied through unchanged.
        c2w: camera-to-world matrix; only the top-left 3x3 rotation is used.

    Returns:
        (H, W, C) float32 array with xyz components remapped to [0, 1].
    """
    assert len(normals_world.shape) == 3, "Normal must be a 3D vector."
    H, W, C = normals_world.shape

    out = np.zeros((H, W, C), dtype=np.float32)

    # Preserve alpha, then operate on xyz only.
    if C == 4:
        out[..., 3] = normals_world[..., 3]
        normals_world = normals_world[..., :3]

    # Blender's camera axes differ from OpenCV's: fold the axis swap into
    # the rotation, then transpose to get world -> camera.
    axis_swap = np.array(
        [[1, 0, 0],
         [0, 0, 1],
         [0, -1, 0]], dtype=np.float32)
    world_to_cam = (c2w[:3, :3] @ axis_swap).T

    flat = normals_world.reshape(-1, 3)
    cam = (flat @ world_to_cam.T).reshape(H, W, 3)

    # Encode to [0, 1]; note the y/z channel swap and the sign flip on the
    # component written into channel 2.
    out[..., :1] = cam[..., :1] * 0.5 + 0.5
    out[..., 2:3] = cam[..., 1:2] * -0.5 + 0.5
    out[..., 1:2] = cam[..., 2:3] * 0.5 + 0.5

    return out

class JsonDatasetAug(Dataset):
    """Dataset over JSONL metadata records pointing at per-sequence
    ``transforms.json`` files.

    Each sample is a (source, target, reference) image triplet for the
    configured task ('normal', 'base_color', 'material', 'delight',
    'specular' or 'depth'), returned as HWC tensors in [-1, 1] plus a
    validity mask concatenated onto the target.
    """

    def __init__(self, json_files=None,
                 source_aug=None, source_aug_prob=0.1, resolution=1024, task='normal'):
        """
        Args:
            json_files (list): JSONL files with one JSON record per line;
                every record gains a 'data_dir' key (the file's directory).
            source_aug: Optional augmentation transform for source images.
            source_aug_prob (float): Probability of applying ``source_aug``.
            resolution (int): Shorter-side size passed to torchvision Resize.
            task (str): Task type, e.g. 'normal', 'base_color', etc.
        """
        self.data = []
        for json_file in json_files:
            data_dir = os.path.dirname(json_file)
            with open(json_file, "r") as f:
                for line in f:
                    data_term = json.loads(line)
                    data_term['data_dir'] = data_dir
                    self.data.append(data_term)

        self.to_tensor = T.ToTensor()
        self.source_aug = source_aug
        self.source_aug_prob = source_aug_prob
        self.task = task
        self.resolution = resolution
        self.resize_transform = T.Resize(self.resolution)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # Errors propagate to the caller. (A retry-with-random-index loop used
        # to follow this unconditional return and was unreachable dead code;
        # it has been removed.)
        return self._load_item(idx)

    def _load_item(self, idx):
        """Load and assemble one training sample.

        Returns:
            dict: 'jpg' = target with validity mask appended (H, W, 4),
            'txt' = prompt string, 'hint' = source (H, W, 3),
            'ref' = reference (H, W, 3); image values in [-1, 1].

        Raises:
            FileNotFoundError: if the sequence's transforms.json is missing.
            ValueError: for an unsupported ``task``.
        """
        item = self.data[idx]
        data_dir = item['data_dir']

        # 'source_image' points at a per-sequence transforms.json file.
        transforms_path = os.path.join(data_dir, item['source_image'])
        sequence_dir = os.path.dirname(transforms_path)

        if not os.path.exists(transforms_path):
            # Raise instead of exit(): calling exit() from a dataset worker
            # kills the whole process (the subclass already raises here).
            raise FileNotFoundError(f"Error: transforms.json not found at {transforms_path}")

        with open(transforms_path, 'r') as f:
            transforms = json.load(f)

        frames = transforms["frames"]
        if isinstance(frames, dict):
            # Frames are grouped per HDRI id: pick a random HDRI, then a
            # random frame within it.
            hdri_ids = list(frames.keys())
            random_hdri_id = random.choice(hdri_ids)
            frames_list = frames[random_hdri_id]

            # If hdri_id is not numeric, use its index
            hdri_id = hdri_ids.index(random_hdri_id)
            random_frame = random.choice(frames_list)
            frame_path = random_frame["file_path"]
            frame_id = int(frame_path.split("/")[-1].split("_")[0])

            # NOTE(review): mirrors the else-branch definition (file stem);
            # previously undefined on this path, which raised NameError for
            # the 'delight'/'specular' tasks — confirm the naming scheme.
            full_frame_id = frame_path.split("/")[-1].split(".")[0]

            # Source image rendered under the selected HDRI.
            source_path = os.path.join(sequence_dir, f"{frame_id:03d}_hdri_{hdri_id:02d}.png")
        else:
            hdri_id = 0
            random_frame = random.choice(frames)
            frame_path = random_frame["file_path"]
            frame_id = frame_path.split("/")[-1].split(".")[0]
            source_path = os.path.join(sequence_dir, f"{frame_id}.png")
            full_frame_id = frame_id
            # G-buffer files appear to be stored under the '<idx>_000' id —
            # TODO confirm against the data layout.
            frame_id = frame_id.split("_")[0] + "_000"

        # Camera-to-world matrix, consumed by _load_exr for normal maps.
        self.current_transform_matrix = random_frame.get("transform_matrix")

        prompt = item.get('prompt', "")

        source = self._load_image(source_path)
        source = source ** (1/2.2)  # gamma-encode the linear render

        # Load target based on specified task
        if self.task == 'normal':
            target_path = os.path.join(sequence_dir, "normal", f"{frame_id}_normal.exr")
            target = self._load_exr(target_path, item, task='normal')
        elif self.task == 'base_color':
            target_path = os.path.join(sequence_dir, "Base Color", f"{frame_id}_Base Color.png")
            target = self._load_image(target_path)
        elif self.task == 'material':
            # Pack metallic/roughness into the G/B channels (R left zero).
            metal_path = os.path.join(sequence_dir, "Metallic", f"{frame_id}_Metallic.exr")
            rough_path = os.path.join(sequence_dir, "Roughness", f"{frame_id}_Roughness.exr")
            metal_png = self._load_exr(metal_path)
            rough_png = self._load_exr(rough_path)
            target = torch.cat((torch.zeros_like(metal_png), metal_png, rough_png), dim=0)
        elif self.task == 'delight':
            # Diffuse direct lighting times albedo, gamma-encoded.
            albedo_path = os.path.join(sequence_dir, "albedo", f"{frame_id}_albedo.exr")
            diffdir_path = os.path.join(sequence_dir, "diffdir", full_frame_id.replace("_env", "_diffdir_env.exr"))
            target = self._load_exr(diffdir_path) * self._load_exr(albedo_path)
            target = target ** (1/2.2)
        elif self.task == 'specular':
            # Glossy color times glossy direct lighting, gamma-encoded.
            glossycol_path = os.path.join(sequence_dir, "glossycol", f"{frame_id}_glossycol.exr")
            glossydir_path = os.path.join(sequence_dir, "glossydir", full_frame_id.replace("_env", "_glossydir_env.exr"))
            target = self._load_exr(glossycol_path) * self._load_exr(glossydir_path)
            target = target ** (1/2.2)
        elif self.task == 'depth':
            target_path = os.path.join(sequence_dir, "depth", f"{frame_id}_depth.exr")
            target = self._load_exr_depth(target_path)
        else:
            raise ValueError(f"Unsupported task: {self.task}")

        # Load reference image
        if self.task == 'normal':
            # Low-resolution normal map if present, else fall back to source.
            ref_path = os.path.join(sequence_dir, "low_normal", f"{frame_id}_low_normal.exr")
            if os.path.exists(ref_path):
                reference = self._load_exr(ref_path, item, task='normal')
            else:
                reference = source
        else:
            # For other tasks, use the normal map as reference.
            ref_path = os.path.join(sequence_dir, "normal", f"{frame_id}_normal.exr")
            reference = self._load_exr(ref_path, item, task='normal')

        # Apply source augmentation if specified
        if self.source_aug is not None and random.random() < self.source_aug_prob:
            source = self.source_aug(source)

        # Normalize to [-1, 1]
        source = (source * 2.0) - 1.0
        target = (target * 2.0) - 1.0
        reference = (reference * 2.0) - 1.0

        source = self.resize_transform(source)
        target = self.resize_transform(target)
        reference = self.resize_transform(reference)

        # Validity: pixels whose normal is close to unit length. For the
        # 'normal' task the x-component is also sign-flipped (convention swap).
        if self.task == 'normal':
            target[0] = -target[0]
            reference[0] = -reference[0]
            target_norm = torch.norm(target, p=2, dim=0, keepdim=True)
            valid_mask = (target_norm > 0.5) & (target_norm < 1.5)
        else:
            reference[0] = -reference[0]
            reference_norm = torch.norm(reference, p=2, dim=0, keepdim=True)
            valid_mask = (reference_norm > 0.5) & (reference_norm < 1.5)

        # White out invalid pixels in all three maps.
        invalid_mask = ~valid_mask
        target[invalid_mask.repeat(3, 1, 1)] = 1.
        source[invalid_mask.repeat(3, 1, 1)] = 1.
        reference[invalid_mask.repeat(3, 1, 1)] = 1.

        # Append the validity mask as a 4th target channel.
        target = torch.cat((target, valid_mask), 0)

        # [C, H, W] -> [H, W, C] for output
        source = source.permute(1, 2, 0)
        target = target.permute(1, 2, 0)
        reference = reference.permute(1, 2, 0)

        return dict(jpg=target, txt=prompt, hint=source, ref=reference)

    def _load_image(self, path):
        """Load a regular image file as a [3, H, W] float tensor in [0, 1]."""
        with open(path, 'rb') as f:
            img = Image.open(f).convert('RGB')
            img = self.to_tensor(img)
            return img

    def _load_exr(self, path, frame_data=None, task=None):
        """Load an EXR file as a [3, H, W] float tensor.

        For ``task='normal'`` the map is rotated from world to camera space
        using the transform matrix stored by _load_item. Missing files yield
        an all-zero tensor at the configured resolution.
        """
        if not os.path.exists(path):
            return torch.zeros(3, self.resolution, self.resolution)

        exr_data = pyexr.read(str(path))

        if task == 'normal' and hasattr(self, 'current_transform_matrix') and self.current_transform_matrix:
            exr_data = blender_world_normal_2_camera(
                exr_data,
                np.array(self.current_transform_matrix)
            )

        # Use only the first 3 channels.
        tensor = torch.from_numpy(exr_data[..., :3].transpose(2, 0, 1)).float()
        return tensor

    def _load_exr_depth(self, path):
        """Load a depth EXR as a [3, H, W] tensor, zeroing far values (>1000)
        and normalizing by the maximum; missing files yield zeros."""
        if not os.path.exists(path):
            return torch.zeros(3, self.resolution, self.resolution)

        depth_map = pyexr.read(str(path))

        # Treat very large readings as background.
        mask = depth_map[..., 0] > 1000
        depth_map[mask] = 0
        max_val = np.max(depth_map)
        depth_map_normalized = depth_map / max_val if max_val > 0 else depth_map

        # Repeat the single depth channel into 3 channels.
        depth_tensor = torch.from_numpy(
            np.repeat(depth_map_normalized[..., 0:1], 3, axis=2).transpose(2, 0, 1)
        ).float()

        return depth_tensor


class JsonDatasetAug_plus_RandomIntrins(JsonDatasetAug):
    # Variant of JsonDatasetAug that applies a random aspect-ratio
    # resize/pad/crop ("random intrinsics") before the final square resize.

    def __init__(self, json_files=None, 
                 source_aug=None, source_aug_prob=0.1, resolution=1024, task='normal'):
        """
        Dataset class that extends JsonDatasetAug with random intrinsics transformations
        
        Args:
            json_files (list): List of JSON files containing dataset metadata
            source_aug: Optional augmentation transforms for source images
            source_aug_prob (float): Probability of applying source augmentation
            resolution (int): Target resolution for images
            task (str): Task type, e.g., 'normal', 'base_color', etc.
        """
        super().__init__(json_files, source_aug, source_aug_prob, resolution, task)
        
        # Candidate (H, W) crop shapes drawn uniformly in random_aspect_crop.
        self.aspect_ratios = [
            (320, 960), (384, 800), (448, 672),
            (512, 608), (576, 544), (640, 480),
            (704, 448), (768, 416), (832, 384),
            (896, 352), (960, 320), (960, 540),
        ]
        
        # Update resize transform to ensure fixed output resolution
        self.resize_transform = T.Resize((self.resolution, self.resolution), 
                                         interpolation=Image.BILINEAR, 
                                         antialias=True)
    
    def random_aspect_crop(self, img):
        """Apply random aspect ratio crop based on predefined ratios.

        Args:
            img (torch.Tensor): [C, H, W] image tensor.

        Returns:
            tuple: (cropped_img, (x, y, crop_W, crop_H)); the (x, y) offsets
            are in the coordinate frame of the RESIZED/PADDED image, not the
            input image.
        """
        crop_H, crop_W = random.choice(self.aspect_ratios)
        _, orig_H, orig_W = img.shape
        # height-based resizing: pick a new height between the original height
        # and the target crop height, scale width to preserve aspect ratio
        new_H = random.randrange(min(orig_H, crop_H), max(orig_H, crop_H)+1)
        new_W = round((new_H / orig_H) * orig_W)
        
        # Resize using existing function from RandomIntrins (module-level helper)
        img = resize(img, new_H=new_H, new_W=new_W)
            
        # Pad symmetrically if the resized image is smaller than the crop.
        # NOTE(review): an odd size difference pads (diff // 2) on each side,
        # leaving the image 1px short of the crop size and tripping the
        # asserts below — confirm sizes are always even-compatible.
        orig_H, orig_W = img.shape[1], img.shape[2]
        l, r, t, b = 0, 0, 0, 0
        if crop_H > orig_H:
            t = b = (crop_H - orig_H) // 2
        if crop_W > orig_W:
            l = r = (crop_W - orig_W) // 2
        
        # Apply padding (pad/crop are module-level helpers)
        if any([l, r, t, b]):
            img = pad(img, (l, r, t, b))
        
        # Crop at a random position inside the padded image
        assert img.shape[1] >= crop_H
        assert img.shape[2] >= crop_W
        x = random.randint(0, img.shape[2] - crop_W)
        y = random.randint(0, img.shape[1] - crop_H)
        img = crop(img, y=y, H=crop_H, x=x, W=crop_W)
        
        return img, (x, y, crop_W, crop_H)
    
    def rotate_normal_map_90(self, normal_map):
        """Rotate normal map 90 degrees clockwise and adjust vectors.

        Currently not called from _load_item in this class.
        """
        # Rotate image 90 degrees clockwise
        rotated_image = torch.rot90(normal_map, k=1, dims=[1, 2])
        
        # Swap x and y components and negate appropriately for 90-degree rotation
        x, y, z = rotated_image[0], rotated_image[1], rotated_image[2]
        rotated_image = torch.stack([-y, x, z], dim=0)
        
        return rotated_image
    
    def _load_item(self, idx):
        """
        Override _load_item to apply random intrinsics transformations.

        Returns:
            dict: 'jpg' = target + validity mask (H, W, 4), 'txt' = prompt,
            'hint' = source (H, W, 3), 'ref' = reference (H, W, 3).
        """
        item = self.data[idx]
        data_dir = item['data_dir']
        
        # Extract sequence directory from source_image path
        transforms_path = os.path.join(data_dir, item['source_image'])
        sequence_dir = os.path.dirname(transforms_path)
                
        if os.path.exists(transforms_path):
            with open(transforms_path, 'r') as f:
                transforms = json.load(f)

            # Get frames from transforms
            frames = transforms["frames"]
            if isinstance(frames, dict):
                # Randomly select an HDRI ID
                hdri_ids = list(frames.keys())
                random_hdri_id = random.choice(hdri_ids)
                
                # Randomly select a frame from the chosen HDRI
                frames_list = frames[random_hdri_id]
                
                # If hdri_id is not numeric, use its index
                hdri_id = hdri_ids.index(random_hdri_id)
                random_frame = random.choice(frames_list)
                frame_path = random_frame["file_path"]
                frame_id = int(frame_path.split("/")[-1].split("_")[0])
                
                # Load source image (HDRI) based on the randomly selected frame_id and hdri_id
                source_path = os.path.join(sequence_dir, f"{frame_id:03d}_hdri_{hdri_id:02d}.png")
            else:
                hdri_id = 0
                random_frame = random.choice(frames)
                frame_path = random_frame["file_path"]
                frame_id = int(frame_path.split("/")[-1].split(".")[0])

                source_path = os.path.join(sequence_dir, f"{frame_id:03d}.png")
            # Store transform matrix for later use (read by _load_exr)
            self.current_transform_matrix = random_frame.get("transform_matrix")
        else:
            raise FileNotFoundError(f"Error: transforms.json not found at {transforms_path}")
        
        prompt = item.get('prompt', "")
        
        # Load source image (HDRI) based on the randomly selected frame_id and hdri_id
        source = self._load_image(source_path)
        
        # Load target based on specified task
        if self.task == 'normal':
            target_path = os.path.join(sequence_dir, "normal", f"{frame_id:03d}_normal.exr")
            target = self._load_exr(target_path, item, task='normal')
        elif self.task == 'base_color':
            target_path = os.path.join(sequence_dir, "Base Color", f"{frame_id:03d}_Base Color.png")
            target = self._load_image(target_path)
        elif self.task == 'metallic':
            target_path = os.path.join(sequence_dir, "Metallic", f"{frame_id:03d}_metallic.png")
            target = self._load_image(target_path)
        elif self.task == 'roughness':
            target_path = os.path.join(sequence_dir, "Roughness", f"{frame_id:03d}_roughness.png")
            target = self._load_image(target_path)
        elif self.task == 'depth':
            target_path = os.path.join(sequence_dir, "depth", f"{frame_id:03d}_depth.exr")
            target = self._load_exr_depth(target_path)
        else:
            raise ValueError(f"Unsupported task: {self.task}")
        
        # Load reference image
        if self.task == 'normal':
            # For normal task, use NOCS as reference if available, else use base color
            ref_path = os.path.join(sequence_dir, "nocs", f"{frame_id:03d}_nocs.exr")
            if os.path.exists(ref_path):
                reference = self._load_exr(ref_path, item, task='normal')
                reference[0] = -reference[0]
            else:
                # Fallback to source image
                reference = source.clone()
        else:
            # For other tasks, use normal map as reference
            ref_path = os.path.join(sequence_dir, "normal", f"{frame_id:03d}_normal.exr")
            reference = self._load_exr(ref_path, item, task='normal')
        
        # Apply random aspect ratio crop - use same crop parameters for all.
        # NOTE(review): the resized/padded/cropped image returned by
        # random_aspect_crop is discarded, and its (x, y) window — computed in
        # the resized coordinate frame — is re-applied to the ORIGINAL tensors
        # below. This is only consistent when no resize/pad occurred; confirm.
        _, (crop_params) = self.random_aspect_crop(source)
        x, y, new_W, new_H = crop_params
        source = source[:, y:y+new_H, x:x+new_W]
        target = target[:, y:y+new_H, x:x+new_W]
        reference = reference[:, y:y+new_H, x:x+new_W]
            
        
        # Apply source augmentation if specified
        if self.source_aug is not None and random.random() < self.source_aug_prob:
            source = self.source_aug(source)
        

        # Normalize to [-1, 1]
        source = (source * 2.0) - 1.0
        target = (target * 2.0) - 1.0
        reference = (reference * 2.0) - 1.0
        
        # Resize all images to target resolution
        source = self.resize_transform(source)
        target = self.resize_transform(target)
        reference = self.resize_transform(reference)
        
        # Validity: pixels whose normal is close to unit length; the
        # x-component is sign-flipped (convention swap).
        if self.task == 'normal':
            target[0] = -target[0]
            target_norm = torch.norm(target, p=2, dim=0, keepdim=True)
            valid_mask = (target_norm > 0.5) & (target_norm < 1.5)
        else:
            reference[0] = -reference[0]
            reference_norm = torch.norm(reference, p=2, dim=0, keepdim=True)
            valid_mask = (reference_norm > 0.5) & (reference_norm < 1.5)
        
        invalid_mask = ~valid_mask
        # For normals only the z-channel of invalid pixels is forced to 1
        # (presumably encoding a camera-facing normal — confirm); other tasks
        # white out all three channels.
        if self.task == 'normal':
            target[2, invalid_mask[0]] = 1.
            reference[2, invalid_mask[0]] = 1.
        else:
            target[invalid_mask.repeat(3, 1, 1)] = 1.
            reference[invalid_mask.repeat(3, 1, 1)] = 1.
        source[invalid_mask.repeat(3, 1, 1)] = 1.

        # Concatenate valid mask with target
        target = torch.cat((target, valid_mask), 0)
        
        # Permute dimensions for output: [C, H, W] -> [H, W, C]
        source = source.permute(1, 2, 0)
        target = target.permute(1, 2, 0)
        reference = reference.permute(1, 2, 0)
        
        return dict(jpg=target, txt=prompt, hint=source, ref=reference)
        





class HybridVAEDatasetAugV2_plus_RandomIntrins(Dataset):
    # RGB/normal-map pair dataset with random aspect-ratio cropping, optional
    # 90-degree rotation for vertical crops, and a fixed 1024x576 output.
    # Returns jpg=target normal map, hint=source, ref=edge "sketch".
    def __init__(self, json_files=['./data/DIODE_normal/train.jsonl', './data/hypersim_normal/train.jsonl'],
                 source_aug=None, source_aug_prob=0.1, resolution=1024):
        # NOTE(review): `resolution` is unused — the output size is fixed to
        # (target_height, target_width) = (576, 1024) below.
        self.data = []
        for json_file in json_files:
            with open(json_file, "r") as f:
                data_dir = os.path.dirname(json_file)
                for line in f:
                    data_term = json.loads(line)
                    data_term['data_dir'] = data_dir
                    self.data += [data_term]
        self.to_Tensor = T.ToTensor()
        self.source_aug = source_aug
        self.source_aug_prob = source_aug_prob
        
        # Final dimensions will always be 1024x576
        self.target_width = 1024
        self.target_height = 576
        
        # Candidate (H, W) crop shapes for random_aspect_crop.
        self.aspect_ratios = [
            (320, 960), (384, 800), (448, 672),
            (512, 608), (576, 544), (640, 480),
            (704, 448), (768, 416), (832, 384),
            (896, 352), (960, 320),
        ]

    
    def random_aspect_crop(self, img):
        """Apply random aspect ratio crop based on Outpainting ratios.

        Args:
            img (torch.Tensor): [C, H, W] image.

        Returns:
            tuple: (cropped_img, (x, y, new_W, new_H)) crop window parameters.
        """
        crop_H, crop_W = random.choice(self.aspect_ratios)
        _, orig_H, orig_W = img.shape
        
        # Fit the chosen aspect ratio inside the original image dimensions.
        if crop_H > crop_W:
            new_H = orig_H
            new_W = int(crop_W * orig_W / crop_H)
        else:
            new_H = int(crop_H * orig_H / crop_W)
            new_W = orig_W
            
        # Ensure we have enough space for cropping
        if orig_H < new_H or orig_W < new_W:
            scale = max(new_H/orig_H, new_W/orig_W)
            img = TF.resize(img, (int(orig_H*scale), int(orig_W*scale)), antialias=True)
            orig_H, orig_W = int(orig_H*scale), int(orig_W*scale)
            
        # Random crop position
        x = random.randint(0, orig_W - new_W)
        y = random.randint(0, orig_H - new_H)
        
        return img[:, y:y+new_H, x:x+new_W], (x, y, new_W, new_H)
        
    def rotate_normal_map_90(self, normal_map):
        """Rotate normal map 90 degrees clockwise and adjust vectors.

        Expects values in [0, 1]: components are mapped to [-1, 1], the x/y
        vector components swapped/negated for the rotation, then mapped back.
        """
        # Rotate image 90 degrees clockwise
        rotated_image = torch.rot90(normal_map, k=1, dims=[1, 2])  # Changed to k=1 for proper orientation
        rotated_image = (rotated_image * 2.0) - 1.0
        
        # Swap x and y components and negate appropriately for 90-degree rotation
        x, y, z = rotated_image[0], rotated_image[1], rotated_image[2]
        # rotated_image = torch.stack([y, -x, z], dim=0)
        rotated_image = torch.stack([-y, x, z], dim=0)
        rotated_image = (rotated_image + 1) / 2
    
        return rotated_image
    
    def __len__(self):
        return len(self.data)
        
    def __getitem__(self, idx):
        item = self.data[idx]
        data_dir = item['data_dir']
        # NOTE: 'refernce_image' is misspelled but must match the metadata key.
        source_path = os.path.join(data_dir, item['refernce_image'])
        target_path = os.path.join(data_dir, item['image'])
        prompt = ""
        
        # Load and convert images to tensors
        source = self.to_Tensor(Image.open(source_path).convert('RGB'))
        target = self.to_Tensor(Image.open(target_path).convert('RGB'))
        sketch = self.to_Tensor(Image.open(os.path.join(data_dir, item['conditioning_image'])).convert('RGB'))
        
        # Random aspect ratio crop - use same crop parameters for all.
        # NOTE(review): if random_aspect_crop upscaled the source, (x, y) are in
        # the upscaled frame while target/sketch are cropped at original scale —
        # confirm all three inputs always share one resolution.
        source, crop_params = self.random_aspect_crop(source)
        x, y, new_W, new_H = crop_params
        target = target[:, y:y+new_H, x:x+new_W]
        sketch = sketch[:, y:y+new_H, x:x+new_W]
        is_vertical = new_H > new_W
        
        # Rotate vertical crops to landscape so the 576x1024 resize fits.
        if is_vertical:
            source = self.rotate_normal_map_90(source)
            target = self.rotate_normal_map_90(target)
            sketch = torch.rot90(sketch, k=1, dims=[1, 2])
        
        # Batch resize operations
        tensors = torch.stack([source, target, sketch])
        tensors = TF.resize(tensors, (576, 1024), antialias=True)
        source, target, sketch = tensors[0], tensors[1], tensors[2]

        # Apply source augmentation if specified
        if self.source_aug is not None and random.random() < self.source_aug_prob:
            source = self.source_aug(source)
                
        # Normalize to [-1, 1]
        sketch = (sketch * 2) - 1
        source = (source * 2.0) - 1.0
        target = (target * 2.0) - 1.0
        
        # Validity: pixels whose target normal is close to unit length.
        target_norm = torch.norm(target, p=2, dim=0, keepdim=True)
        valid_mask = (target_norm > 0.5) & (target_norm < 1.5)
        invalid_mask = ~valid_mask
        source[0] = -source[0] # convert opencv to bae
        target[0] = -target[0]  # convert opencv to bae
        # White out invalid pixels in all three maps.
        target[invalid_mask.repeat(3, 1, 1)] = 1.
        source[invalid_mask.repeat(3, 1, 1)] = 1.
        sketch[invalid_mask.repeat(3, 1, 1)] = 1.
        
        # Permute dimensions for output: [C, H, W] -> [H, W, C]
        source = source.permute(1, 2, 0)
        target = target.permute(1, 2, 0)
        sketch = sketch.permute(1, 2, 0)
        
        return dict(jpg=target, txt=prompt, hint=source, ref=sketch)




class NaiveDepthDataloder(Dataset):
    """Simple RGB -> depth dataset.

    Reads JSONL records with a 'conditioning_image' (RGB) path and an 'image'
    path holding an integer-encoded depth map that is divided by 1000
    (presumably millimetres -> metres — confirm against the data).
    """

    def __init__(self, json_files=['./data/DIODE_normal/train.jsonl'], 
    source_aug=None, source_aug_prob=0.1, resolution=512):
        """
        Args:
            json_files (list): JSONL metadata files, one JSON object per line.
            source_aug: Optional transform applied to the source image.
            source_aug_prob (float): Probability of applying ``source_aug``.
            resolution (int): Square output resolution.
        """
        self.data = []
        for json_file in json_files:
            data_dir = os.path.dirname(json_file)
            with open(json_file, "r") as f:
                for line in f:
                    data_term = json.loads(line)
                    # Store an absolute data directory per record.
                    data_term['data_dir'] = join(os.getcwd(), data_dir)
                    self.data.append(data_term)

        # Both aliases are kept: subclasses rely on `to_Tensor`.
        self.to_Tensor = T.ToTensor()
        self.to_tensor = T.ToTensor()

        # Candidate (H, W) crop shapes for random_aspect_crop.
        self.aspect_ratios = [
            (320, 960), (384, 800), (448, 672),
            (512, 608), (576, 544), (640, 480),
            (704, 448), (768, 416), (832, 384),
            (896, 352), (960, 320), (960, 540),
        ]

        self.resolution = resolution
        self.source_aug = source_aug
        self.source_aug_prob = source_aug_prob

        # Fixed square output resolution.
        self.resize_transform = T.Resize((self.resolution, self.resolution),
                                         interpolation=Image.BILINEAR,
                                         antialias=True)

    def __len__(self):
        return len(self.data)

    def random_aspect_crop(self, img):
        """Apply random aspect ratio crop based on Outpainting ratios.

        Args:
            img (torch.Tensor): [C, H, W] image.

        Returns:
            tuple: (cropped_img, (x, y, new_W, new_H)).
        """
        crop_H, crop_W = random.choice(self.aspect_ratios)
        _, orig_H, orig_W = img.shape

        # Fit the chosen aspect ratio inside the original image dimensions.
        if crop_H > crop_W:
            new_H = orig_H
            new_W = int(crop_W * orig_W / crop_H)
        else:
            new_H = int(crop_H * orig_H / crop_W)
            new_W = orig_W

        # Upscale first if the crop would not fit.
        if orig_H < new_H or orig_W < new_W:
            scale = max(new_H / orig_H, new_W / orig_W)
            img = TF.resize(img, (int(orig_H * scale), int(orig_W * scale)), antialias=True)
            orig_H, orig_W = int(orig_H * scale), int(orig_W * scale)

        # Random crop position
        x = random.randint(0, orig_W - new_W)
        y = random.randint(0, orig_H - new_H)

        return img[:, y:y+new_H, x:x+new_W], (x, y, new_W, new_H)

    def __getitem__(self, idx):
        # Retry with random indices instead of silently swallowing the first
        # failure: log each error and give up after a few attempts so a
        # systematic problem still surfaces as an exception.
        for _ in range(4):
            try:
                return self._load_item(idx)
            except Exception as e:
                print(f"Error loading item {idx}: {e}")
                idx = random.randint(0, len(self.data) - 1)
        return self._load_item(idx)

    def tmp_load(self, idx):
        """Debug helper: return the raw cv2 source image and scaled depth map."""
        item = self.data[idx]
        data_dir = item['data_dir']
        source_path = os.path.join(data_dir, item['conditioning_image'])
        target_path = os.path.join(data_dir, item['image'])

        source = cv2.imread(source_path)

        # Integer depth -> float, scaled by 1/1000.
        depth_gt = cv2.imread(target_path, -1).astype(np.float32)
        depth_gt = depth_gt / 1000.0

        return source, depth_gt

    def _load_item(self, idx):
        """Load one sample: RGB source and min-max-normalized depth target.

        Returns:
            dict: 'jpg' = depth target (H, W, C), 'txt' = prompt,
            'hint' = source (H, W, 3), 'mask' = (H, W, 1) boolean mask.
        """
        item = self.data[idx]
        data_dir = item['data_dir']
        prompt = item.get('prompt', "")
        source_path = os.path.join(data_dir, item['conditioning_image'])
        target_path = os.path.join(data_dir, item['image'])

        source = self.to_Tensor(Image.open(source_path).convert('RGB'))

        # Integer depth -> float, scaled by 1/1000 (presumably mm -> m).
        depth_gt = cv2.imread(target_path, -1).astype(np.float32)
        depth_gt = depth_gt / 1000.0

        # Min-max normalize; guard the degenerate constant-depth case that
        # previously produced a 0/0 NaN map.
        depth_min = depth_gt.min()
        depth_range = depth_gt.max() - depth_min
        if depth_range > 0:
            depth_gt = (depth_gt - depth_min) / depth_range
        else:
            depth_gt = np.zeros_like(depth_gt)
        target = self.to_Tensor(depth_gt)

        # Apply source augmentation if specified
        if self.source_aug is not None and random.random() < self.source_aug_prob:
            source = self.source_aug(source)

        # Normalize to [-1, 1]
        source = (source * 2.0) - 1.0
        target = (target * 2.0) - 1.0

        source = self.resize_transform(source)
        target = self.resize_transform(target)

        # [C, H, W] -> [H, W, C]
        source = source.permute(1, 2, 0)
        target = target.permute(1, 2, 0)

        # NOTE(review): target is already in [-1, 1], so this threshold makes
        # the mask all-True; kept for interface compatibility — confirm intent.
        mask = (target < 1000.0)[:, :, 0:1]

        return dict(jpg=target, txt=prompt, hint=source, mask=mask)




class DepthDataloder_normalizer(NaiveDepthDataloder):
    """NaiveDepthDataloder variant that normalizes depth with an injectable
    normalizer callable, falling back to min-max normalization."""

    def __init__(self, json_files=['./data/DIODE_normal/train.jsonl'],
    source_aug=None, source_aug_prob=0.1, resolution=512, normalizer = None):
        """
        Args:
            normalizer: Optional callable mapping a depth tensor to its
                normalized form; when None, min-max normalization is used.
        """
        super().__init__(json_files, source_aug, source_aug_prob, resolution)

        # Bug fix: always bind the attribute. _load_item reads self.normalizer
        # unconditionally, so leaving it unset when normalizer was None raised
        # AttributeError.
        self.normalizer = normalizer
        if normalizer is not None:
            print(f'normalizer is not None, and initialized {type(self.normalizer)}')

    def _load_item(self, idx):
        """Load one (RGB source, normalized depth) sample; mask is all-ones.

        Returns:
            dict: 'jpg' = depth target (H, W, C), 'txt' = prompt,
            'hint' = source (H, W, 3), 'mask' = all-ones (H, W, 1).
        """
        item = self.data[idx]
        data_dir = item['data_dir']
        prompt = item.get('prompt', "")
        source_path = os.path.join(data_dir, item['conditioning_image'])
        target_path = os.path.join(data_dir, item['image'])

        source = self.to_Tensor(Image.open(source_path).convert('RGB'))

        # Integer depth -> float, scaled by 1/1000 (presumably mm -> m).
        depth_gt = cv2.imread(target_path, -1).astype(np.float32)
        depth_gt = depth_gt / 1000.0
        target = self.to_Tensor(depth_gt)

        # Normalize depth: external normalizer when provided, else min-max.
        if self.normalizer is not None:
            target = self.normalizer(target)
        else:
            target = (target - target.min()) / (target.max() - target.min())

        # NOTE(review): only the source is mapped to [-1, 1]; target stays in
        # the normalizer's output range — confirm this asymmetry is intended.
        source = (source * 2.0) - 1.0

        # Apply source augmentation if specified
        if self.source_aug is not None and random.random() < self.source_aug_prob:
            source = self.source_aug(source)

        source = self.resize_transform(source)
        target = self.resize_transform(target)

        # [C, H, W] -> [H, W, C]
        source = source.permute(1, 2, 0)
        target = target.permute(1, 2, 0)

        return dict(jpg=target, txt=prompt, hint=source, mask=torch.ones(target.shape[:2]).unsqueeze(-1))
        



class DepthDataloder_normalizer_lotus(NaiveDepthDataloder):
    """Depth dataloader that returns a valid-pixel mask and supports an
    optional depth normalizer (e.g. a scale/shift disparity normalizer).

    On construction, one demo sample is loaded to derive an
    aspect-preserving target size: the short side is set to ``resolution``
    and the long side is scaled to keep the sample's aspect ratio.
    """

    def __init__(self, json_files=['./data/lotus_depth/train.jsonl'],
    source_aug=None, source_aug_prob=0.1, resolution=512, normalizer = None):
        super().__init__(json_files, source_aug, source_aug_prob, resolution)

        # Only bind the attribute when a normalizer is supplied;
        # _load_item guards with hasattr() for the unset case.
        if normalizer is not None:
            self.normalizer = normalizer

            print(f'normalizer is not None, and intialized {type(self.normalizer)}')

        #* load one demo sample to derive the aspect-preserving target size
        demo_idx = 0
        item = self.data[demo_idx]

        # 16-bit PNG depth in millimeters -- TODO confirm; /1000 -> meters.
        demo_depth = cv2.imread(os.path.join(item['data_dir'], item['image']), -1).astype(np.float32) / 1000.0
        logger.warning(f' original demo_depth: {demo_depth.shape} \t min: {demo_depth.min()} \t max: {demo_depth.max()}')

        h, w = demo_depth.shape[:2]

        # Short side -> resolution, long side scaled to preserve aspect.
        if h > w:
            new_w = resolution
            new_h = int(resolution * h / w)
        else:
            new_h = resolution
            new_w = int(resolution * w / h)

        logger.warning(f'new_h: {new_h} \t new_w: {new_w}')
        #* update resize function
        self.resize_transform = T.Resize((new_h, new_w),
                                    interpolation=Image.BILINEAR,
                                    antialias=True)

        logger.warning(f'dataset size: {len(self.data)}')

    def update_resizer(self, new_h, new_w):
        """Replace the resize transform with a new (new_h, new_w) target."""
        logger.warning(f'updating resizer from {self.resize_transform.size} ')
        self.resize_transform = T.Resize((new_h, new_w),
                            interpolation=Image.BILINEAR,
                            antialias=True)

        logger.warning(f'to{self.resize_transform.size} ')

    def _load_item(self, idx):
        """Load one sample together with a validity mask.

        Args:
            idx: Index into ``self.data``.

        Returns:
            dict with keys:
                depth_values: depth target, 3xHxW, clipped to [-1, 1].
                txt: prompt string (may be empty).
                pixel_values: RGB source, 3xHxW, clipped to [-1, 1].
                valid_mask_values: 1xHxW mask of non-zero depth pixels.
        """
        item = self.data[idx]
        data_dir = item['data_dir']
        prompt = item.get('prompt', "")
        source_path = os.path.join(data_dir, item['conditioning_image'])
        target_path = os.path.join(data_dir, item['image'])

        # BUG FIX: removed a dead `if False:` block that read item['mask']
        # (a key not guaranteed to exist) and whose result was discarded.

        # Load and convert images to tensors
        source = self.to_Tensor(Image.open(source_path).convert('RGB'))

        # 16-bit PNG depth in millimeters -- TODO confirm; /1000 -> meters.
        depth_gt = cv2.imread(target_path, -1).astype(np.float32)
        depth_gt = depth_gt / 1000.0
        target = self.to_Tensor(depth_gt)

        # Zero depth marks missing measurements; keep a 1-channel mask.
        if target.shape[0] == 1:
            mask = target != 0.
        else:
            mask = target[0:1] != 0.

        #* normalizer
        if hasattr(self, 'normalizer') and self.normalizer is not None:
            target = self.normalizer(target, valid_mask=mask)
        else:
            # Fallback: plain per-sample min-max normalization to [0, 1].
            target = (target - target.min()) / (target.max() - target.min())

        # RGB from [0, 1] to [-1, 1]; clip both modalities defensively.
        source = (source * 2.0) - 1.0
        target = target.clip(-1, 1)
        source = source.clip(-1, 1)

        # Apply source augmentation if specified
        if self.source_aug is not None and random.random() < self.source_aug_prob:
            source = self.source_aug(source)

        #* resize
        source = self.resize_transform(source)
        target = self.resize_transform(target)
        # NOTE(review): resizing a boolean mask through a bilinear Resize
        # relies on torchvision's dtype handling -- confirm the mask stays
        # binary after this call.
        mask = self.resize_transform(mask)

        #* ensure there are multiple channels
        if target.shape[0] == 1:
            target = target.repeat(3, 1, 1)

        return dict(depth_values=target, txt=prompt, pixel_values=source, valid_mask_values=mask)

        


class DepthDataloderMoGeOut(DepthDataloder_normalizer_lotus):
    """Dataloader pairing ground-truth depth with a precomputed coarse
    depth map (presumably a MoGe prediction -- TODO confirm) used as the
    conditioning input.

    Note that ``pixel_values`` in the returned dict carries the coarse
    depth, not the RGB image; the RGB path is returned as ``hint_path``.
    """

    def __init__(self, json_files=['./data/lotus_depth/train.jsonl'],
    source_aug=None, source_aug_prob=0.1, resolution=512, normalizer = None):
        super().__init__(json_files, source_aug, source_aug_prob, resolution,normalizer)

        # Depth range bounds (same unit as the stored depth, presumably
        # meters -- TODO confirm) used to reject out-of-range GT pixels.
        self.d_min = 1e-5
        self.d_max = 80
        


    def _get_valid_mask(self, depth: torch.Tensor) -> torch.Tensor:
        """Return a boolean mask of pixels with d_min < depth < d_max."""
        valid_mask = torch.logical_and(
            (depth > self.d_min), (depth < self.d_max)
        ).bool()
        return valid_mask

        

    def _load_item(self, idx):
        """Load one (coarse depth, GT depth) training pair.

        Args:
            idx: Index into ``self.data``.

        Returns:
            dict with keys:
                depth_values: GT depth, 3 channels, clipped to [-1, 1].
                txt: prompt string (may be empty).
                pixel_values: coarse depth, 3 channels, clipped to [-1, 1].
                valid_mask_values: boolean mask of trustworthy pixels.
                hint_path: path of the RGB conditioning image.
        """
        item = self.data[idx]
        data_dir = item['data_dir']
        prompt = item.get('prompt', "")
        
        source_path = os.path.join(data_dir, item['conditioning_image'])
        target_path = os.path.join(data_dir, item['image'])

        
        coarse_depth_path = os.path.join(data_dir, item['coarse_depth'])
        # pred_intrinsic_path = os.path.join(data_dir, item['pred_intrinsic'])
        
        # Optional foreground mask stored as an 8-bit image (0/255 -> 0/1).
        fg_mask =None
        if item.get('mask', None) is not None:
            mask_path = os.path.join(data_dir, item['mask'])
            fg_mask = cv2.imread(mask_path, -1).astype(np.float32) /255. 
        
        
        
        # Load and convert images to tensors
        source = self.to_Tensor(Image.open(source_path).convert('RGB'))

        # GT depth: 16-bit PNG in millimeters -- TODO confirm; /1000 -> m.
        depth_gt = cv2.imread(target_path, -1).astype(np.float32) 
        
        depth_gt = depth_gt / 1000.0
        target = self.to_Tensor(depth_gt)


        
        #todo load coarse depth 
        coarse_depth = np.load(coarse_depth_path)
        coarse_depth = self.to_Tensor(coarse_depth)

        #* get the mask of large shift 
        """
            #! debug  pixel shift 
            error_map = (target - coarse_depth).abs()
            logger.warning(f'error_map: {error_map.shape} \t min: {error_map.min()} \t max: {error_map.max()}, \t mean: {error_map.mean()}')
            debug2_ = colorize_depth_map(error_map.cpu().numpy().squeeze())
            error_map = self.to_Tensor(colorize_depth_map(error_map.cpu().numpy().squeeze()))
        """


        # Coarse predictions may contain inf/NaN; keep only finite pixels.
        coarse_mask = coarse_depth.isfinite() #* filter infinite 
        
        # Zero GT depth marks missing measurements; keep a 1-channel mask.
        # if target.shape
        if target.shape[0] ==1:
            mask = target != 0.
        else:
            mask = target[0:1] != 0.


        

        # Combine: non-zero GT, finite coarse depth, in-range GT, and the
        # optional foreground mask.
        raw_mask = self._get_valid_mask(target)
        if fg_mask is not None:
            mask = mask & coarse_mask & self.to_Tensor(fg_mask).bool() & raw_mask
        else:
            mask = mask & coarse_mask & raw_mask


        #todo align value range 
        # print(coarse_depth.shape, coarse_depth.max(), coarse_depth.min(), target.mean()) 
        # print(target.shape, target.max(), target.min(), target.mean()) 
        # coarse_depth = coarse_depth * np.median(target[mask]) / np.median(coarse_depth[mask])
        
        # Rescale GT so its median over valid pixels matches the coarse
        # depth's median (scale alignment between the two sources).
        target = target * np.median(coarse_depth[mask]) / np.median(target[mask])


        """"
        #!  debug pixel shift

        """
        # Drop pixels whose GT/coarse discrepancy exceeds the 99th
        # percentile of the (masked) error map -- out-of-distribution shifts.
        error_map2 = (target - coarse_depth).abs()
        # if error_map2.numel() == 0 or (error_map2 > 0).sum() == 0 :
        #     logger.error(f'error_map2 is None , {error_map2.shape}')
        # else:
        error_map2[~mask] =  0
        # NOTE(review): torch.quantile has an input-size limit in some torch
        # versions -- confirm it holds for the largest images used here.
        _max = torch.quantile(error_map2, 0.99)

        non_ood_shift_map = error_map2 <= _max 
        mask = mask & non_ood_shift_map


        # colorize_depth_map(non_ood_shift_map.squeeze().cpu().float().numpy() ).save('non_ood_shift_map.jpg')
        # colorize_depth_map((error_map2 *non_ood_shift_map).cpu().numpy().squeeze() ).save('error_map_new.jpg')
        
        

        # logger.warning(f'error_map: {error_map2.shape} \t min: {error_map2.min()} \t max: {error_map2.max()}, \t mean: {error_map2.mean()}')
        # error_map2 = self.to_Tensor(colorize_depth_map(error_map2.cpu().numpy().squeeze()))


        #todo mask the  pixel with a large  value shift 

        #* normalizer
        if hasattr(self,'normalizer')  and self.normalizer is not None:
            target  = self.normalizer(target, valid_mask = mask)
            coarse_depth  = self.normalizer(coarse_depth, valid_mask = mask)
        else:
            # Fallback: plain per-sample min-max normalization to [0, 1].
            target  = (target - target.min()) / (target.max() - target.min())
            coarse_depth  = (coarse_depth - coarse_depth.min()) / (coarse_depth.max() - coarse_depth.min())
            # print(f'common normalized')

        
        # RGB from [0, 1] to [-1, 1]; clip all modalities defensively.
        source = (source * 2.0) - 1.0

        coarse_depth = coarse_depth.clip(-1,1)

        target = target.clip(-1,1)
        source = source.clip(-1,1)


        # Apply random aspect ratio crop - use same crop parameters for all
        # _, (crop_params) = self.random_aspect_crop(source)
        # x, y, new_W, new_H = crop_params
        # source = source[:, y:y+new_H, x:x+new_W]
        # target = target[:, y:y+new_H, x:x+new_W]
        
        
        # Apply source augmentation if specified
        if self.source_aug is not None and random.random() < self.source_aug_prob:
            source = self.source_aug(source)


        #* resize 
        source = self.resize_transform(source)
        target = self.resize_transform(target)
        # NOTE(review): resizing a boolean mask through a bilinear Resize
        # relies on torchvision's dtype handling -- confirm it stays binary.
        mask = self.resize_transform(mask)
        coarse_depth = self.resize_transform(coarse_depth)


        # source = source.permute(1, 2, 0)
        # target = target.permute(1, 2, 0)


        #* ensure there are multiple channels
        if target.shape[0] == 1:
            target = target.repeat(3, 1, 1)


        if coarse_depth.shape[0] == 1:
            coarse_depth = coarse_depth.repeat(3, 1, 1)

        
        return dict(depth_values=target, txt= prompt, pixel_values=coarse_depth, 
        valid_mask_values = mask, hint_path=source_path)

        


if __name__ == '__main__':
    # Smoke test: instantiate a dataset, dump tensor statistics to a text
    # file, and save a few target / source / mask visualizations as PNGs.

    import matplotlib.pyplot as plt

    # Create output directory
    output_dir = 'logs/dataset_test_results_xyz_moge_align'
    # output_dir = 'logs/dataset_test_results_booster'
    # output_dir = 'logs/dataset_test_results_cleargrasp'
    os.makedirs(output_dir, exist_ok=True)
    


    import torchvision.transforms as transforms
    from PIL import Image
    # from augmentations.appearance import *
    from data_utils import ScaleShiftDepthNormalizer,ScaleShiftDisparityNormalizer


    # appearance_aug = transforms.Compose([
    #             JpegCompress(min_quality=10, max_quality=90, p=0.3),
    #             GaussianBlur(ks=11, sigma=(0.1, 5.0), p=0.3),
    #             MotionBlur(ks=(1,11), p=0.3),
    #             GaussianNoise(sigma=(0.01, 0.05), p=0.1),
    #         ])


    # Disparity normalizer mapping valid depth into [-1, 1] with 2%
    # quantile clipping at both ends.
    normalizer = ScaleShiftDisparityNormalizer(
        norm_min=-1,
        norm_max=1,
        min_max_quantile=0.02,
        clip=True,
    )


    resolution = 576
    # dataset = DepthDataloder_normalizer_lotus(json_files=['data/cleargrasp_processed/train.jsonl'],
    #         resolution=resolution, normalizer = normalizer)

    # dataset = DepthDataloder_normalizer_lotus(json_files=['data/XYZ/train.jsonl'],
    #         resolution=resolution, normalizer = normalizer)


    dataset = DepthDataloderMoGeOut(json_files=['data/XYZ/debug.jsonl'],
            resolution=resolution, normalizer = normalizer)

    # dataset = DepthDataloderMoGeOut(json_files=['data/booster_processed/train_moge.jsonl'],
    #         resolution=resolution, normalizer = normalizer)

    # dataset = DepthDataloder_normalizer_lotus(json_files=['data/booster_processed/train.jsonl'],
    #         resolution=resolution, normalizer = normalizer)
    
    

    # Write per-sample tensor shapes / value ranges for the first 3 samples.
    with open(os.path.join(output_dir, 'dataset_info.txt'), 'w') as f:
        f.write(f"Dataset size: {len(dataset)}\n")    
        
        for idx in  range(3):
            sample = dataset[idx]


            f.write("\nSample keys: " + str(sample.keys()) + "\n")
            f.write("\nShapes:\n")
            for key, value in tqdm(sample.items(),desc = 'sample info :'):
                if isinstance(value, torch.Tensor):
                    f.write(f"{key}: {value.shape} \t max: {value.max()} \t min: {value.min()}\n")
    
            f.write("\n \n \n")
    


    
    # Save sample visualizations
    for i in tqdm(range(min(5, len(dataset)))):  # Save first 5 samples
        sample = dataset[i]
        
        
        plt.figure(figsize=(15, 5))
        
        # Tensors are in [-1, 1]; shift/scale to [0, 1] for display.
        plt.subplot(131)
        # plt.imshow((sample['jpg'].numpy() + 1) / 2)
        plt.imshow((sample['depth_values'].permute(1,2,0).numpy() + 1) / 2)
        plt.title('Target')
        plt.axis('off')
        
        
        plt.subplot(132)
        # plt.imshow((sample['hint'].numpy() + 1) / 2)
        plt.imshow((sample['pixel_values'].permute(1,2,0).numpy() + 1) / 2)
        plt.title('Source')
        plt.axis('off')
        


        plt.subplot(133)
        # plt.imshow(sample['mask'].numpy())
        
        plt.imshow(sample['valid_mask_values'].permute(1,2,0).numpy())
        plt.title('Mask')
        # plt.title(sample['hint_path'])
        plt.axis('off')
        


        # plt.subplot(133)
        # plt.imshow((sample['ref'].numpy() + 1) / 2)
        # plt.title('Reference')
        # plt.axis('off')

        
        
        

        plt.savefig(os.path.join(output_dir, f'sample_{i}.png'), 
                   bbox_inches='tight', 
                   pad_inches=0.1,
                   dpi=300)
                   
        plt.close()

    
    print(f"Results saved to {output_dir}")



    """
    by chongjie 
    # Initialize dataset
    # dataset = NpzDatasetAug_plus_RandomIntrins(
    #     json_files=['./data/train.jsonl'], task='normal'
    # )
    # dataset = NormalEnhancementDatasetAug(json_files=['./data/objaverse_camera_normal_refine/train.jsonl'], task='normal')
    # dataset = JsonDatasetAug(json_files=['./render_data/train.jsonl'], task='normal')
    # dataset = JsonDatasetAug_plus_RandomIntrins(json_files=['./render_data/train_randombg.jsonl'], task='normal')
    # dataset = JsonDatasetAug_plus_RandomIntrins(json_files=['./render_data/train_pbr_multiobject.jsonl'], task='base_color')
    # dataset = JsonDatasetAug_plus_RandomIntrins(json_files=['data/DiffusionRender/train.jsonl'], task='base_color')
    # dataset = JsonDatasetAug_plus_RandomIntrins(json_files=['./render_data/train_randombg.jsonl'], task='base_color')
    
    # dataset = JsonDatasetAug(json_files=['render_data/train.jsonl'], task='base_color', resolution=512)
    # dataset = JsonDatasetAug(json_files=['render_data/train_0504.jsonl'], task='normal', resolution=512)
    dataset = JsonDatasetAug(json_files=['render_data/train_0504.jsonl'], task='normal', resolution=768)
    
    
    # dataset = JsonDatasetAug(json_files=['./render_data/train_randombg.jsonl'], task='base_color', resolution=512)
    
    # dataset = JsonDatasetAug(json_files=['./debug_data/train.jsonl'], task='normal')
    # dataset = JsonDatasetAug(json_files=['./render_data/train.jsonl'], task='base_color')
    # dataset = ImageDatasetAug(json_files=['./data/OmniNOCS/objectron/val.jsonl'], task='nocs')
    # dataset = NpzDatasetAug_plus_RandomIntrins(
    #     json_files=['./data/train.jsonl'], task='normal'
    # )

    
    # Test single item and save info
    print(f"Dataset size: {len(dataset)}")
    with open(os.path.join(output_dir, 'dataset_info.txt'), 'w') as f:
        f.write(f"Dataset size: {len(dataset)}\n")
        
        sample = dataset[0]
        f.write("\nSample keys: " + str(sample.keys()) + "\n")
        f.write("\nShapes:\n")
        for key, value in sample.items():
            if isinstance(value, torch.Tensor):
                f.write(f"{key}: {value.shape}\n")
        
    # Save sample visualizations
    for i in range(min(5, len(dataset))):  # Save first 5 samples
        sample = dataset[i]
        
        # Create directory for each sample
        sample_dir = os.path.join(output_dir, f'sample_{i}')
        os.makedirs(sample_dir, exist_ok=True)
        
        # Save target (jpg) in RGBA format
        target_rgba = (sample['jpg'].numpy() + 1) / 2
        target_rgba = (target_rgba * 255).astype(np.uint8)
        # convert RGBA to RGB
        if target_rgba.shape[2] == 4:
            target_rgba = target_rgba[:, :, :3]
        Image.fromarray(target_rgba).save(os.path.join(sample_dir, 'target.png'))
        
        # Save hint (source) in RGBA format
        hint_rgba = (sample['hint'].numpy() + 1) / 2
        hint_rgba = (hint_rgba * 255).astype(np.uint8)
        if hint_rgba.shape[2] == 4:
            hint_rgba = hint_rgba[:, :, :3]
        
        # if hint_rgba.shape[2] == 3:
        #     mask = (sample['jpg'][:, :, 3].numpy() * 255).astype(np.uint8)
        #     hint_rgba = np.dstack((hint_rgba, mask))
        Image.fromarray(hint_rgba).save(os.path.join(sample_dir, 'hint.png'))
        
        # Save reference (ref) in RGBA format
        ref_rgba = (sample['ref'].numpy() + 1) / 2
        ref_rgba = (ref_rgba * 255).astype(np.uint8)
        # convert RGBA to RGB
        if ref_rgba.shape[2] == 4:
            ref_rgba = ref_rgba[:, :, :3]    
        Image.fromarray(ref_rgba).save(os.path.join(sample_dir, 'ref.png'))
        
        # Combine jpg and hint together, split by a diagonal line
        combined = np.zeros_like(target_rgba)
        h, w, _ = combined.shape
        for y in range(h):
            for x in range(w):
                if x < y * w / h:
                    combined[y, x] = target_rgba[y, x]
                else:
                    combined[y, x] = hint_rgba[y, x]
        
        Image.fromarray(combined).save(os.path.join(sample_dir, 'combined.png'))
    
    print(f"Results saved to {output_dir}")

    # count = 0
    # while count < 300:  # Save first 5 sample
    #     sample = dataset[random.randint(0,500000)]
        
    #     # Create directory for each sample

    #     # sample_dir = os.path.join(output_dir, f'sample_{i}')
    #     # os.makedirs(sample_dir, exist_ok=True)
        
    #     # Save target (jpg) in RGBA format
    #     target_rgba = (cv2.resize(sample['jpg'].numpy(), (256, 256), interpolation=cv2.INTER_CUBIC) + 1) / 2
    #     target_rgba = (target_rgba * 255).astype(np.uint8)
    #     canny=cv2.Canny(target_rgba[:,:,:3],50,200)
    #     if canny.sum() / 255 > 1000:
    #         # Image.fromarray(target_rgba).save(os.path.join(sample_dir, 'target.png'))
            
    #         # Save hint (source) in RGBA format
    #         hint_rgba = (cv2.resize(sample['hint'].numpy(), (256, 256), interpolation=cv2.INTER_CUBIC) + 1) / 2
    #         hint_rgba = (hint_rgba * 255).astype(np.uint8)
    #         if hint_rgba.shape[2] == 3:
    #             mask = (cv2.resize(sample['jpg'].numpy(), (256, 256), interpolation=cv2.INTER_CUBIC)[:, :, 3] * 255).astype(np.uint8)
    #             hint_rgba = np.dstack((hint_rgba, mask))
    #         # Image.fromarray(hint_rgba).save(os.path.join(sample_dir, 'hint.png'))
            
    #         # Save reference (ref) in RGBA format
    #         ref_rgba = (cv2.resize(sample['ref'].numpy(), (256, 256), interpolation=cv2.INTER_CUBIC) + 1) / 2
    #         ref_rgba = (ref_rgba * 255).astype(np.uint8)
    #         # Image.fromarray(ref_rgba).save(os.path.join(sample_dir, 'ref.png'))
            
    #         # Combine jpg and hint together, split by a diagonal line
    #         combined = np.zeros_like(target_rgba)
    #         h, w, _ = combined.shape
    #         for y in range(h):
    #             for x in range(w):
    #                 if x < y * w / h:
    #                     combined[y, x] = target_rgba[y, x]
    #                 else:
    #                     combined[y, x] = hint_rgba[y, x]
    #         if count == 0:
    #             final_image = combined
    #         else:
    #             final_image = np.concatenate([final_image, combined], axis=0)
    #         count += 1
    #     # Image.fromarray(combined).save(os.path.join(sample_dir, 'combined.png'))

    # save_image = np.concatenate([final_image[i*20*256:i*20*256+20*256] for i in range(15)], axis=1)
    # Image.fromarray(save_image).save('combined.png')
    # print(f"Results saved to {output_dir}")




    """

    

