import torch.nn.functional as F
import matplotlib.pyplot as plt
import json
import pdb 
import argparse
from os.path import join, split, exists, isdir,isfile,dirname
import random
import datetime
from loguru import logger 
from daniel_tools.img_train_utils import *
from torch.utils.data import Dataset, DataLoader
from PIL import Image 
import pyexr
from torchvision.transforms import  ToPILImage
import os 
import einops
from loguru import logger
import torchvision


class TestsetLoader(Dataset):
    """Test-set loader pairing an RGB conditioning image with a GT normal map.

    Each line of every jsonl file under ``root`` must contain the keys
    ``'image'`` (ground-truth normal png) and ``'conditioning_image'``
    (input RGB), both stored relative to ``root``.
    """

    def __init__(self, root, image_resolution, jsons=['test.jsonl'], debug=False):
        """
        Args:
            root: dataset root directory; jsonl files and image paths are
                resolved relative to it.
            image_resolution: target resolution passed to ``resize_image``.
            jsons: iterable of jsonl filenames (relative to ``root``).
            debug: if True, truncate the dataset to 100 items for quick runs.
        """
        super().__init__()

        all_data = []
        for json_name in jsons:
            with open(os.path.join(root, json_name), "r") as f:
                for line in f:
                    data_term = json.loads(line)
                    data_term['image'] = os.path.join(root, data_term['image'])
                    data_term['conditioning_image'] = os.path.join(root, data_term['conditioning_image'])
                    all_data.append(data_term)

        if debug:
            # error level only to make the truncation loudly visible in logs
            logger.error(f'debug mode')
            all_data = all_data[:100]
        logger.warning(f"data size is {len(all_data)}")

        self.all_data = all_data
        self.image_resolution = image_resolution

    def __len__(self):
        return len(self.all_data)

    def __getitem__(self, idx):
        """Return dict(hint=RGB in [-1,1] CHW, jpg=normal map, mask=1-ch mask)."""
        item = self.all_data[idx]

        input_image_path = item['conditioning_image']
        # BUGFIX: rewrite only the file extension.  The previous
        # ``replace('png', 'jpg')`` also corrupted any 'png' substring
        # elsewhere in the path (e.g. a directory named 'pngs').
        if input_image_path.endswith('.png'):
            input_image_path = input_image_path[:-4] + '.jpg'

        gt_image_path = item['image']

        input_image = center_crop(cv2.imread(input_image_path))
        height, width = input_image.shape[:2]

        # GT normal stored as an 8-bit png; map [0, 255] -> [-1, 1].
        gt_normal = center_crop(cv2.imread(gt_image_path))
        gt_normal = cv2.cvtColor(gt_normal, cv2.COLOR_BGR2RGB)
        gt_normal = (gt_normal.astype(np.float32) / 127.5) - 1.0

        # Resize, move to CHW, and re-normalize unit vectors; norm_normalize
        # also yields a validity mask (presumably zero-norm pixels are
        # background -- confirm against daniel_tools implementation).
        gt_normal = resize_image(gt_normal, self.image_resolution)
        gt_normal = torch.tensor(gt_normal).permute((2, 0, 1)).float()
        gt_normal, gt_mask = norm_normalize(gt_normal)  # values in [-1, 1]
        gt_normal[~gt_mask] = -1
        # Flip the first (x) channel -- coordinate-convention change;
        # NOTE(review): assumed to convert between normal-map handedness.
        gt_normal[0] = -gt_normal[0]

        # Conditioning image: BGR -> RGB, scale to [-1, 1], HWC -> CHW.
        raw_input_image = HWC3(input_image)
        img = resize_image(raw_input_image, self.image_resolution)
        img = torch.from_numpy(img[:, :, ::-1].copy()).float() / 255.0
        img = (img * 2) - 1
        img = einops.rearrange(img, 'h w c -> c h w').clone()

        return dict(hint=img, jpg=gt_normal, mask=gt_mask.sum(0).unsqueeze(0))




class DepthLoader(Dataset):
    """Loader pairing an RGB image with a 16-bit metric-depth ground truth.

    Each jsonl entry must contain ``'image'`` (depth png, millimeters) and
    ``'conditioning_image'`` (RGB), stored relative to the jsonl's directory.
    """

    def __init__(self, jsons=['test.jsonl']):
        """
        Args:
            jsons: iterable of jsonl file paths; image paths inside each file
                are resolved relative to that file's directory.
        """
        super().__init__()

        all_data = []
        for json_name in jsons:
            root = dirname(json_name)
            # BUGFIX: open the jsonl path directly.  The previous
            # ``os.path.join(root, json_name)`` duplicated the directory for
            # relative paths ('data/x.jsonl' -> 'data/data/x.jsonl'); it only
            # worked for absolute paths, where join() discards ``root``.
            with open(json_name, "r") as f:
                for line in f:
                    data_term = json.loads(line)
                    data_term['image'] = os.path.join(root, data_term['image'])
                    data_term['conditioning_image'] = os.path.join(root, data_term['conditioning_image'])
                    all_data.append(data_term)

        logger.warning(f"data size is {len(all_data)}")

        self.all_data = all_data

        self.transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
        ])

    def __len__(self):
        return len(self.all_data)

    def __getitem__(self, idx):
        """Return dict(hint=RGB in [-1,1], jpg=depth in meters, plus paths)."""
        item = self.all_data[idx]

        input_image_path = item['conditioning_image']
        gt_image_path = item['image']

        # RGB conditioning image scaled to [-1, 1], then HWC -> CHW via ToTensor.
        test_image = Image.open(input_image_path).convert('RGB')
        test_image = np.array(test_image).astype(np.float32)
        test_image = test_image / 127.5 - 1.0
        test_image = self.transforms(test_image)

        # Depth png read unchanged (cv2 flag -1); stored in millimeters,
        # converted to meters.
        depth_gt = cv2.imread(gt_image_path, -1).astype(np.float32)
        depth_gt = depth_gt / 1000.0
        depth_gt = self.transforms(depth_gt)

        return dict(hint=test_image, jpg=depth_gt,
                    hint_path=item['conditioning_image'], jpg_path=item['image'])



class InferDepthLoader(Dataset):
    """Inference-only loader: yields RGB conditioning images, no ground truth.

    Each jsonl entry provides ``'conditioning_image'`` (or, as a fallback,
    ``'data_path'``), resolved relative to the jsonl's directory.
    """

    def __init__(self, jsons=['test.jsonl']):
        """
        Args:
            jsons: iterable of jsonl file paths; image paths inside each file
                are resolved relative to that file's directory.
        """
        super().__init__()

        all_data = []
        for json_name in jsons:
            root = dirname(json_name)
            # BUGFIX: open the jsonl path directly.  The previous
            # ``os.path.join(root, json_name)`` duplicated the directory for
            # relative paths; it only worked for absolute paths, where
            # join() discards ``root``.
            with open(json_name, "r") as f:
                for line in f:
                    data_term = json.loads(line)
                    if data_term.get('conditioning_image', None) is not None:
                        data_term['conditioning_image'] = os.path.join(root, data_term['conditioning_image'])
                    else:
                        # Fallback for jsonl variants that use 'data_path'.
                        data_term['conditioning_image'] = os.path.join(root, data_term['data_path'])
                    all_data.append(data_term)

        logger.warning(f"data size is {len(all_data)}")

        self.all_data = all_data

        self.transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
        ])

    def __len__(self):
        return len(self.all_data)

    def __getitem__(self, idx):
        """Return dict(hint=RGB in [-1,1] CHW, hint_path=source path)."""
        item = self.all_data[idx]

        input_image_path = item['conditioning_image']

        # RGB image scaled to [-1, 1], then HWC -> CHW via ToTensor.
        test_image = Image.open(input_image_path).convert('RGB')
        test_image = np.array(test_image).astype(np.float32)
        test_image = test_image / 127.5 - 1.0
        test_image = self.transforms(test_image)

        return dict(hint=test_image, hint_path=item['conditioning_image'])



class InferDepthNpyLoader(InferDepthLoader):
    """Inference loader whose conditioning inputs are coarse-depth .npy files,
    normalized into [-1, 1] by a (possibly user-supplied) normalizer."""

    def __init__(self, jsons=['test.jsonl'], normalizer=None):
        """
        Args:
            jsons: jsonl file paths, forwarded to :class:`InferDepthLoader`.
            normalizer: callable mapping a depth tensor into [-1, 1]; when
                omitted, a default ``ScaleShiftDisparityNormalizer`` is built.
        """
        super().__init__(jsons=jsons)

        if normalizer is None:
            from data_utils import ScaleShiftDisparityNormalizer
            normalizer = ScaleShiftDisparityNormalizer(
                norm_min=-1,
                norm_max=1,
                min_max_quantile=0.02,
                clip=True,
            )
            print(f' using default normalizer {type(normalizer)}')
        else:
            print(f' using normalizer {type(normalizer)}')
        self.normalizer = normalizer

    def __getitem__(self, idx):
        """Return dict(hint=3-channel normalized depth, hint_path=source path)."""
        item = self.all_data[idx]
        coarse_depth_path = item['conditioning_image']

        coarse_depth = self.transforms(np.load(coarse_depth_path))

        assert self.normalizer is not None, 'normalizer should not be None'
        coarse_depth = self.normalizer(coarse_depth)  # values in [-1, 1]

        # Broadcast a single-channel map to 3 channels for the model input.
        if coarse_depth.shape[0] == 1:
            coarse_depth = coarse_depth.repeat(3, 1, 1)

        return dict(hint=coarse_depth, hint_path=item['conditioning_image'])



class LightRenderLoader(TestsetLoader):
    """Variant of :class:`TestsetLoader` whose GT normals are world-space EXR
    maps; each item's ``'transform_matrix'`` rotates them into camera space
    before the usual normalize/crop/resize pipeline.
    """

    def __init__(self, root, image_resolution, jsons=['test.jsonl']):
        """
        Args:
            root: dataset root; jsonl files and image paths resolve against it.
            image_resolution: target resolution passed to ``resize_image``.
            jsons: iterable of jsonl filenames (relative to ``root``).
        """
        # BUGFIX: the old code called TestsetLoader.__init__(root), which is
        # missing the required ``image_resolution`` argument (TypeError at
        # construction).  We load the data ourselves below, so only
        # Dataset.__init__ needs to run.
        super(TestsetLoader, self).__init__()

        all_data = []
        for json_name in jsons:
            with open(os.path.join(root, json_name), "r") as f:
                for line in f:
                    data_term = json.loads(line)
                    # BUGFIX: paths were joined against an undefined
                    # ``input_dir`` (NameError); use ``root`` like the parent.
                    data_term['image'] = os.path.join(root, data_term['image'])
                    data_term['conditioning_image'] = os.path.join(root, data_term['conditioning_image'])
                    all_data.append(data_term)
        logger.warning(f"data size is {len(all_data)}")

        self.all_data = all_data
        self.image_resolution = image_resolution

    def __getitem__(self, idx):
        """Return dict(hint=RGB in [-1,1] CHW, jpg=camera-space normal, mask)."""
        item = self.all_data[idx]

        input_image_path = item['conditioning_image']
        gt_image_path = item['image']

        input_image = center_crop(cv2.imread(input_image_path))
        height, width = input_image.shape[:2]

        # World-space normals from EXR, rotated into camera space with the
        # item's camera-to-world transform, then mapped [0,1] -> [-1,1].
        normal_world = pyexr.read(str(gt_image_path))
        normal_camera = blender_world_normal_2_camera(
            normal_world,
            np.array(item["transform_matrix"])
        )
        gt_normal = normal_camera[..., :3] * 2 - 1
        gt_normal = center_crop(gt_normal)

        # Resize, move to CHW, re-normalize unit vectors; norm_normalize also
        # yields a validity mask for background pixels.
        gt_normal = resize_image(gt_normal, self.image_resolution)
        gt_normal = torch.tensor(gt_normal).permute((2, 0, 1)).float()
        gt_normal, gt_mask = norm_normalize(gt_normal)  # values in [-1, 1]
        gt_normal[~gt_mask] = -1
        # Flip the first (x) channel -- same convention flip as TestsetLoader.
        gt_normal[0] = -gt_normal[0]

        # Conditioning image: BGR -> RGB, scale to [-1, 1], HWC -> CHW.
        raw_input_image = HWC3(input_image)
        img = resize_image(raw_input_image, self.image_resolution)
        img = torch.from_numpy(img[:, :, ::-1].copy()).float() / 255.0
        img = (img * 2) - 1
        img = einops.rearrange(img, 'h w c -> c h w').clone()

        return dict(hint=img, jpg=gt_normal, mask=gt_mask.sum(0).unsqueeze(0))


    






class InferenceLoader(Dataset):
    """Inference-only loader fed by a plain-text list of image paths
    (one per line, relative to ``root``)."""

    def __init__(self, root, list_path_name="demo_list.txt", image_resolution=512):
        """
        Args:
            root: directory containing the list file and the images.
            list_path_name: name of the text file listing relative image paths.
            image_resolution: target resolution passed to ``resize_image``.
        """
        super().__init__()

        with open(os.path.join(root, list_path_name), "r") as f:
            all_data = [
                {'conditioning_image': os.path.join(root, line.strip('\n'))}
                for line in f
            ]
        logger.warning(f"data size is {len(all_data)}")

        self.all_data = all_data
        self.image_resolution = image_resolution

    def __len__(self):
        return len(self.all_data)

    def __getitem__(self, idx):
        """Return dict(hint=RGB in [-1,1] CHW, suggested_save_name=str)."""
        entry = self.all_data[idx]
        image_path = entry['conditioning_image']

        path_parts = image_path.split('/')
        image_stem = path_parts[-1][:-4]

        cropped = center_crop(cv2.imread(image_path))
        resized = resize_image(HWC3(cropped), self.image_resolution)

        # BGR -> RGB, scale to [-1, 1], then HWC -> CHW.
        hint = torch.from_numpy(resized[:, :, ::-1].copy()).float() / 255.0
        hint = hint * 2 - 1
        hint = einops.rearrange(hint, 'h w c -> c h w').clone()

        # Encode the last path components into a flat, filesystem-safe name.
        save_name = '#'.join(path_parts[-3:-1]) + f'#{image_stem}'
        return dict(hint=hint, suggested_save_name=save_name)




if __name__ == "__main__":
    # Smoke test: sample a few items from InferDepthNpyLoader and dump each
    # tensor field as a jpg for visual inspection.
    depth_loader = InferDepthNpyLoader(
        jsons=['/share/project/cwm/shaocong.xu/exp/Lotus/data/MoGe_submission/test.jsonl']
    )

    to_imger = ToPILImage()

    save_dir = 'logs/InferDepthNpyLoader_demo'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    for sample_idx in np.random.choice(range(len(depth_loader)), 5, replace=False):
        sample = depth_loader[sample_idx]
        for key, value in sample.items():
            if isinstance(value, str):
                continue
            if key == 'mask':
                # Masks are already 0/1-valued; no range remapping needed.
                pil_img = to_imger(value.float())
            else:
                # Tensor fields live in [-1, 1]; remap to [0, 1] for saving.
                pil_img = to_imger((value + 1) / 2)
            pil_img.save(f"{save_dir}/{key}-{sample_idx:06d}.jpg")

    exit(0)

    # ---- unreachable scratch below (kept intentionally; after exit(0)) ----
    root = '/baai-cwm-1/baai_cwm_ml/cwm/shaocong.xu/exp/stable_normal/data/DREDS'
    loader = InferenceLoader(root, 'demo_list.txt')

    save_dir = 'logs/demo_inference_path'
    if not exists(save_dir):
        os.makedirs(save_dir)

    for sample in loader:
        for key, value in sample.items():
            img = to_imger((value + 1) / 2)
            img.save(f"{save_dir}/{key}.jpg")
        break