
import os 
from os.path import join, exists
import cv2
from PIL import Image
from daniel_tools.img_utils import * 
from prior_depth_anything.utils import colorize_depth_maps,chw2hwc
import torch
from prior_depth_anything import PriorDepthAnything
from torch.utils.data import Dataset


from os.path import dirname
import json
from loguru import logger
import torchvision
from tqdm import tqdm



class InferDepthLoader(Dataset):
    """Dataset yielding RGB conditioning images (normalized to [-1, 1]) and their paths.

    Each line of every jsonl file is one JSON object. The image path is taken
    from 'conditioning_image' when present, otherwise from 'data_path'; both
    are resolved relative to the jsonl file's own directory.
    """

    def __init__(self, jsons=('test.jsonl',)):
        """
        Args:
            jsons: iterable of jsonl file paths (absolute or relative).
                The default is a tuple (not a list) to avoid the shared
                mutable-default-argument pitfall.
        """
        super().__init__()

        all_data = []
        for json_name in jsons:
            # Image paths inside the jsonl resolve relative to its directory.
            root = dirname(json_name)
            # Open json_name directly: the previous os.path.join(root, json_name)
            # duplicated the directory component for relative paths such as
            # 'a/test.jsonl' -> 'a/a/test.jsonl'.
            with open(json_name, "r") as f:
                for line in f:
                    data_term = json.loads(line)
                    if data_term.get('conditioning_image') is not None:
                        data_term['conditioning_image'] = os.path.join(root, data_term['conditioning_image'])
                    else:
                        # Fall back to 'data_path' when no conditioning image is given.
                        data_term['conditioning_image'] = os.path.join(root, data_term['data_path'])
                    all_data.append(data_term)

        logger.warning(f"data size is {len(all_data)}")

        self.all_data = all_data
        # Optional transform pipeline applied in __getitem__; disabled by default.
        self.transforms = None

    def __len__(self):
        return len(self.all_data)

    def __getitem__(self, idx):
        """Return dict(hint=<float32 HxWx3 array in [-1, 1]>, hint_path=<str>)."""
        item = self.all_data[idx]
        input_image_path = item['conditioning_image']

        test_image = Image.open(input_image_path).convert('RGB')
        # Scale uint8 [0, 255] to float32 [-1, 1].
        test_image = np.array(test_image).astype(np.float32)
        test_image = test_image / 127.5 - 1.0
        if self.transforms is not None:
            test_image = self.transforms(test_image)

        # NOTE(review): depth unit elsewhere in this pipeline is meters.
        return dict(hint=test_image, hint_path=item['conditioning_image'])




def depth2colordepth(gt_depth):
    """Colorize a depth map into a PIL image using the 'Spectral' colormap.

    The depth is min-max normalized to [0, 1] before colorization. A constant
    depth map (max == min) is mapped to all zeros instead of dividing by zero,
    which previously produced NaN/inf pixels.
    """
    d_min, d_max = gt_depth.min(), gt_depth.max()
    span = d_max - d_min
    if span > 0:
        normed = (gt_depth - d_min) / span
    else:
        # Degenerate (flat) depth: avoid 0/0 -> NaN.
        normed = np.zeros_like(gt_depth, dtype=np.float32)

    value_colored = colorize_depth_maps(normed, 0, 1, cmap="Spectral").squeeze()
    value_colored = (value_colored * 255).astype(np.uint8)
    return Image.fromarray(chw2hwc(value_colored))


# --- model and path setup ---

# Prefer the first CUDA device when available, otherwise run on CPU.
device = "cpu"
if torch.cuda.is_available():
    device = "cuda:0"

ckpt_dir = 'checkpoints'
# Frozen vitl + conditioned vitb weights all live under the same checkpoint dir.
priorda = PriorDepthAnything(
    device=device,
    fmde_dir=ckpt_dir,
    ckpt_dir=ckpt_dir,
    cmde_dir=ckpt_dir,
    frozen_model_size='vitl',
    conditioned_model_size='vitb',
)

# Directory holding the prior depth maps (one .npy per test image).
prior_root = '/share/project/cwm/shaocong.xu/exp/Prior-Depth-Anything/data/tricky_test_mogev2_normal'

loader = InferDepthLoader(jsons=['/share/project/cwm/shaocong.xu/exp/Prior-Depth-Anything/data/tricky_testset/test.jsonl'])

# Root for all outputs of this run.
save_root_u = '/share/project/cwm/shaocong.xu/exp/Prior-Depth-Anything/logs/PriorDA_MoGe_test_mogev2_normal'

# Raw predictions go under npy/, visualizations under png/.
target_root = join(save_root_u, 'npy')
target_vis_root = join(save_root_u, 'png')
for out_dir in (target_vis_root, target_root):
    os.makedirs(out_dir, exist_ok=True)

all_save_path = []
for batch in tqdm(loader):
    # Path layout: .../<scene>/<file>.png -> parts[-3] is the scene directory,
    # parts[-1] the file name. TODO(review): confirm this holds for every jsonl entry.
    parts = batch['hint_path'].split('/')
    scene_dir, file_name = parts[-3], parts[-1]
    # Swap only the extension; the previous str.replace('png', 'npy') would also
    # corrupt file names that merely contain 'png' somewhere in the stem.
    npy_name = os.path.splitext(file_name)[0] + '.npy'

    prior_depth_path = join(prior_root, scene_dir, npy_name)
    save_root = join(target_root, scene_dir)
    target_vis_save_root = join(target_vis_root, scene_dir)
    os.makedirs(save_root, exist_ok=True)
    os.makedirs(target_vis_save_root, exist_ok=True)

    save_path = join(save_root, npy_name)

    # Run prior-conditioned depth inference and persist the raw prediction.
    output = priorda.infer_one_sample(image=batch['hint_path'], prior=prior_depth_path)
    pred = output.cpu().numpy().astype('float32')
    np.save(save_path, pred)

    # Visualization: RGB | colorized prior depth | colorized prediction.
    final_prediction = depth2colordepth(pred)
    prior_depth = depth2colordepth(np.load(prior_depth_path))
    rgb_img = Image.open(batch['hint_path'])

    vis_save_path = join(target_vis_save_root, 'concat-' + file_name)
    concat_images([rgb_img, prior_depth, final_prediction], output_path=vis_save_path)

    all_save_path.append(vis_save_path)


# Vertically stack the visualizations of the known failure cases for analysis.
analysis_list = np.loadtxt('failture-case-analysis.txt', dtype=np.str_).tolist()
# Keep only the last two path components (<scene>/<file>) as the match key.
analysis_list = [item.split('/')[-2:] for item in analysis_list]
selected = [
    x
    for x in all_save_path
    for scene, name in analysis_list
    if scene in x and name in x
]
concat_images(selected, output_path=join(target_vis_root, 'all_analysis.png'), direction='vertical')




    

    
    



