import os 




from os.path import join, dirname, abspath  





import torchvision
from PIL import Image
import json

import numpy as np  
import torch   
import cv2

from promptda.utils.depth_utils import visualize_depth
import matplotlib.pyplot as plt

from tqdm import tqdm   


from promptda.promptda import PromptDA
from promptda.utils.io_wrapper import load_image, load_depth, save_depth



from testset_loader import InferDepthLoader

def ensure_multiple_of(x, multiple_of=14):
    """Round *x* down to the nearest integer multiple of *multiple_of*.

    Used to snap image side lengths to the patch size expected by the
    ViT backbone (14 px by default).
    """
    floored_units = x // multiple_of
    return int(floored_units * multiple_of)






class InferDepthNpyLoader_no_normalizer:
    """Dataset yielding (RGB image, coarse .npy prompt depth) pairs for inference.

    Each line of the given jsonl files is a JSON record expected to contain:
      * 'conditioning_image' (or 'data_path' as a fallback): RGB image path,
        resolved against the jsonl file's directory when not absolute.
      * 'moge_image': path to a .npy coarse/prompt depth map (unit is meters,
        per the original author's note -- TODO confirm with the data producer).

    No normalization is applied beyond scaling the RGB image to [0, 1].
    """

    def __init__(self, jsons=('test.jsonl',)):
        """Load and index all records from the given jsonl files.

        Args:
            jsons: iterable of jsonl file paths. Absolute paths recommended;
                relative image paths inside each file are resolved against
                that file's directory.
        """
        all_data = []
        for json_name in jsons:
            # Directory of the jsonl file: relative image paths are resolved
            # against it. BUG FIX: previously the jsonl was opened via
            # os.path.join(root, json_name), which double-joins the directory
            # for relative paths; open the given path directly instead.
            root = dirname(json_name)
            with open(json_name, "r") as f:
                for line in f:
                    data_term = json.loads(line)
                    # Fall back to 'data_path' when no conditioning image is given.
                    cond = data_term.get('conditioning_image')
                    if cond is None:
                        cond = data_term['data_path']
                    data_term['conditioning_image'] = os.path.join(root, cond)
                    all_data.append(data_term)

        print(f"data size is {len(all_data)}")

        self.all_data = all_data
        # ToTensor converts HxWxC numpy arrays to CxHxW tensors. It only
        # rescales uint8 inputs, so float arrays must be pre-scaled to [0, 1].
        self.transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
        ])

    def __len__(self):
        return len(self.all_data)

    def __getitem__(self, idx):
        """Return dict(hint=Cx H x W float image in [0,1], prompt_depth=1xHxW, hint_path=str)."""
        item = self.all_data[idx]
        input_image_path = item['conditioning_image']

        # NOTE(review): no resizing here -- the inference loop is expected
        # to resize to the network's input size.
        test_image = Image.open(input_image_path).convert('RGB')
        test_image = np.array(test_image).astype(np.float32)
        test_image = test_image / 255.0  # H, W, C in [0, 1]
        test_image = self.transforms(test_image)  # C, H, W

        coarse_depth = np.load(item['moge_image'])
        coarse_depth = self.transforms(coarse_depth)

        # Depth unit is meters (per original comment).
        return dict(hint=test_image, prompt_depth=coarse_depth, hint_path=input_image_path)
        




# Build the evaluation dataset from the MoGe test split.
_infer_dataset = InferDepthNpyLoader_no_normalizer(
    jsons=['/share/project/cwm/shaocong.xu/exp/Lotus/data/MoGe_submission/test_full.jsonl']
)

# Wrap the dataset in a DataLoader. batch_size stays at 1: the saving code
# in the inference loop asserts a single sample per batch.
depth_loader = torch.utils.data.DataLoader(
    _infer_dataset,
    batch_size=1,
    shuffle=False,
    num_workers=0,
    pin_memory=True,
)


DEVICE = 'cuda'

# NOTE: a ViT-L checkpoint is also available at
# ckpt/prompt-depth-anything-vitl/model.ckpt (loadable without model_kwargs).
model_path = '/share/project/cwm/shaocong.xu/exp/PromptDA/ckpt/prompt-depth-anything-vits-transparent/model.ckpt'
model = PromptDA.from_pretrained(model_path, model_kwargs={"encoder": "vits"}).to(DEVICE).eval()

# ViT patch size and the maximum allowed longest image side for the network.
multiple_of = 14
max_size = 1008

# Output layout: <target>/npy for raw predictions, <target>/vis for figures.
target_path = '/share/project/cwm/shaocong.xu/exp/PromptDA/logs/promptDA_moge/'
npy_path = join(target_path, 'npy')
vis_path = join(target_path, 'vis')
for _out_dir in (target_path, npy_path, vis_path):
    os.makedirs(_out_dir, exist_ok=True)




for batch in tqdm(depth_loader):
    image = batch['hint']                  # B x 3 x H x W, float in [0, 1]
    prompt_depth = batch['prompt_depth']   # B x 1 x H x W, meters (per loader)

    # Downscale so the longest spatial side is <= max_size, snapped to the ViT
    # patch size; keep the inverse transform to restore the original resolution.
    # FIX: compare max(h, w) rather than max over the whole shape tuple, which
    # also included the batch/channel dims.
    tensor_resizer = inverse_tensor_resizer = None
    h, w = image.shape[-2:]
    if max(h, w) > max_size:
        scale = max_size / max(h, w)
        tar_h = ensure_multiple_of(h * scale)
        tar_w = ensure_multiple_of(w * scale)
        tensor_resizer = torchvision.transforms.Resize((tar_h, tar_w))
        inverse_tensor_resizer = torchvision.transforms.Resize((h, w))
        image, prompt_depth = tensor_resizer(image), tensor_resizer(prompt_depth)

    # Inference only: no_grad avoids tracking autograd state / activations.
    with torch.no_grad():
        out_depth = model.predict(image.to(DEVICE), prompt_depth.to(DEVICE))  # depth in meters

    if inverse_tensor_resizer is not None:
        out_depth = inverse_tensor_resizer(out_depth)
        image = inverse_tensor_resizer(image)
        prompt_depth = inverse_tensor_resizer(prompt_depth)

    # Save the raw prediction as .npy, mirroring the <scene>/<file> layout of
    # the input path (tmp[-3] is the scene directory, tmp[-1] the file name).
    assert len(batch['hint_path']) == 1, "saving logic assumes batch_size == 1"
    tmp = batch['hint_path'][0].split('/')
    os.makedirs(join(npy_path, tmp[-3]), exist_ok=True)
    np.save(join(npy_path, tmp[-3], tmp[-1].replace('.png', '.npy')),
            out_depth.squeeze().cpu().numpy())

    # Save a side-by-side visualization: input / prompt depth / predicted depth.
    plt.figure(figsize=(15, 5))

    plt.subplot(1, 3, 1)
    plt.title('Input Image')
    plt.imshow(image.cpu().squeeze().permute(1, 2, 0).numpy())

    plt.subplot(1, 3, 2)
    plt.title('prompt depth')
    plt.imshow(visualize_depth(prompt_depth.cpu().squeeze()), cmap='plasma')

    plt.subplot(1, 3, 3)
    plt.title('prediction depth')
    plt.imshow(visualize_depth(out_depth.cpu().squeeze()), cmap='plasma')

    os.makedirs(join(vis_path, tmp[-3]), exist_ok=True)
    plt.savefig(join(vis_path, tmp[-3], tmp[-1]))
    # BUG FIX: without plt.close(), matplotlib keeps every figure alive and
    # memory grows with each iteration over the whole test set.
    plt.close()

