
import os 
from os.path import  join, split, exists

import model
from model.mapfns import ControlNet
from jupyters.utils import  * 
from dataset.nyuv2ssl import *
import json 
import torch

from pytorch_lightning import seed_everything
import einops
import matplotlib.pyplot as plt 




def resize_image(input_image, resolution):
    """Resize an HWC image so its short side is `resolution`, snapping both dims to multiples of 64.

    Uses Lanczos interpolation when upscaling and area interpolation when
    downscaling, matching typical ControlNet preprocessing.
    """
    height, width, _ = input_image.shape
    scale = float(resolution) / float(min(height, width))
    new_h = int(np.round(height * scale / 64.0)) * 64
    new_w = int(np.round(width * scale / 64.0)) * 64
    interp = cv2.INTER_LANCZOS4 if scale > 1 else cv2.INTER_AREA
    return cv2.resize(input_image, (new_w, new_h), interpolation=interp)


def resize_image_tensor(input_image, resolution, mode='nearest'):
    """Resize a batched image tensor so its short side is `resolution`, dims snapped to multiples of 64.

    Args:
        input_image: float tensor of shape (B, C, H, W), assumed to already be
            on the desired device (cpu or cuda).
        resolution: target length in pixels for the shorter spatial side
            (before snapping to a multiple of 64).
        mode: interpolation mode forwarded to torch.nn.functional.interpolate.
            Defaults to 'nearest', which preserves label/hint-map values.

    Returns:
        Tensor of shape (B, C, H', W') with H' and W' multiples of 64.
    """
    _, _, h, w = input_image.shape
    k = float(resolution) / float(min(h, w))
    new_h = int(round(h * k / 64.0)) * 64
    new_w = int(round(w * k / 64.0)) * 64
    # Call interpolate via the torch namespace instead of relying on a free
    # name `F` pulled in by a wildcard import elsewhere in the file.
    return torch.nn.functional.interpolate(input_image, size=(new_h, new_w), mode=mode)
    
     

def to_numpy(img):
    """Return the tensor `img` as a numpy array on host memory."""
    host_tensor = img.cpu()
    return host_tensor.numpy()


def to_tensor(img):
    """Convert a numpy image to a torch tensor, moving HWC to CHW when the input is 3-D."""
    if img.ndim == 3:
        img = img.transpose((2, 0, 1))
    return torch.from_numpy(img)

    

class ControlNetSampler:
    """Sample images from a pretrained ControlNet conditioned on NYUv2 task hints.

    Loads a ControlNet checkpoint and the NYUv2 class-name dictionary, and
    provides helpers to build the (text prompt, hint image) conditioning pair
    and to run DDIM sampling from it.
    """

    def __init__(self, control_net_load_path, class_dict_path='data/nyuv2_settings/class_dict.json'):
        """
        control_net_load_path: path to a torch checkpoint; when it contains a
            'mapfns' entry those weights are loaded into the ControlNet.
        class_dict_path: JSON file mapping class index -> class name for NYUv2.
        """
        with open(class_dict_path, 'r') as f:
            # JSON object keys are strings; semantic maps index with ints.
            self.class_dict = {int(k): v for k, v in json.load(f).items()}

        self.task_dict = {
            0: 'semantic segmentation',
            1: 'depth estimation',
            2: 'surface normal estimation'
        }

        # Hint-channel count per task: 13 semantic classes, 1 depth, 3 normals.
        self.task_channels = [13, 1, 3]

        # ControlNet requires an output dir even though nothing is written here.
        tmp_control_net_save_path = 'logs/debug_controlnet'
        mapfns = ControlNet(out_dir=tmp_control_net_save_path)

        # map_location='cpu' keeps checkpoint loading working on CPU-only
        # hosts; callers move the model to the desired device afterwards.
        checkpoint = torch.load(control_net_load_path, map_location='cpu')
        if checkpoint.get('mapfns') is not None:
            # NOTE(review): with a ControlNet-specific checkpoint layout the
            # model structure may be inconsistent and need further handling;
            # printing the result surfaces missing/unexpected keys.
            print(mapfns.load_state_dict(checkpoint['mapfns']))

        self.control_net_model = mapfns

    def get_channel_num(self, task_id):
        """Return the number of hint channels for `task_id` (0, 1 or 2)."""
        return self.task_channels[task_id]

    def get_prompt(self, task_id, semantic_map):
        """Build a text prompt listing the classes present in `semantic_map`.

        semantic_map: tensor of class indices; -1 entries (ignore label) are
        skipped, remaining values are shifted by +1 to index `class_dict`.
        """
        class_in_scene = [self.class_dict[x + 1] for x in semantic_map.unique().tolist() if x != -1]

        text_prompt = "an authentic indoor scene, including {}, the details consistent with the provided {}".format(
            ','.join(class_in_scene), self.task_dict[task_id])

        return text_prompt

    def painter_process(self, task_id, hint_img, is_gt=True):
        """Convert a raw task label map (`hint_img`, tensor) into a ControlNet hint image."""
        task_channel_num = self.get_channel_num(task_id)
        new_hint_img = self.control_net_model.painter_process(hint_img, task_id, task_channel_num, is_gt=is_gt)
        return new_hint_img

    def get_controlnet_input(self, hint_img, semantic_map, task_id=2, origin_img=None, is_gt=True):
        """Prepare the (prompt, hint) pair fed to the ControlNet sampler.

        Everything stays a tensor so gradients can flow through the hint.

        hint_img: tensor [B, H, W] (no channel dim) with raw task labels.
        semantic_map: tensor (or numpy) of semantic classes, used for the prompt.
        origin_img: optional tensor resized to the same size purely for
            visualization; only returned when provided.
        """
        text_prompt = self.get_prompt(task_id, semantic_map)

        new_hint_img = self.painter_process(task_id, hint_img, is_gt=is_gt)
        new_hint_img = new_hint_img.permute(0, 3, 1, 2)  # [B, H, W, C] -> [B, C, H, W]
        new_hint_img_resized = resize_image_tensor(new_hint_img.float(), 512)

        if origin_img is not None:
            origin_img_resized = resize_image_tensor(origin_img, 512)
            return text_prompt, new_hint_img_resized, origin_img_resized

        return text_prompt, new_hint_img_resized

    def samples(self, prompt, hint_map, scale=5,
                ddim_steps=30, eta=0, seed=-1,
                strength=1, guess_mode=False):
        """Run DDIM sampling conditioned on `prompt` and `hint_map`.

        prompt: a string, or a list of per-sample strings.
        hint_map: tensor [B, C, H, W]; values either already in [0, 1] or in
            byte range [0, 255] (then divided by 255).
        scale: unconditional guidance scale (quality; upstream default was 9).
        ddim_steps: number of DDIM steps (sampling speed).
        eta: DDIM eta (diversity).
        seed: RNG seed; -1 draws a random seed.
        strength: strength of the hint-image condition.
        guess_mode: drop the hint from the unconditional branch and ramp the
            control scales geometrically.

        Returns:
            (decoded images in [0, 255], channels-last; raw latent samples)
        """
        B, C, H, W = hint_map.shape

        with torch.no_grad():
            # Normalize the hint to [0, 1] if it arrives in byte range.
            if hint_map.max() > 1:
                hint_map = hint_map / 255.0
            control = hint_map.clone()
            # Explicit check instead of `assert` (asserts vanish under -O).
            if torch.any(control > 1) or torch.any(control < 0):
                raise ValueError('hint_map values must lie in [0, 1] after normalization')

            # BUG FIX: a leftover debug override (`seed = 10`) used to shadow
            # the argument here, making every call deterministic and the
            # `seed == -1` branch dead code.
            if seed == -1:
                seed = random.randint(0, 65535)
            seed_everything(seed)

            # The hint-image condition is shared between the conditional and
            # unconditional branches; only the text condition differs.
            if isinstance(prompt, list):
                cond = {"c_concat": [control], "c_crossattn":
                        [self.control_net_model.model.get_learned_conditioning(prompt)]}
            else:
                cond = {"c_concat": [control], "c_crossattn":
                        [self.control_net_model.model.get_learned_conditioning([prompt] * B)]}

            un_cond = {"c_concat": None if guess_mode else [control],
                       "c_crossattn": [self.control_net_model.model.get_learned_conditioning([""] * B)]}

            # Latent space is 8x spatially downsampled with 4 channels.
            shape = (4, H // 8, W // 8)

            # Guess mode ramps the 13 control scales geometrically
            # (0.825**12 < 0.01 but 0.826**12 > 0.01); otherwise flat strength.
            self.control_net_model.model.control_scales = [strength * (0.825 ** float(12 - i))
                                                           for i in range(13)] if guess_mode else ([strength] * 13)

            samples, intermediates = self.control_net_model.ddim_sampler.sample(
                ddim_steps, B, shape, cond, verbose=False, eta=eta,
                unconditional_guidance_scale=scale,
                unconditional_conditioning=un_cond)

            # Decode latents [B, 4, H/8, W/8] -> images [B, 3, H, W], then map
            # from [-1, 1] to [0, 255] and move channels last.
            x_samples = self.control_net_model.model.decode_first_stage(samples)
            x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).clip(0, 255).squeeze()

            return x_samples, samples

    