import torch
from PIL import Image
from transformers import AutoProcessor, LlavaForConditionalGeneration


# Single image used by the test() smoke test below.
IMAGE_PATH = "/data/shengjie/style_zhenzhi/img_8.jpg"
# Captioning instruction sent as the user turn of every conversation.
PROMPT = "Write a concise descriptive caption about texture and shape for this garment in a professional tone. For example, 'The texture of garment is <texture description> and the shape is <shape description> '"
# MODEL_NAME = "fancyfeast/llama-joycaption-beta-one-hf-llava"
# Local checkpoint of the JoyCaption (beta-one) Llava model; the commented
# line above is the corresponding Hugging Face hub ID.
MODEL_NAME = "/home/shengjie/ckp/llama-joycaption-beta-one-hf-llava"


# Load JoyCaption processor and model once at import time; both are used as
# module-level globals by test(), generate_caption_from_img(), and the batch
# driver below.
# bfloat16 is the native dtype of the LLM used in JoyCaption (Llama 3.1)
# device_map=0 loads the model into the first GPU
processor = AutoProcessor.from_pretrained(MODEL_NAME)
llava_model = LlavaForConditionalGeneration.from_pretrained(MODEL_NAME, 
                                                            torch_dtype="bfloat16", 
                                                            device_map=0)
# Inference only — disable dropout/batch-norm training behavior.
llava_model.eval()

def test():
    """Caption the single image at IMAGE_PATH and print the result.

    Smoke test for the processor/model pipeline; mirrors
    generate_caption_from_img() but prints instead of returning.
    Uses the module-level ``processor`` and ``llava_model`` globals.
    """
    with torch.no_grad():
        # Load image
        image = Image.open(IMAGE_PATH)

        # Build the conversation (system + user turns).
        convo = [
            {
                "role": "system",
                "content": "You are a helpful image captioner.",
            },
            {
                "role": "user",
                "content": PROMPT,
            },
        ]

        # Format the conversation.
        # WARNING: HF's handling of chats on Llava models is very fragile.
        # This specific combination of processor.apply_chat_template() and
        # processor() works, but with other combinations always inspect the
        # final input_ids — stray duplicate <bos> tokens can make the model
        # perform poorly.
        convo_string = processor.apply_chat_template(
            convo, tokenize=False, add_generation_prompt=True
        )
        assert isinstance(convo_string, str)

        # Tokenize text, preprocess the image, and move everything to GPU.
        inputs = processor(text=[convo_string], images=[image], return_tensors="pt").to('cuda')
        # Pixel values must match the model's bfloat16 dtype.
        inputs['pixel_values'] = inputs['pixel_values'].to(torch.bfloat16)

        # Generate the caption tokens (sampling with nucleus filtering).
        generate_ids = llava_model.generate(
            **inputs,
            max_new_tokens=256,
            do_sample=True,
            suppress_tokens=None,
            use_cache=True,
            temperature=0.6,
            top_k=None,
            top_p=0.9,
        )[0]

        # Trim off the prompt tokens, keeping only the generated tail.
        generate_ids = generate_ids[inputs['input_ids'].shape[1]:]

        # Decode the caption and print it.
        caption = processor.tokenizer.decode(
            generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        print(caption.strip())
def generate_caption_from_img(img_path, prompt=PROMPT, max_new_tokens=512):
    """Generate a caption for one image with the loaded JoyCaption model.

    Uses the module-level ``processor`` and ``llava_model`` globals.

    Args:
        img_path: Path to the image file to caption.
        prompt: User-turn instruction sent to the model. Defaults to the
            module-level PROMPT, so existing single-argument callers are
            unaffected.
        max_new_tokens: Generation budget for the caption (default 512,
            matching the original hard-coded value).

    Returns:
        The decoded caption as a stripped string.
    """
    with torch.no_grad():
        # Load image
        image = Image.open(img_path)

        # Build the conversation (system + user turns).
        convo = [
            {
                "role": "system",
                "content": "You are a helpful image captioner.",
            },
            {
                "role": "user",
                "content": prompt,
            },
        ]

        # Format the conversation.
        # WARNING: HF's handling of chats on Llava models is very fragile.
        # This specific combination of processor.apply_chat_template() and
        # processor() works, but with other combinations always inspect the
        # final input_ids — stray duplicate <bos> tokens can make the model
        # perform poorly.
        convo_string = processor.apply_chat_template(
            convo, tokenize=False, add_generation_prompt=True
        )
        assert isinstance(convo_string, str)

        # Tokenize text, preprocess the image, and move everything to GPU;
        # pixel values must match the model's bfloat16 dtype.
        inputs = processor(text=[convo_string], images=[image], return_tensors="pt").to('cuda')
        inputs['pixel_values'] = inputs['pixel_values'].to(torch.bfloat16)

        # Generate the caption tokens (sampling with nucleus filtering).
        generate_ids = llava_model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            suppress_tokens=None,
            use_cache=True,
            temperature=0.6,
            top_k=None,
            top_p=0.9,
        )[0]

        # Trim off the prompt tokens, keeping only the generated tail.
        generate_ids = generate_ids[inputs['input_ids'].shape[1]:]

        # Decode the caption.
        caption = processor.tokenizer.decode(
            generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        return caption.strip()

import os,pdb,json
from tqdm import tqdm
def process_t2_shape_to_json(t2_dir, shape_dir, save_json_path):
    """Caption every file in two directories and save results as one JSON.

    Output JSON structure::

        {
            "t2":    {filename: caption, ...},
            "shape": {filename: caption, ...},
        }

    Args:
        t2_dir: Directory of garment ("t2") images.
        shape_dir: Directory of shape (synthesis) images.
        save_json_path: Destination path for the JSON file.

    NOTE(review): every directory entry is captioned — non-image files in
    either directory would be passed to the model. Removed a leftover
    pdb.set_trace() that previously blocked unattended runs right before
    the results were written out.
    """
    dic = {
        't2': {},
        'shape': {},
    }

    # Same per-file work for both sections: caption and record under the
    # bare filename. t2 first, then shape, matching the original order.
    for section, directory in (('t2', t2_dir), ('shape', shape_dir)):
        for fname in tqdm(os.listdir(directory)):
            path = os.path.join(directory, fname)
            assert os.path.exists(path)
            caption = generate_caption_from_img(path)
            dic[section][fname] = caption
            print(caption)

    with open(save_json_path, 'w', encoding='utf-8') as f:
        json.dump(dic, f, ensure_ascii=False, indent=4)


if __name__ == '__main__':
    # Batch-caption both directories and persist the results to JSON.
    # A leftover pdb.set_trace() debugger breakpoint after the call was
    # removed so the script exits cleanly when run unattended.
    img_dir = '/data/shengjie/style_zhenzhi/'
    shape_dir = '/data/shengjie/synthesis_zhenzhi/'
    save_json_path = './caption.json'

    process_t2_shape_to_json(img_dir, shape_dir, save_json_path)

    # test()