from PIL import Image
import requests,os,json
from tqdm import tqdm
from transformers import Blip2Processor, Blip2ForConditionalGeneration
import torch,pdb

# Local path to a pre-downloaded BLIP-2 (OPT-2.7b) checkpoint.
BLIP_CKP = '/home/shengjie/ckp/blip2-opt-2.7b'
# NOTE(review): unused placeholder — no code in this file reads it.
img_file = ''

# Prefer GPU when available; used by the captioning helpers below.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Processor handles both image preprocessing and text (de)tokenization.
processor = Blip2Processor.from_pretrained(BLIP_CKP)
# Model is loaded in 8-bit and pinned to GPU 0 via device_map — this
# assumes CUDA is present even though `device` above allows a CPU fallback.
model = Blip2ForConditionalGeneration.from_pretrained(
    BLIP_CKP, load_in_8bit=True, 
    device_map={"": 0}, torch_dtype=torch.float16
)  # doctest: +IGNORE_RESULT

def get_caption_from_img(img_path, question=None):
    """Generate a caption for an image, or answer a question about it.

    Args:
        img_path: Path to the image file on disk.
        question: Optional question; when given, the model is prompted in
            VQA style ("Question: ... Answer:"), otherwise it produces a
            free-form caption.

    Returns:
        The decoded model output as a stripped string.
    """
    # `with` closes the underlying file handle promptly; the original
    # plain Image.open leaked one descriptor per image across long loops.
    with Image.open(img_path) as image:
        text = f'Question: {question} Answer:' if question is not None else None
        inputs = processor(images=image, text=text, return_tensors="pt").to(device, torch.float16)
    generated_ids = model.generate(**inputs)
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
    return generated_text

def visual_question(img_path, question=None):
    """Run visual question answering on an image; print and return the answer.

    Args:
        img_path: Path to the image file on disk.
        question: Full prompt string; falls back to a demo question when
            omitted.

    Returns:
        The decoded model output as a stripped string (previously the
        function only printed it).
    """
    with Image.open(img_path) as image:
        prompt = question if question else "Question: how many cats are there? Answer:"
        # The original ran processor() twice (the first result was
        # immediately overwritten) and hard-coded device="cuda"; use the
        # module-level `device` so the CPU fallback actually applies.
        inputs = processor(images=image, text=prompt, return_tensors="pt").to(device, torch.float16)
    generated_ids = model.generate(**inputs)
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
    print(generated_text)
    return generated_text
def use_8_bit(img_path=None, prompt="Question: how many cats are there? Answer:"):
    """Demo: load BLIP-2 from the Hub in 8-bit (bfloat16) and answer a question.

    Args:
        img_path: Path to the image to query. Required at call time; it is
            a keyword with a default only to keep the zero-arg signature
            importable. The original body referenced an undefined name
            ``image`` and raised NameError on every call.
        prompt: VQA-style prompt fed alongside the image.

    Returns:
        The decoded model output as a stripped string.

    Raises:
        ValueError: If ``img_path`` is not provided.
    """
    if img_path is None:
        raise ValueError("use_8_bit requires img_path")
    # Fresh model instance from the Hub; intentionally shadows the
    # module-level `model` only within this function.
    demo_model = Blip2ForConditionalGeneration.from_pretrained(
        "Salesforce/blip2-opt-2.7b", load_in_8bit=True, device_map={"": 0}, torch_dtype=torch.bfloat16
    )  # doctest: +IGNORE_RESULT

    with Image.open(img_path) as image:
        inputs = processor(images=image, text=prompt, return_tensors="pt").to(device="cuda", dtype=torch.bfloat16)

    generated_ids = demo_model.generate(**inputs)
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
    print(generated_text)
    return generated_text

def process_t2_shape_to_json(t2_dir, shape_dir, save_json_path):
    """Caption every image in two directories and dump the results to JSON.

    Output layout::

        {
            "t2":    {<filename>: <caption>, ...},
            "shape": {<filename>: <caption>, ...}
        }

    Args:
        t2_dir: Directory of "t2" (texture/style) images.
        shape_dir: Directory of "shape" images.
        save_json_path: Destination path for the JSON file.
    """
    dic = {
        't2': {},
        'shape': {},
    }

    # Both directories get the same captioning question; fold the two
    # copy-pasted loops of the original into one.
    for group, src_dir in (('t2', t2_dir), ('shape', shape_dir)):
        for fname in tqdm(os.listdir(src_dir)):
            caption = get_caption_from_img(os.path.join(src_dir, fname),
                                           "What's the cloth details?")
            dic[group][fname] = caption
            print(caption)

    # The original called pdb.set_trace() here — a leftover breakpoint
    # that stopped every run before the results were written; removed.
    with open(save_json_path, 'w', encoding='utf-8') as f:
        json.dump(dic, f, ensure_ascii=False, indent=4)


if __name__ == '__main__':
    # Source directories of images to caption and the JSON output path.
    img_dir = '/data/shengjie/style_zhenzhi/'
    shape_dir = '/data/shengjie/synthesis_zhenzhi/'
    save_json_path = './caption.json'

    process_t2_shape_to_json(img_dir, shape_dir, save_json_path)
    # Removed: a trailing pdb.set_trace() (leftover debugging breakpoint
    # that left the interpreter hanging after the run) and a block of
    # commented-out scratch code.

    