from PIL import Image
from transformers import LlavaProcessor, LlavaForConditionalGeneration, BitsAndBytesConfig
import torch
import json
from data import get_loader
from peft import PeftModel,PeftMixedModel

if __name__ == "__main__":
    # Read the model path from the project-level config file.
    with open("config.json") as f:
        config = json.load(f)

    model_name_or_path = config["model_config"]["model_path"]

    llava_processor = LlavaProcessor.from_pretrained(model_name_or_path)
    # Left padding is required for generation with decoder-only models.
    llava_processor.tokenizer.padding_side = "left"
    tokenizer = llava_processor.tokenizer
    # Newer transformers versions expect patch_size on the processor itself so
    # the <image> placeholder can be expanded to the right number of tokens.
    llava_processor.patch_size = llava_processor.image_processor.patch_size

    model = LlavaForConditionalGeneration.from_pretrained(
        model_name_or_path,
        device_map="auto",
        torch_dtype=torch.bfloat16,
    )

    # Two adapters are loaded here (projector LoRA + language-model LoRA),
    # which requires PeftMixedModel instead of PeftModel.
    model = PeftMixedModel.from_pretrained(
        model,
        "./lora_model",
        torch_dtype=torch.bfloat16,
    )
    model.load_adapter("./lora_model/language_lora", adapter_name="language_lora")
    model.set_adapter(['language_lora', 'default'])
    print("Active Adapters:", model.active_adapters)  # expected: ['default', 'language_lora']
    print("Adapter Configs:", model.peft_config)  # expected: two independent adapter configs
    model.eval()

    prompt_text = "<image>\nWhat are these? Describe the contents of this image."
    messages = [
        {"role": "system", "content": "You are a helpful assistant. Can you please describe the contents of this image in the following way: (1) In one to two sentences at most under the heading entitled 'DESCRIPTION' (2) Transcribe any text found within the image and where it is located under the heading entitled 'TEXT'?\n\nFor example, you might describe a picture of a palm tree with a logo on it in the center that spells the word COCONUT as:\n\nDESCRIPTION\nA photograph of a palm tree on a beach somewhere, there is a blue sky in the background and it is a sunny day. There is a blue text logo with white outline in the center of the image.\n\nTEXT\nThe text logo in the center of the image says, \"COCONUT\".\n\nBe sure to describe all the text that is found in the image."},
        {"role": "user", "content": prompt_text},
    ]
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

    # Context manager so the image file handle is released after preprocessing.
    image_path = "./000000039769.jpg"
    with Image.open(image_path) as image:
        inputs = llava_processor(text=prompt, images=image, return_tensors="pt")

    # Move every input tensor onto the model's device in one pass.
    inputs = {name: tensor.to(model.device) for name, tensor in inputs.items()}

    # no_grad: inference only — skip building the autograd graph.
    with torch.no_grad():
        generate_ids = model.generate(**inputs, max_new_tokens=100)
    print(tokenizer.decode(generate_ids[0]))