from PIL import Image
from transformers import LlavaProcessor, LlavaForConditionalGeneration, BitsAndBytesConfig
import torch
import json
from data import get_loader
from peft import LoraConfig, get_peft_model, PeftModel, PeftMixedModel
import deepspeed
import os

DEVICE='cuda:0' if torch.cuda.is_available() else 'cpu'
USE_DEEPSPEED = False
# QUANTIZED = True
# `LORA_ONLY_PROJECTOR`: controls which LoRA modules are trained. If True, only the
# multi-modal projector layers are trained; if False, only the language layers are trained.
LORA_ONLY_PROJECTOR = True

# Freeze the vision tower; fine-tune only the multi-modal projector and the language layers.
def load_model(model, config, only_projector=False):
    """Attach LoRA adapters to the (quantized) LLaVA base model.

    Args:
        model: the base ``LlavaForConditionalGeneration`` model.
        config: parsed JSON config; reads ``config["lora_config"]`` for
            r / alpha / dropout / target_modules.
        only_projector: if True, LoRA targets only the multi-modal projector
            linear layers; if False, LoRA targets the language-model modules
            listed in the config (the projector adapter is loaded from disk
            first and a second "language_lora" adapter is added on top).

    Returns:
        The PEFT-wrapped model with the appropriate adapter active.
    """
    LORA_R = config["lora_config"]["r"]
    LORA_ALPHA = config["lora_config"]["alpha"]
    LORA_DROPOUT = config["lora_config"]["dropout"]
    PROJECTOR_TARGET_MODULES = ["multi_modal_projector.linear_1", "multi_modal_projector.linear_2"]
    LANGUAGE_TARGET_MODULES = config["lora_config"]["target_modules"]
    lora_config = LoraConfig(
        r=LORA_R,
        lora_alpha=LORA_ALPHA,
        target_modules=PROJECTOR_TARGET_MODULES if only_projector else LANGUAGE_TARGET_MODULES,
        lora_dropout=LORA_DROPOUT,
        bias="none",
        task_type="CAUSAL_LM"
    )
    # First projector-only run: no saved checkpoint yet, create a fresh adapter.
    if only_projector and not os.path.exists("./lora_model_only_projector"):
        model = get_peft_model(model, lora_config)
    else:
        # Two cases reach this branch: (1) only_projector is True and a saved
        # "./lora_model_only_projector" checkpoint exists (resume projector
        # training), or (2) only_projector is False (second stage: load the
        # trained projector adapter, then train the language layers).
        # NOTE(review): case (2) assumes the projector checkpoint directory
        # exists -- from_pretrained will fail if stage one was never run.
        model = PeftModel.from_pretrained(
            model,
            "./lora_model_only_projector",
            torch_dtype=torch.bfloat16,
            is_trainable=True,
        )
        # When only_projector is False, add the language-layer adapter via
        # add_adapter and activate it with set_adapter, so only the language
        # layers are fine-tuned. PeftModel supports a single active adapter;
        # PeftMixedModel supports several at once but not 4/8-bit quantization.
        if not only_projector:
            model.add_adapter("language_lora", lora_config)
            model.set_adapter("language_lora")
            # Freeze the vision tower so only the projector / language LoRA
            # parameters remain trainable.
            for param in model.vision_tower.parameters():
                param.requires_grad = False
    model.print_trainable_parameters()
    # NOTE(review): set_adapter activates only the most recently selected
    # adapter -- confirm whether both 'default' and 'language_lora' are
    # expected to appear here.
    print("Active Adapters:", model.active_adapters)
    print("Adapter Configs:", model.peft_config)  # all registered adapter configs
    return model

def process_data(image_filepath, caption, prompt, llava_processor:LlavaProcessor):
    """Build one training batch for LLaVA.

    For each sample, the (shared) prompt is tokenized together with its image;
    the captions are tokenized separately as one padded batch and concatenated
    after the prompt. Labels mask the prompt and the padding with -100 so only
    caption tokens contribute to the loss.

    Args:
        image_filepath: list of image file paths (batch).
        caption: list of target caption strings, same length as image_filepath.
        prompt: the chat-templated prompt text (identical for every sample).
        llava_processor: processor that tokenizes text and preprocesses images.

    Returns:
        dict with "input_ids", "attention_mask", "pixel_values" and "labels",
        each a tensor with the batch dimension first.
    """
    bs = len(image_filepath)
    prompt_tokens_list = []
    text_list = []
    for i in range(bs):
        # Use a context manager so the image file handle is closed promptly
        # instead of leaking until garbage collection (PIL opens files lazily).
        with Image.open(image_filepath[i]) as image:
            prompt_tokens = llava_processor(text=prompt, images=image, return_tensors="pt",
                                            padding="longest", truncation=True)
        prompt_tokens_list.append(prompt_tokens)
        text_list.append(caption[i] + "<|im_end|>")

    # The prompt is identical for every sample, so its tokenized length can be
    # read from the first entry; -100 marks positions ignored by the loss.
    prompt_token_size = prompt_tokens_list[0]["input_ids"].shape[1]
    prompt_label = torch.full((1, prompt_token_size), -100)

    # Tokenize all captions together so they share a single padded length.
    text_tokens_with_padding = llava_processor(text=text_list, return_tensors="pt",
                                               padding="longest", truncation=True)

    label_list = []
    for i in range(len(prompt_tokens_list)):
        mask = text_tokens_with_padding["attention_mask"][i]
        valid_tokens_num = int(mask.sum())               # real caption tokens
        padding_num = mask.shape[0] - valid_tokens_num   # trailing pad tokens
        # Full label: prompt masked, caption supervised, padding masked.
        label = torch.concat([prompt_label,
                              text_tokens_with_padding["input_ids"][i][0:valid_tokens_num].reshape(1, -1),
                              torch.full((1, padding_num), -100)],
                              dim=-1)
        label_list.append(label)
        # Append the caption tokens (and their mask) after the prompt tokens.
        prompt_tokens_list[i]["input_ids"] = torch.concat([prompt_tokens_list[i]["input_ids"],
                                                           text_tokens_with_padding["input_ids"][i].reshape(1, -1)],
                                                           dim=-1)
        prompt_tokens_list[i]["attention_mask"] = torch.concat([prompt_tokens_list[i]["attention_mask"],
                                                                text_tokens_with_padding["attention_mask"][i].reshape(1, -1)],
                                                                dim=-1)

    return {
        "input_ids": torch.concat([p["input_ids"] for p in prompt_tokens_list], dim=0),
        "attention_mask": torch.concat([p["attention_mask"] for p in prompt_tokens_list], dim=0),
        "pixel_values": torch.concat([p["pixel_values"] for p in prompt_tokens_list], dim=0),
        "labels": torch.concat(label_list, dim=0),
    }


if __name__ == "__main__":
    # Pick the config file that matches the selected training backend.
    if USE_DEEPSPEED:
        with open("deepspeed_config.json") as f:
            config = json.load(f)
    else:
        with open("config.json") as f:
            config = json.load(f)

    # QLoRA-style 4-bit NF4 quantization. load_in_4bit=True is required:
    # without it the bnb_4bit_* settings in this config are not applied.
    nf4_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.bfloat16
    )

    bs = config["train_config"]["batch_size"]
    model_name_or_path = config["model_config"]["model_path"]

    llava_processor = LlavaProcessor.from_pretrained(model_name_or_path)
    tokenizer = llava_processor.tokenizer
    # Needed so the processor can expand <image> into the correct number of
    # patch tokens for this checkpoint.
    llava_processor.patch_size = llava_processor.image_processor.patch_size
    model = LlavaForConditionalGeneration.from_pretrained(
        model_name_or_path,
        device_map="auto",
        torch_dtype=torch.bfloat16,
        quantization_config=nf4_config
    )
    model = load_model(model, config, LORA_ONLY_PROJECTOR)  # attach LoRA adapters
    # Sanity check: show which parameters are trainable and the LoRA weights.
    for name, param in model.named_parameters():
        print(name, param.requires_grad)
    print(model.base_model.model.multi_modal_projector.linear_1.lora_A.default.weight)
    print(model.base_model.model.multi_modal_projector.linear_1.lora_B.default.weight)
    # DeepSpeed only optimizes the trainable (LoRA) parameters; this was
    # previously commented out, which made the deepspeed branch crash with a
    # NameError on `parameters`.
    parameters = filter(lambda p: p.requires_grad, model.parameters())
    if USE_DEEPSPEED:
        model, optimizer, _, lr_scheduler = deepspeed.initialize(
            model=model, config="deepspeed_config.json", model_parameters=parameters)
    else:
        optimizer = torch.optim.AdamW(model.parameters(),
                                      lr=config["train_config"]["lr"],
                                      weight_decay=config["train_config"]["weight_decay"])
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5000, eta_min=3e-5)

    prompt_text = "<image>\nWhat are these? Describe the contents of this image."
    messages = [
        {"role": "system", "content": "You are a helpful assistant. Can you please describe the contents of this image in the following way: (1) In one to two sentences at most under the heading entitled 'DESCRIPTION' (2) Transcribe any text found within the image and where it is located under the heading entitled 'TEXT'?\n\nFor example, you might describe a picture of a palm tree with a logo on it in the center that spells the word COCONUT as:\n\nDESCRIPTION\nA photograph of a palm tree on a beach somewhere, there is a blue sky in the background and it is a sunny day. There is a blue text logo with white outline in the center of the image.\n\nTEXT\nThe text logo in the center of the image says, \"COCONUT\".\n\nBe sure to describe all the text that is found in the image."},
        {"role": "user", "content": prompt_text},
    ]
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    TextOCR_dataloader = get_loader(dataset_dir=config["data_config"]["dataset_dir"],
                                    text_filename=config["data_config"]["text_filename"],
                                    image_dir=config["data_config"]["image_dir"],
                                    batch_size=config["train_config"]["batch_size"])

    model.train()
    for epoch in range(config["train_config"]["epoch"]):
        i = 0
        for image_filepath, caption in TextOCR_dataloader:
            print("epoch:", epoch, "i:", i)
            data = process_data(image_filepath=image_filepath, caption=caption,
                                prompt=prompt, llava_processor=llava_processor)
            for k in data.keys():
                data[k] = data[k].to(model.device)
            output = model(**data)
            loss = output.loss
            # Previously the "cuda memory use:" label was printed with no value.
            print("loss:", loss,
                  "cuda memory use:", torch.cuda.memory_allocated(0) / (1024 * 1024),
                  "lr:", lr_scheduler.get_lr())
            if USE_DEEPSPEED:
                # DeepSpeed owns the backward pass and the optimizer/LR step.
                model.backward(loss)
                model.step()
            else:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                lr_scheduler.step()
            # NOTE: thanks to LoRA, the optimizer state is tiny; memory usage is
            # dominated by the model weights and activations.
            i += 1
        # Checkpoint the active adapter(s) after every epoch.
        if LORA_ONLY_PROJECTOR:
            model.save_pretrained('./lora_model_only_projector')
        else:
            model.save_pretrained('./lora_model')

# Qwen's decode stage uses a lower-triangular causal mask that covers the whole sequence, question and answer alike.
# input_ids concatenates the question and the answer; applying the mask to the full sequence is still correct, because
# decoder outputs for positions where most of the question (and all of the answer) is masked are simply discarded.
# Only the outputs for positions where the question is fully visible and the answer is partially masked are used.

    # "Can you please describe the contents of this image in the following way: \
    # (1) In one to two sentences at most under the heading entitled 'DESCRIPTION' \
    # (2) Transcribe any text found within the image and where it is located under the heading entitled 'TEXT'?\n\n \
    # For example, you might describe a picture of a palm tree with a logo on it in the center that spells the word COCONUT as:\n\n\
    # DESCRIPTION\nA photograph of a palm tree on a beach somewhere, there is a blue sky in the background and it is a sunny day. \
    # There is a blue text logo with white outline in the center of the image.\n\nTEXT\nThe text logo in the center of the image says, \"COCONUT\".\n\n\
    # Be sure to describe all the text that is found in the image.
    # "
