import torch
import loguru
from   transformers import BertForSequenceClassification
from   peft         import (LoraConfig, TaskType, get_peft_model, 
                            LoftQConfig, PeftModel, PeftConfig)
from   utils        import get_loader, get_model

logger = loguru.logger

# Run on the first GPU when one is available, otherwise fall back to CPU.
device = 'cpu' if not torch.cuda.is_available() else 'cuda:0'

# Only the last loader returned by get_loader() is used for this run.
*_, dataloader = get_loader()

pretrain_model_dir = 'model/bert_cls_pretrained'

# get_model() supplies the optimizer; the model it returns is immediately
# replaced by the locally pretrained checkpoint below.
# NOTE(review): that optimizer was constructed for get_model()'s own model,
# not for the checkpoint loaded here — verify the parameter groups it holds
# actually belong to the model being trained.
model, optimizer, _ = get_model()
model = BertForSequenceClassification.from_pretrained(pretrain_model_dir)


# Show the classification head that LoRA will target.
logger.warning(model.classifier)

# LoRA configuration.
# Common task_type values: SEQ_CLS, SEQ_2_SEQ_LM, CAUSAL_LM,
#                          TOKEN_CLS, QUESTION_ANS, FEATURE_EXTRACTION
lora_config = LoraConfig(
        task_type       = TaskType.SEQ_CLS,
        inference_mode  = False,                 # training mode, not inference
        r               = 8,                     # rank of the A/B low-rank matrices
        lora_alpha      = 32,                    # scaling factor for the LoRA update
        lora_dropout    = 0.1,
        target_modules  = ['classifier']         # only adapt the classification head
    )

model = get_peft_model(model=model, peft_config=lora_config)

# Fix: the optimizer returned by get_model() was built over a model object that
# has since been discarded (L15 reassigned `model` via from_pretrained), so its
# .step() would update the orphaned model and never touch the PEFT weights.
# Rebuild it over the trainable (LoRA + modules_to_save) parameters instead.
# NOTE(review): 5e-5 is a standard BERT fine-tuning default — confirm it
# matches the learning rate configured inside utils.get_model().
optimizer = torch.optim.AdamW(
    (p for p in model.parameters() if p.requires_grad), lr=5e-5
)

#! trainable params: 13,914 || all params: 109,510,068 || trainable%: 0.012705681088610045
model.print_trainable_parameters()

# ModulesToSaveWrapper(
#   (original_module): lora.Linear(
#     (base_layer): Linear(in_features=768, out_features=10, bias=True)
#     (lora_dropout): ModuleDict(
#       (default): Dropout(p=0.1, inplace=False)
#     )
#     (lora_A): ModuleDict(
#       (default): Linear(in_features=768, out_features=8, bias=False)
#     )
#     (lora_B): ModuleDict(
#       (default): Linear(in_features=8, out_features=10, bias=False)
#     )
#     (lora_embedding_A): ParameterDict()
#     (lora_embedding_B): ParameterDict()
#   )
#   (modules_to_save): ModuleDict(
#     (default): lora.Linear(
#       (base_layer): Linear(in_features=768, out_features=10, bias=True)
#       (lora_dropout): ModuleDict(
#         (default): Dropout(p=0.1, inplace=False)
#       )
#       (lora_A): ModuleDict(
#         (default): Linear(in_features=768, out_features=8, bias=False)
#       )
#       (lora_B): ModuleDict(
#         (default): Linear(in_features=8, out_features=10, bias=False)
#       )
#       (lora_embedding_A): ParameterDict()
#       (lora_embedding_B): ParameterDict()
#     )
#   )
# )
#! logger.warning(model.classifier)

#! Training loop.
model.to(device)
# Fix: from_pretrained() returns the model in eval mode, so dropout
# (including lora_dropout) was silently disabled; enable training mode.
model.train()
for i, data in enumerate(dataloader):
    # Move every tensor in the batch onto the training device.
    for k, v in data.items():
        data[k] = v.to(device)

    out = model(**data)
    out.loss.backward()
    # Clip gradients to guard against occasional exploding updates.
    torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
    optimizer.step()
    optimizer.zero_grad()

    # Logs every step (i % 1 is always 0); raise the modulus to log less often.
    if i % 1 == 0:
        labels = data['labels']
        preds = out.logits.argmax(1)
        acc = (labels == preds).sum().item() / len(labels)

        logger.info(f"idx = {i:3d},  loss = {out.loss.item():.4f}, acc = {acc:.4f}")

#! Save only the LoRA parameters (~120 KB), then a fully merged copy (~418 MB).
model.save_pretrained('model/bert_lora_sft_only')
model = model.merge_and_unload()                     # fold the LoRA deltas back into the base weights
model.save_pretrained('model/bert_lora_sft_merge')

#! Reload the two pieces separately: base checkpoint + saved LoRA adapter.
model = BertForSequenceClassification.from_pretrained(pretrain_model_dir)
# Fix: the loaded adapter config was previously discarded; bind it so it can
# actually be inspected/used.
peft_config = PeftConfig.from_pretrained("model/bert_lora_sft_only")
model = PeftModel.from_pretrained(model, "model/bert_lora_sft_only",
                                is_trainable=True)   # attach (not merge) the saved LoRA layers


