
import torch
from qlora_utils import lora_utils, dataset, collator
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training,set_peft_model_state_dict
from torch.utils.data import DataLoader
from modelscope import AutoModel, AutoTokenizer, BitsAndBytesConfig
from tqdm import tqdm

# Local snapshot of the ChatGLM3-6B checkpoint downloaded via modelscope.
checkpoint_path = "C:\\Users\\16014\\.cache\\modelscope\\hub\\models\\ZhipuAI\\chatglm3-6b"

# Load the base model 4-bit quantized (QLoRA setup).
# FIX: the original also passed a top-level load_in_4bit=True alongside
# quantization_config — recent transformers versions reject that combination
# with a ValueError, and it is redundant anyway: all quantization settings
# belong inside BitsAndBytesConfig.
model = AutoModel.from_pretrained(
    checkpoint_path,
    trust_remote_code=True,  # ChatGLM ships custom modeling code in the repo
    torch_dtype=torch.float16,
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_compute_dtype=torch.float16,  # de-quantized matmuls run in fp16
        bnb_4bit_use_double_quant=True,        # also quantize the quantization constants
        bnb_4bit_quant_type="nf4",             # NormalFloat4, recommended by the QLoRA paper
        llm_int8_threshold=6.0,
        llm_int8_has_fp16_weight=False,
    ),
)

tokenizer = AutoTokenizer.from_pretrained(checkpoint_path, trust_remote_code=True)

# Pre-training housekeeping for a k-bit quantized base model
# (layer-norm casting, input-grad enabling); gradient checkpointing stays off.
model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=False)

footprint_gb = model.get_memory_footprint() / (1024 * 1024 * 1024)
print(f'memory footprint of model: {footprint_gb} GB')

# Attach LoRA only to the fused QKV projection of each transformer block.
# (lora_utils.find_all_linear_names(model) would instead target every linear layer.)
target_modules = ["query_key_value"]

# LoRA adapter configuration.
lora_settings = dict(
    # Rank of the low-rank update matrices. Common choices are 8/16/32/64
    # (the QLoRA paper uses 64); a larger rank means more trainable
    # parameters and usually better quality, at the cost of more VRAM.
    r=16,
    # Scaling factor for the LoRA update; 16 or 32 is typical.
    lora_alpha=16,
    target_modules=target_modules,
    # Dropout rate applied to the LoRA weights.
    lora_dropout=0.1,
    bias="none",
    task_type="CAUSAL_LM",
)
config = LoraConfig(**lora_settings)


# Wrap the quantized base model with the trainable LoRA adapters.
model = get_peft_model(model, config)
model.print_trainable_parameters()


model.config.torch_dtype = torch.float32
# Loss over target tokens only; label positions set to -100 are ignored.
loss_func = lora_utils.TargetLMLoss(ignore_index=-100)

batch_size = 4
max_seq_length = 256

# SFT data pipeline: jsonl file -> dataset -> padding collator -> loader.
train_dataset = dataset.ChatGLM1Dataset("./data/dev.jsonl", tokenizer, max_seq_length)
data_collator = collator.SFTDataCollator(tokenizer, max_seq_length)
train_loader = DataLoader(
    train_dataset,
    batch_size=batch_size,
    shuffle=True,
    num_workers=0,
    pin_memory=True,
    collate_fn=data_collator,
)

# AdamW over all trainable parameters, cosine-annealed down to eta_min.
optimizer = torch.optim.AdamW(model.parameters(), lr=2.14e-4)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
    optimizer, T_max=1200, eta_min=2e-6, last_epoch=-1
)

"----------------------------下面使用accelerate加速-------------------------------------------"
from accelerate import Accelerator
accelerator = Accelerator()
device = accelerator.device
train_loader, model, optimizer = accelerator.prepare(
    train_loader, model, optimizer
)

"----------------------------下面开始模型训练-------------------------------------------"

for epoch in range(5):
    pbar = tqdm(train_loader, total=len(train_loader))
    for inputs in pbar:
        optimizer.zero_grad()
        _loss = loss_func(model,inputs,return_outputs=False)

        accelerator.backward(_loss)
        optimizer.step()
        lr_scheduler.step()  # 执行优化器
        pbar.set_description(f"epoch:{epoch +1}, train_loss:{_loss.item():.5f}, lr:{lr_scheduler.get_last_lr()[0]*100:.5f}")
    model.save_pretrained("./qlora/qlora_saver")

