# import torch.nn as nn
# from transformers import AutoTokenizer, AutoModel
# from peft import get_peft_model, LoraConfig, TaskType, PeftModel # pip install peft==0.13.2
# from transformers import Trainer
#
# # 加载底座模型
# MODEL_PATH = r'D:\code\other\LLMs\models\Qwen2.5-Coder-0.5B-Instruct'
# tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
# model = AutoModel.from_pretrained(
#     MODEL_PATH, load_in_8bit=False, trust_remote_code=True, device_map="auto"
# )
# # 对底座模型做一些设置
# model.gradient_checkpointing_enable()
# model.enable_input_require_grads()
# model.is_parallelizable = True
# model.model_parallel = True
# model.config.use_cache = (
#     False  # silence the warnings. Please re-enable for inference!
# )
#
# peft_config = LoraConfig(
#             task_type=TaskType.CAUSAL_LM,
#             inference_mode=False,
#             r=8,
#             lora_alpha=32,
#             lora_dropout=0.1,
#         )
# model = get_peft_model(model, peft_config) # 转为lora模型
#
# trainer = Trainer(
#             model=model,
#             train_dataset=dataset,
#             args=training_args,
#             data_collator=lambda x : data_collator_glm(x, tokenizer),
#         )
# trainer.train()
#
# if True:
#     config = LoraConfig(
#         task_type=TaskType.CAUSAL_LM,
#         target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
#         inference_mode=False, # 训练模式
#         r=8, # Lora 秩
#         lora_alpha=32, # LoRA alpha，具体作用参见 LoRA 原理
#         lora_dropout=0.1  # Dropout 比例
#     )
#     args = TrainingArguments(
#         output_dir="./output/Qwen2_instruct_lora",
#         per_device_train_batch_size=4,
#         gradient_accumulation_steps=4,
#         logging_steps=10,
#         num_train_epochs=3,
#         save_steps=100,
#         learning_rate=1e-4,
#         save_on_each_node=True,
#         gradient_checkpointing=True
#     )
#     trainer = Trainer(
#         model=model,
#         args=args,
#         train_dataset=tokenized_id,
#         data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),
#     )
#     trainer.train()