'''
多gpu 分布式
lora训练流程， 基于peft,accelerate,transformers

accelerate launch train_lora3.py # accelerate config
accelerate launch --multi_gpu --num_processes 2 train_lora3.py # 两个gpu

已测试
    lora+acce: linux 4GPU + 0.5b+数据集mycode10
    lora+acce: linux 4GPU + 0.5b+数据集mycode153 + MAX_LENGTH3000
        截断user_prompt而不是token
    
'''
import os
import time

import torch

# 可用gpu个数
# if torch.cuda.device_count() > 3:
#     os.environ['CUDA_VISIBLE_DEVICES'] = '2,3'

from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, Trainer, DataCollatorForSeq2Seq

from peft import LoraConfig, TaskType, get_peft_model

# from datasets import Dataset  # pip install datasets==2.18.0 （hugface）
# Platform-specific paths: Windows dev box ('nt') vs. Linux training server.
_on_windows = os.name == 'nt'
# Base model checkpoint directory.
pretrained_model_name_or_path = (
    r'D:\code\other\LLMs\models\Qwen2.5-Coder-0.5B-Instruct'
    if _on_windows
    else r'/home/ps/zhangxiancai/llm_deploy/bigfiles/models/Qwen2.5-Coder-0.5B-Instruct'
)
# Directory where LoRA checkpoints are written.
save_path = (
    r'D:\code\other\LLMs\local_data\train_output'
    if _on_windows
    else r'/home/ps/zhangxiancai/llm_deploy/LLMs/local_data/train_output'
)


# 1 Load the training dataset
print('1 载入数据集')
# Project-local helper; presumably returns a pre-tokenized dataset with
# input_ids / attention_mask / labels columns, since DataCollatorForSeq2Seq
# is used below — TODO confirm against train_data_convert.
from train_data_convert import get_dataset_mycode

train_dataset = get_dataset_mycode()
# time.sleep(20)
# 2 Load the pretrained model and build the LoRA model
print('2 载入预训练模型，构造lora模型')
from accelerate import Accelerator
from torch.utils.data import DataLoader

accelerator = Accelerator()
device = accelerator.device  # per-process device selected by accelerate
# device = 'cuda:0'
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, use_fast=False,
                                          trust_remote_code=True)  # used for data preprocessing / batch collation
# fp16 weights; each process loads the model onto its own accelerate device.
model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path, device_map=device,
                                             torch_dtype=torch.float16)  # device_map="auto",
# model.to(device)
model.enable_input_require_grads()  # required when gradient checkpointing is enabled
# Attach LoRA adapters to every attention and MLP projection layer of the
# Qwen2.5 architecture.
_lora_target_modules = [
    "q_proj", "k_proj", "v_proj", "o_proj",
    "gate_proj", "up_proj", "down_proj",
]
config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,      # causal-LM objective
    target_modules=_lora_target_modules,
    inference_mode=False,              # training mode
    r=4,                               # LoRA rank
    lora_alpha=32,                     # LoRA scaling factor
    lora_dropout=0.1,                  # dropout on the LoRA layers
)
model = get_peft_model(model, config)
model.print_trainable_parameters()     # report trainable vs. total parameters
# 3 Training
print('3 训练')
# True: hand-rolled accelerate loop below; False: HF Trainer fallback.
is_accelerate = True
if is_accelerate:

    print(f"accelerator.device: {device}")
    # device = 'gpu'
    # from transformers import DataCollatorWithPadding
    # data_collator = DataCollatorWithPadding(tokenizer=tokenizer)  # pad so samples in a batch share one length
    # Unlike DataCollatorWithPadding, this also pads the labels column.
    data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=1,
                                  collate_fn=data_collator)  # , collate_fn=data_collator
    # NOTE(review): evaluation reuses the training set, so eval_loss measures
    # fit on training data, not generalization.
    eval_dataloader = DataLoader(train_dataset, batch_size=1, collate_fn=data_collator)
    optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
    # prepare() wraps model/optimizer/dataloaders for multi-GPU: it shards
    # the dataloaders across processes and handles device placement.
    model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader
    )
    epoch_num = 11
    for epoch in range(epoch_num):  # training loop
        model.train()
        for step, batch in enumerate(train_dataloader):
            # print(step)
            bs = batch['input_ids'].shape  # logged below to watch padded batch shapes
            batch = {k: v.to(device) for k, v in batch.items()}  # move batch to device
            outputs = model(**batch)  # PeftModelForCausalLM forward; loss computed from labels
            loss = outputs.loss
            # loss.backward()
            accelerator.backward(loss) # gradients are synchronized across GPUs here

            optimizer.step()
            optimizer.zero_grad()
            # print(step)
            # if step % 5 == 0:
            print(f"Process {accelerator.process_index} Epoch {epoch}/{epoch_num} Step {step}/{len(train_dataloader)} Loss {loss.item()} batch {bs}")

            # Free unreferenced GPU memory to avoid OOM at large step counts.
            # NOTE(review): calling this every step hurts throughput; consider
            # calling it only periodically.
            torch.cuda.empty_cache()

        # Evaluation (on the training set — see note above)
        model.eval()
        eval_loss = 0
        with torch.no_grad():
            for batch in eval_dataloader:
                batch = {k: v.to(device) for k, v in batch.items()}
                outputs = model(**batch)
                eval_loss += outputs.loss.item() # may go NaN when sequences exceed MAX_LENGTH? — TODO confirm
                torch.cuda.empty_cache()
        eval_loss /= len(eval_dataloader)
        print(f"Epoch {epoch} Evaluation Loss {eval_loss}")

        if accelerator.is_local_main_process:
            # Checkpoint saving (main process only).
            # NOTE(review): `epoch % 10 == 0` also saves at epoch 0.
            if epoch % 10 == 0 or epoch == epoch_num - 1:
                # accelerator.wait_for_everyone()
                # NOTE(review): with the barrier above commented out, other
                # ranks are not guaranteed to have finished this epoch when
                # the main process saves.
                unwrapped_model = accelerator.unwrap_model(model) # strip the accelerate/DDP wrapper
                unwrapped_model.save_pretrained(os.path.join(save_path, f"epoch_{epoch}"))
                # tokenizer.save_pretrained(os.path.join(save_path, f"epoch_{epoch}"))

else:
    # HF Trainer path (single-process, or distributed when launched via torchrun).
    args = TrainingArguments(
        output_dir=save_path,
        per_device_train_batch_size=1,
        gradient_accumulation_steps=4,
        logging_steps=2,
        num_train_epochs=20,
        save_steps=2,  # set low for a quick demo; 100+ is a more typical value
        learning_rate=1e-4,
        save_on_each_node=True,
        gradient_checkpointing=True,
        # distributed-training parameter
        local_rank=int(os.getenv('LOCAL_RANK', -1)),  # pick up local rank automatically
    )

    trainer = Trainer(
        model=model,
        args=args,  # training hyper-parameters
        train_dataset=train_dataset,
        data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),  # tokenizer unchanged
    )
    trainer.train()

# torchrun --nproc_per_node=NUM_GPUS_YOU_HAVE your_training_script.py

#  95%|█████████▌| 38/40 [01:14<00:03,  1.95s/it]{'loss': 0.1331, 'grad_norm': 1.9070483446121216, 'learning_rate': 5e-06, 'epoch': 15.2}
# 100%|██████████| 40/40 [01:18<00:00,  1.95s/it]{'loss': 0.1137, 'grad_norm': 1.5855801105499268, 'learning_rate': 0.0, 'epoch': 16.0}

# 4 Post-training inference (local main process only, so the sanity-check
# generation is not duplicated once per GPU).
# Release accelerator-held GPU references before loading the saved adapter.
accelerator.free_memory()
if accelerator.is_local_main_process:
    print('4 训练后推理')
    from eval import infer_after_train

    # Load the final checkpoint written by the training loop above: with
    # epoch_num = 11 the last save lands in "epoch_10".
    # Fixes: the original comment wrongly claimed this loads the 18th epoch,
    # and used an f-string ( f"epoch_10" ) that contained no placeholder.
    model_path = os.path.join(save_path, "epoch_10")
    infer_after_train(model_path)