'''
多gpu 分布式
lora训练流程， 基于peft,accelerate,transformers

accelerate launch train_lora_deepspeed.py # accelerate config
accelerate launch --multi_gpu --num_processes 2 train_lora_deepspeed.py # 两个gpu

deepspeed --num_gpus=2 train_lora_deepspeed.py

已测试
    lora+acce: linux 4GPU + 0.5b+数据集mycode153 + MAX_LENGTH3000
        截断user_prompt而不是token
        accelerate launch --multi_gpu --num_processes 4 train_lora_deepspeed.py

    lora+transformers: linux 1*3090GPU + 7b+数据集mycode # 可以
        python train_lora_deepspeed.py
    lora+deepspeed: linux 4*3090GPU + 7b+数据集mycode10 # 不可以

    
'''
import os

import time

# Restrict which GPUs CUDA may use.
# FIX: this assignment must happen before the first CUDA initialization, so it
# now precedes `import torch`. torch initializes CUDA lazily, so the original
# ordering (set after `import torch`) usually worked, but any earlier CUDA
# touch would have silently ignored the setting.
# NOTE(review): launchers such as `accelerate launch` / `deepspeed` also manage
# GPU visibility — confirm this hard-coded list does not conflict with them
# (the header mentions 4-GPU runs, but only 3 GPUs are listed here).
os.environ['CUDA_VISIBLE_DEVICES'] = '0,2,3'

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, Trainer, DataCollatorForSeq2Seq,BitsAndBytesConfig

from peft import LoraConfig, TaskType, get_peft_model,prepare_model_for_kbit_training

# from datasets import Dataset  # pip install datasets==2.18.0 （hugface）
# Resolve the base-model checkpoint and training-output directory for the
# current platform: Windows = local dev box (small 0.5B model), Linux = server.
if os.name == 'nt':
    pretrained_model_name_or_path, save_path = (
        r'D:\code\other\LLMs\models\Qwen2.5-Coder-0.5B-Instruct',  # model path
        r'D:\code\other\LLMs\local_data\train_output',  # output path
    )
else:
    # Other checkpoints kept around for quick switching:
    #   .../bigfiles/models/Qwen2.5-14B-Instruct
    #   .../bigfiles/models/Qwen2.5-Coder-0.5B-Instruct
    pretrained_model_name_or_path, save_path = (
        r'/home/ps/zhangxiancai/llm_deploy/bigfiles/models/Qwen2___5-Coder-7B',
        r'/home/ps/zhangxiancai/llm_deploy/LLMs/local_data/train_output',  # output path
    )
    # dataset_path = r"/home/ps/zhangxiancai/llm_deploy/LLMs/local_data/question_response_0.jsonl"

# 1 load the training dataset via the project-local converter
print('1 载入数据集')
from train_data_convert import get_dataset_mycode

# NOTE(review): presumably returns pre-tokenized samples with input_ids/labels
# (DataCollatorForSeq2Seq is used downstream) — confirm in train_data_convert.
train_dataset = get_dataset_mycode()
print(f'数据个数 {len(train_dataset)}')
# time.sleep(20)
# 2 load the pretrained model and wrap it with LoRA
print('2 载入预训练模型，构造lora模型')
# 8-bit loading toggle (requires: pip install -U bitsandbytes); currently off,
# so the prepare_model_for_kbit_training branch below is dead code.
is_load_in_8bit = False # pip install -U bitsandbytes
from torch.utils.data import DataLoader

# Device string used for model placement and batch transfer in the manual loops.
device = 'cuda'
# device = 'auto'
# Slow tokenizer (use_fast=False), used only for data collation/preprocessing.
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, use_fast=False,
                                          trust_remote_code=True)  # for data preprocessing
# Base model in fp16 on `device` ('cuda').
model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path, device_map=device,
                                             torch_dtype=torch.float16,)
# # Quantization config (kept for reference; not active)
# bnb_config = BitsAndBytesConfig(
#     load_in_4bit=True,  # or load_in_8bit=True, depending on need
#     bnb_4bit_use_double_quant=True,
#     bnb_4bit_quant_type="nf4",
#     bnb_4bit_compute_dtype=torch.float16,
# )
# model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path, device_map=device,
#                                              torch_dtype=torch.float16, quantization_config=bnb_config)  # device_map="auto",  torch.float16  torch_dtype=torch.float32
# model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path, device_map=device,
#                                               load_in_8bit = True)
# model.to(device)
# Required when gradient checkpointing is enabled (the Trainer branch sets
# gradient_checkpointing=True); makes input embeddings require grad so the
# LoRA params receive gradients through the checkpointed graph.
model.enable_input_require_grads()
config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    # target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],  # full projection set, kept for reference
    target_modules=["v_proj",],  # adapt only the value projection (smallest footprint)
    inference_mode=False,  # training mode
    r=2,  # LoRA rank (was r=4)
    lora_alpha=32,  # LoRA scaling alpha
    lora_dropout=0.1,  # dropout on the LoRA path
    # loss_type='ForCausalLMLoss'
)
if is_load_in_8bit:
    # NOTE(review): dead branch — is_load_in_8bit is False above, and the model
    # was loaded in fp16, not 8-bit; confirm loading path before flipping flag.
    model = prepare_model_for_kbit_training(model)
# Wrap the base model; only LoRA adapter weights remain trainable.
model = get_peft_model(model, config)
model.print_trainable_parameters()
# 3 training — pick one backend: 'accelerate' | 'deepspeed' | 'transformers'
# train_mode = 'accelerate'
# train_mode = 'deepspeed'
train_mode = 'transformers'
print(f'3 训练 {train_mode}')
if train_mode == 'accelerate':
    # --- Distributed training via HF Accelerate ---
    # Launch: accelerate launch --multi_gpu --num_processes N train_lora_deepspeed.py
    from accelerate import Accelerator
    accelerator = Accelerator()
    # FIX: use the device Accelerate assigned to THIS process instead of the
    # module-level 'cuda' string, so batch placement and the log line below
    # reflect the actual per-process device. (After prepare() the dataloader
    # already yields tensors on this device; the explicit .to() is a no-op
    # safety net.)
    device = accelerator.device
    print(f"accelerator.device: {device}")
    # Dynamic padding per batch; DataCollatorForSeq2Seq pads 'labels' too.
    data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=1,
                                  collate_fn=data_collator)
    # NOTE(review): evaluation reuses the training set — there is no held-out
    # split, so eval loss only tracks training fit.
    eval_dataloader = DataLoader(train_dataset, batch_size=1, collate_fn=data_collator)
    optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
    model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader
    )
    epoch_num = 11
    for epoch in range(epoch_num):  # training loop
        model.train()
        for step, batch in enumerate(train_dataloader):
            bs = batch['input_ids'].shape
            batch = {k: v.to(device) for k, v in batch.items()}  # move batch to device
            outputs = model(**batch)  # PeftModelForCausalLM forward
            loss = outputs.loss
            accelerator.backward(loss)  # syncs gradients across GPUs
            optimizer.step()
            optimizer.zero_grad()
            print(
                f"Process {accelerator.process_index} Epoch {epoch}/{epoch_num} Step {step}/{len(train_dataloader)} Loss {loss.item()} batch {bs}")
            # Release unreferenced GPU memory to avoid growth at large step counts.
            torch.cuda.empty_cache()

        # evaluation
        model.eval()
        eval_loss = 0
        with torch.no_grad():
            for batch in eval_dataloader:
                batch = {k: v.to(device) for k, v in batch.items()}
                outputs = model(**batch)
                eval_loss += outputs.loss.item()  # NOTE(review): reportedly NaN past MAX_LENGTH — verify
                torch.cuda.empty_cache()
        eval_loss /= len(eval_dataloader)
        print(f"Epoch {epoch} Evaluation Loss {eval_loss}")

        # Save the LoRA adapter on the final epoch only.
        if epoch == epoch_num - 1:
            # FIX: barrier BEFORE the rank-0 save so no process is still mid-step
            # while the main process serializes the weights.
            accelerator.wait_for_everyone()
            if accelerator.is_local_main_process:
                unwrapped_model = accelerator.unwrap_model(model)  # strip DDP wrapper
                unwrapped_model.save_pretrained(os.path.join(save_path, f"epoch_{epoch}"))
                # tokenizer.save_pretrained(os.path.join(save_path, f"epoch_{epoch}"))

    # 4 inference after training
    # Release accelerator-held GPU references first.
    accelerator.free_memory()
    if accelerator.is_local_main_process:
        print('4 训练后推理')
        from eval import infer_after_train
        model_path = os.path.join(save_path, f"epoch_{epoch}")  # last saved epoch
        infer_after_train(model_path)
elif train_mode == 'deepspeed':  # deepspeed
    # 初始化 DeepSpeed
    import deepspeed  # pip install deepspeed # 0.16.4

    # ds_config = {
    #     "train_batch_size": 2,
    #     "gradient_accumulation_steps": 1,
    #     "optimizer": {
    #         "type": "Adam",
    #         "params": {
    #             "lr": 0.00015
    #         }
    #     },
    #     "fp16": {
    #         "enabled": False
    #     },
    #     "zero_optimization": True
    # } # 训练算法相关参数
    # ds_config = {
    #     "train_batch_size": 32,
    #     "gradient_accumulation_steps": 1,
    #     "fp16": {
    #         "enabled": True
    #     },
    #     "zero_optimization": {
    #         "stage": 2
    #     },
    #     "optimizer": {
    #         "type": "Adam",
    #         "params": {
    #             "lr": 3e-5,
    #             "betas": [0.8, 0.999],
    #             "eps": 1e-8
    #         }
    #     }
    # }

    ds_config = {
        "train_batch_size": 4,
        "gradient_accumulation_steps": 1,
        "fp16": {
            "enabled": True,
        },
        "zero_optimization": {
            "stage": 3,  # 使用ZeRO第3阶段进行更高效的内存管理
            "allgather_partitions": True,
            "allgather_bucket_size": 2e8,
            "reduce_scatter": True,
            "reduce_bucket_size": 2e8,
            "overlap_comm": True,
            "load_from_fp32_weights": True,
            "elastic_checkpoint": True
        },
        "optimizer": {
            "type": "Adam",
            "params": {
                "lr": 3e-5,
                "betas": [0.8, 0.999],
                "eps": 1e-8
            }
        }
    }

    model_engine, optimizer, _, _ = deepspeed.initialize(
        model=model,
        model_parameters=model.parameters(),
        config=ds_config
    )
    # 训练循环...
    data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=2,
                                  collate_fn=data_collator)  # , collate_fn=data_collator
    epoch_num = 2
    for epoch in range(epoch_num):
        for step, batch in enumerate(train_dataloader):
            # print(step)
            bs = batch['input_ids'].shape
            batch = {k: v.to(device) for k, v in batch.items()}  # batch放到device
            outputs = model_engine(**batch)
            loss = outputs.loss
            model_engine.backward(loss)
            model_engine.step()

            print(f"Epoch {epoch}/{epoch_num} Step {step}/{len(train_dataloader)} Loss {loss.item()} batch {bs}")
    # 保存逻辑
    # model_engine.save_checkpoint(save_path, loss.item())
    model_engine.save_pretrained(os.path.join(save_path, f"epoch_ds_{epoch}"))  # 保存lora模型

    # 4 训练后推理
    # 释放GPU内存
    # accelerator.free_memory()
    print('4 训练后推理')
    from eval import infer_after_train
    model_path = os.path.join(save_path, f"epoch_ds_{epoch}")  # 假设我们加载第18个epoch的模型
    infer_after_train(model_path)

else:
    # ds_config = {
    #     "train_batch_size": 2,
    #     "gradient_accumulation_steps": 1,
    #     "optimizer": {
    #         "type": "Adam",
    #         "params": {
    #             "lr": 0.00015
    #         }
    #     },
    #     "fp16": {
    #         "enabled": False
    #     },
    #     "zero_optimization": True
    # } #
    args = TrainingArguments(
        output_dir=os.path.join(save_path, f"transformers_lora_{time.time()}"),
        per_device_train_batch_size=1,
        gradient_accumulation_steps=4,
        logging_steps=2,
        num_train_epochs=20,
        save_steps=100,  # steps 为了快速演示，这里设置10，建议你设置成100
        learning_rate=1e-4,
        save_on_each_node=True,
        gradient_checkpointing=True, # 减少内存 反向传播期间重新计算中间激活值
        # 分布式训练相关的参数
        local_rank=int(os.getenv('LOCAL_RANK', -1)),  # 自动获取local rank
        # python -m torch.distributed.launch --nproc_per_node=3 train_lora_deepspeed.py
        # deepspeed=ds_config, # Todo deepspeed+transformers
    )

    trainer = Trainer(
        model=model,
        args=args,  # 训练算法参数
        train_dataset=train_dataset,
        data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),  # tokenizer不变
    )
    trainer.train()
    # print('4 训练后推理')
    # from eval import infer_after_train
    # model_path = os.path.join(save_path, f"epoch_ds_{epoch}")  # 假设我们加载第18个epoch的模型
    # infer_after_train(model_path)
