# SFT Trainer
import os 
import torch 
import warnings
warnings.filterwarnings('ignore')
from accelerate import PartialState
from datasets import load_dataset
from functools import partial
from pathlib import Path
from peft import LoraConfig, get_peft_model
from trl import get_peft_config, ModelConfig, SFTConfig, SFTTrainer
 
from model.handle_model import get_model 
from prompts.prompt import Prompt

####################################
# --- Run configuration: edit these paths per environment ---
data_path = '/gemini/data-2' 

# A bare model name pulls the model from the Hugging Face hub;
# a filesystem path loads a local checkpoint.
model_name = Path('/gemini/data-1/DeepSeek-R1-Distill-Qwen-32B')
# model_name = '/gemini/data-1/output_batllm-Qwen-32B/best_model'

# Export directory for the trained model.
output_dir = Path('/gemini/data-1/output_batllm-Qwen-32B-2/')

# NOTE(review): currently unused — the corresponding max_seq_length option
# in SFTConfig below is commented out; confirm before relying on it.
max_seq_length = 256

####################################

# Template used to render each QA pair into one training string.
alpaca_prompt = Prompt.qa_prompt


# Must add EOS_TOKEN
def formatting_prompts_func(examples, eos_token):
    """Render batched QA pairs into training strings via ``alpaca_prompt``.

    Args:
        examples: Batched dataset columns containing "input" and "output"
            lists of equal length.
        eos_token: Tokenizer EOS token injected through the template —
            without it generation never learns to stop.

    Returns:
        A dict with a single "text" column of formatted prompt strings.
    """

    def _render(question, answer):
        # NOTE(review): these replace() calls strip a boilerplate system
        # sentence and *all* spaces — presumably fine for Chinese corpora
        # where spaces are noise; confirm before reusing on spaced text.
        cleaned_q = question.replace('您是一位电池研究与性能分析专家。', '').replace(' ', '')
        cleaned_a = answer.replace(' ', '')
        return alpaca_prompt.format(
            question=cleaned_q,
            answer=cleaned_a,
            eos_token=eos_token,
        )

    rendered = [
        _render(q, a)
        for q, a in zip(examples["input"], examples["output"])
    ]
    return {"text": rendered}


def train():
    """Fine-tune the base model with LoRA SFT and save the result.

    Side effects: reads JSONL training data from ``data_path``, trains for
    ``num_train_epochs`` epochs, and writes checkpoints plus ``last_model``
    and ``peft_model`` exports under ``output_dir``. No return value.
    """
    # --- Model / tokenizer -------------------------------------------------
    model_args = ModelConfig(
        model_name_or_path=model_name,
        torch_dtype=torch.bfloat16,
        attn_implementation=None,  # set to "flash_attention_2" if available
    )
    model, tokenizer = get_model(model_args)

    # --- Dataset -----------------------------------------------------------
    # Only the training split is used (evaluation is disabled below);
    # the matching eval glob would be 'eval/*lqy1.jsonl'.
    data = load_dataset(
        data_path,
        data_files='train/*lqy1.jsonl',
        split='train[:200]',  # cap at 200 samples for this run
    )
    data = data.map(
        partial(formatting_prompts_func, eos_token=tokenizer.eos_token),
        batched=True,
    )

    # --- PEFT (LoRA) -------------------------------------------------------
    lora_config = LoraConfig(
        r=16,
        lora_alpha=32,
        lora_dropout=0.1,
        target_modules=['q_proj', 'v_proj'],
        task_type='CAUSAL_LM',
        bias='none',
        # FIX: was init_lora_weights=False, which randomizes lora_B so the
        # adapted model differs from the base model *before* any training.
        # The default (True) keeps the A·B update at zero on init; per the
        # PEFT docs, False is intended for debugging/testing only.
        init_lora_weights=True,
    )
    lora_model = get_peft_model(model, lora_config)

    # --- Training configuration --------------------------------------------
    training_args = SFTConfig(
        bf16=True,
        dataset_text_field='text',
        dataloader_drop_last=True,
        dataloader_pin_memory=False,
        # max_seq_length=max_seq_length,
        # optim='adamw_bnb_8bit',
        save_total_limit=3,
        save_strategy='epoch',  # FIX: dropped save_steps=20 — ignored under 'epoch'
        gradient_accumulation_steps=4,
        do_eval=False,
        logging_steps=20,
        learning_rate=5e-5,
        num_train_epochs=300,
        output_dir=output_dir,
        use_liger=True,
        per_device_train_batch_size=1,
        warmup_ratio=0.1,
        lr_scheduler_type='cosine',
        load_best_model_at_end=False,
        gradient_checkpointing_kwargs={'use_reentrant': True},
    )

    # --- Trainer -----------------------------------------------------------
    # The model is already PEFT-wrapped above; do NOT also pass peft_config.
    # (The old get_peft_config(model_args) argument only happened to be None
    # because ModelConfig.use_peft defaults to False — a real config here
    # would wrap the adapters twice.)
    trainer = SFTTrainer(
        model=lora_model,
        args=training_args,
        train_dataset=data,
        processing_class=tokenizer,
    )

    print('Start training')
    train_result = trainer.train()
    trainer.log_metrics('train', train_result.metrics)

    trainer.model.eval()

    # --- Save --------------------------------------------------------------
    last_model_path = Path(training_args.output_dir) / 'last_model'
    if trainer.is_fsdp_enabled:
        # Gather sharded weights into a full state dict before saving.
        # FIX: the previous trainer.model.summon_full_params() call raised
        # AttributeError — summon_full_params is an FSDP (static) method,
        # not a PeftModel method; Trainer.save_model performs the gather
        # itself once the state-dict type is set.
        trainer.accelerator.state.fsdp_plugin.set_state_dict_type(
            "FULL_STATE_DICT")
    trainer.save_model(last_model_path)

    # Also export adapter-only weights for lightweight reloading.
    trainer.model.save_pretrained(Path(training_args.output_dir) / 'peft_model')
   
    
def main() -> None:
    """Script entry point: run the SFT training pipeline."""
    train()


if __name__ == '__main__':
    main()
    

