from transformers import HfArgumentParser
import torch

from datasets import load_dataset
from argumnets import ModelArgumnets,MyTrainingArguments,Datarguments

# Fix the global PyTorch RNG seed so runs are reproducible
# (affects weight init, dropout, data shuffling done via torch RNG).
torch.manual_seed(2618)


import os
import wandb
from transformers.trainer_utils import is_main_process
from initialize import init_model,init_tok,init_rainer,process_data,init_model_tok


# from peft import LoraConfig, TaskType
# from peft import get_peft_model
# def lora_cret(r=8, target_modules=["q_proj", "v_proj"], task_type=TaskType.CAUSAL_LM, lora_alpha=16, lora_dropout=0.05):
#     """
#     Creates a LoraConfig object with the specified parameters.
    
#     Args:
#         r (int): The rank for the LORA layers.
#         target_modules (list): List of module names to apply LORA layers.
#         task_type (TaskType): The task type for LORA, e.g., CAUSAL_LM.
#         lora_alpha (int): The scaling factor for LORA layers.
#         lora_dropout (float): The dropout rate for LORA layers.

#     Returns:
#         LoraConfig: The configured LoraConfig object.
#     """
#     return LoraConfig(
#         r=r,
#         target_modules=target_modules,
#         task_type=task_type,
#         lora_alpha=lora_alpha,
#         lora_dropout=lora_dropout
#     )


# Run Weights & Biases in offline mode so training needs no network access;
# logs can be uploaded later with `wandb sync`.  Must be set before
# `wandb.init()` is called (which happens inside main()).
# NOTE: the redundant second `import os` was removed — `os` is already
# imported at the top of the file.
os.environ['WANDB_MODE'] = 'offline'
def _prepare_dataset(data_files, tok, data_arg):
    """Load parquet shard(s) and tokenize/format them with ``process_data``.

    Args:
        data_files: a single path or a list of paths, forwarded to
            ``load_dataset`` under the ``'train'`` split key.
        tok: tokenizer passed through to ``process_data``.
        data_arg: parsed ``Datarguments`` instance; every field is forwarded
            to ``process_data`` as a keyword argument, and ``num_data_proc``
            controls the number of map workers.

    Returns:
        The processed ``datasets.Dataset``.
    """
    ds = load_dataset('parquet', data_files={'train': data_files})['train']
    return ds.map(
        process_data,
        batched=True,
        num_proc=data_arg.num_data_proc,
        fn_kwargs={"tokenizer": tok, **vars(data_arg)},
    )


def main():
    """Parse CLI args, build model/tokenizer, prepare datasets, and train."""
    parser = HfArgumentParser((ModelArgumnets, Datarguments, MyTrainingArguments))
    model_arg, data_arg, train_arg = parser.parse_args_into_dataclasses()

    model, tok = init_model_tok(model_arg)

    # LOCAL_RANK is set by torchrun / distributed launchers; -1 means a
    # plain single-process run.
    local_rank = int(os.environ.get("LOCAL_RANK", -1))

    if is_main_process(local_rank):
        # Only rank 0 logs to W&B and prints the parameter summary.
        # NOTE(review): print_trainable_parameters() is a PEFT-model method —
        # confirm init_model_tok returns a PEFT-wrapped model.
        wandb.init(project='atom', name=train_arg.run_name)
        model.print_trainable_parameters()

    # The train path may be a comma-separated list of parquet files;
    # the eval path is a single file (original behavior preserved).
    train_dataset = _prepare_dataset(data_arg.train_dataset_path.split(','), tok, data_arg)
    eval_dataset = _prepare_dataset(data_arg.eval_dataset_path, tok, data_arg)

    # Initialize the Trainer and start training.
    trainer = init_rainer(model, train_arg, train_dataset, eval_dataset, tok)
    trainer.train()


if __name__ == '__main__':
    main()
