import argparse
from peft import get_peft_model, LoraConfig, TaskType
from transformers import TrainingArguments

def _str2bool(value):
    """Parse a command-line string into a real bool.

    argparse's ``type=bool`` is a classic pitfall: ``bool("False")`` is
    ``True`` because any non-empty string is truthy, so boolean flags could
    never be switched off from the CLI. This helper accepts the usual
    spellings explicitly and rejects anything else.

    Args:
        value: Raw CLI token (or an actual bool, passed through unchanged).

    Returns:
        bool: The parsed value.

    Raises:
        argparse.ArgumentTypeError: If ``value`` is not a recognized spelling.
    """
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ("true", "t", "yes", "y", "1"):
        return True
    if lowered in ("false", "f", "no", "n", "0"):
        return False
    raise argparse.ArgumentTypeError(f"expected a boolean value, got {value!r}")

def create_parser():
    """Build the CLI parser for training, dataset, and model options.

    Options are organized into three argument groups so ``--help`` output
    stays readable. Boolean options use ``_str2bool`` (not ``type=bool``)
    so ``--flag False`` actually parses as ``False``.

    Returns:
        argparse.ArgumentParser: The fully configured parser.
    """
    parser = argparse.ArgumentParser()

    # Training arguments (mirrors transformers.TrainingArguments fields)
    train_group = parser.add_argument_group("Training Arguments")
    train_group.add_argument(
        "--save_only_model", type=_str2bool, default=True,
    )
    train_group.add_argument(
        "--save_steps", type=int, default=100,
    )
    train_group.add_argument(
        "--save_total_limit", type=int, default=10,
    )
    train_group.add_argument(
        "--bf16", type=_str2bool, default=True,
    )
    train_group.add_argument(
        "--weight_decay", type=float, default=0.0,
    )
    train_group.add_argument(
        "--learning_rate", type=float, default=1e-7,
    )
    train_group.add_argument(
        "--lr_scheduler_type", type=str, default="cosine",
    )
    train_group.add_argument(
        "--warmup_ratio", type=float, default=0.1,
    )
    train_group.add_argument(
        "--eval_strategy", type=str, default="steps",
    )
    train_group.add_argument(
        "--eval_steps", type=int, default=100,
    )
    train_group.add_argument(
        "--load_best_model_at_end", type=_str2bool, default=True,
    )
    train_group.add_argument(
        "--logging_strategy", type=str, default="steps",
    )
    train_group.add_argument(
        "--logging_steps", type=int, default=1,
    )
    train_group.add_argument(
        "--save_strategy", type=str, default="steps",
    )
    train_group.add_argument(
        "--run_name", type=str, default="atom",
    )
    train_group.add_argument(
        "--output_dir", type=str, default="checkpoints/",
    )
    train_group.add_argument(
        "--per_device_train_batch_size", type=int, default=4,
    )
    train_group.add_argument(
        "--per_device_eval_batch_size", type=int, default=4,
    )
    train_group.add_argument(
        "--num_train_epochs", type=int, default=20,
    )

    # Dataset arguments
    data_group = parser.add_argument_group("Dataset Arguments")
    data_group.add_argument(
        "--num_data_proc", type=int, default=16,
    )
    data_group.add_argument(
        "--skip_eos_token", type=_str2bool, default=False,
    )
    data_group.add_argument(
        "--eval_size", type=int, default=256,
    )
    data_group.add_argument(
        "--max_length", type=int, default=512,
    )
    data_group.add_argument(
        "--train_dataset_path", type=str, default="/mix_cl_a50.parquet",
    )
    data_group.add_argument(
        "--eval_dataset_path", type=str, default="/mix_cl_a50.parquet",
    )

    # Model arguments
    model_group = parser.add_argument_group("Model Arguments")
    model_group.add_argument(
        "--pretrained_model_name_or_path", type=str, default="models/codellm",
    )

    return parser

def parse_train():
    """Parse CLI arguments and split them into model/dataset/training groups.

    Reads ``sys.argv`` via the parser from :func:`create_parser`, then routes
    each argument into one of three buckets by key name.

    Returns:
        tuple:
            - model_args (dict): keys for model loading
              (``pretrained_model_name_or_path``).
            - dataset_args (dict): dataset/tokenization keys.
            - training_args (transformers.TrainingArguments): fully built
              HF training configuration.
    """
    parser = create_parser()
    args = parser.parse_args()

    model_keys = ['pretrained_model_name_or_path']
    dataset_keys = [
        'train_dataset_path', 'eval_dataset_path', 'num_data_proc',
        'skip_eos_token', 'eval_size', 'max_length',
    ]
    training_keys = [
        'run_name', 'output_dir', 'per_device_train_batch_size', 'per_device_eval_batch_size',
        'num_train_epochs', 'weight_decay', 'learning_rate', 'lr_scheduler_type', 'warmup_ratio',
        'eval_strategy', 'eval_steps', 'load_best_model_at_end', 'logging_strategy', 'logging_steps',
        'save_strategy', 'save_steps', 'save_total_limit', 'save_only_model', 'bf16',
    ]

    model_args = {key: getattr(args, key) for key in model_keys}
    dataset_args = {key: getattr(args, key) for key in dataset_keys}
    training_kwargs = {key: getattr(args, key) for key in training_keys}

    # TrainingArguments historically spells this kwarg 'evaluation_strategy';
    # our CLI flag is the short form. NOTE(review): newer transformers renamed
    # it to 'eval_strategy' — confirm against the pinned version.
    training_kwargs['evaluation_strategy'] = training_kwargs.pop('eval_strategy')

    # Forwarding all collected keys fixes a prior bug where 'save_only_model'
    # was gathered from the CLI but never passed to TrainingArguments.
    training_args = TrainingArguments(**training_kwargs)

    return model_args, dataset_args, training_args


# Default LoRA adapter configuration for causal-LM fine-tuning: low-rank
# updates applied only to the attention query/value projections.
lora_set = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=8,  # rank of the low-rank decomposition
    lora_alpha=16,  # scaling numerator (effective scale = alpha / r)
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],
)