import argparse
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer
from peft import LoraConfig

# Command-line interface: pick the MultiPL-T split (a target language)
# and the base model to fine-tune.
parser = argparse.ArgumentParser(description='Train model for a specific language.')
for flag, help_text in (
    ('--lang', 'Programming language to train on'),
    ('--model-name', 'Model name to train'),
):
    parser.add_argument(flag, type=str, required=True, help=help_text)
args = parser.parse_args()

# MultiPL-T names its dataset splits after the target programming language,
# so the --lang value doubles as the split selector.
split = args.lang
dataset = load_dataset("nuprl/MultiPL-T", split=split)

def formatting_prompts_func(example):
    """Extract the raw training text from one MultiPL-T record.

    SFTTrainer invokes this per example; each record stores its code
    text under the 'content' key.
    """
    text = example['content']
    return text

# LoRA adapter configuration: high-rank adapters with rank-stabilized scaling.
peft_config = LoraConfig(
    task_type="CAUSAL_LM",
    r=256,
    lora_alpha=256,    # alpha == r keeps the nominal LoRA scale at 1.0
    use_rslora=True,   # rank-stabilized LoRA: scales by alpha/sqrt(r) instead of alpha/r
    lora_dropout=0.05,
    bias="none",       # do not train bias terms
)

# Supervised fine-tuning settings, grouped by concern:
# optimization, precision, logging, and checkpointing.
sft_config = SFTConfig(
    output_dir=f"./train_output/codellama_lora_{split}",
    # Optimization: effective batch size of 8 via gradient accumulation.
    per_device_train_batch_size=1,
    gradient_accumulation_steps=8,
    learning_rate=2e-4,
    num_train_epochs=10,
    # Precision / data layout.
    fp16=True,
    packing=True,      # concatenate examples into fixed-length sequences
    # Logging and checkpoints.
    logging_strategy="steps",
    logging_steps=10,
    save_strategy="epoch",
    save_total_limit=10,   # keep one checkpoint per epoch
    report_to="wandb",
)

# Build the trainer. Passing the model name (rather than a loaded model)
# lets SFTTrainer handle loading model and tokenizer itself; `model` is the
# documented name of SFTTrainer's first parameter.
trainer = SFTTrainer(
    model=args.model_name,
    args=sft_config,
    train_dataset=dataset,
    peft_config=peft_config,
    formatting_func=formatting_prompts_func,
)

trainer.train()

# Persist the final (LoRA-adapted) weights and tokenizer together, in a
# directory distinct from the per-epoch checkpoints under output_dir.
# NOTE(review): `trainer.tokenizer` is deprecated in newer TRL releases in
# favor of `trainer.processing_class` — confirm against the pinned version.
final_dir = f"./train_output/codellama_{split}"
trainer.model.save_pretrained(final_dir)
trainer.tokenizer.save_pretrained(final_dir)
