import json
from datasets import Dataset
from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, Trainer

def load_data(file_path):
    """Read a JSON file from disk and return its parsed contents."""
    with open(file_path, encoding="utf-8") as handle:
        return json.load(handle)

def preprocess_data(data):
    """Convert raw records into {"input", "target"} pairs.

    Each input is the record's prompt and suffix joined around a literal
    " <MIDDLE> " marker (fill-in-the-middle style); the canonical solution
    becomes the training target.
    """
    return [
        {
            "input": item["prompt"] + " <MIDDLE> " + item["suffix"],
            "target": item["canonical_solution"],
        }
        for item in data
    ]

def load_model_and_tokenizer(model_name):
    """Load a causal-LM model and its tokenizer from the Hugging Face Hub.

    Args:
        model_name: Hub repo id or local path of the pretrained model.

    Returns:
        (tokenizer, model) tuple.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # Llama-family tokenizers ship without a pad token; the downstream
    # tokenization with padding="max_length" would raise otherwise.
    # Reusing EOS as the pad token is the standard workaround.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(model_name)
    # Keep the model config's pad id in sync with the tokenizer so the
    # Trainer and generation utilities agree on what padding is.
    model.config.pad_token_id = tokenizer.pad_token_id
    return tokenizer, model

def tokenize_data(tokenizer, data):
    """Tokenize {"input", "target"} pairs into a Dataset ready for Trainer.

    Args:
        tokenizer: A Hugging Face tokenizer with a pad token configured.
        data: Iterable of dicts with "input" and "target" string fields.

    Returns:
        A `datasets.Dataset` with input_ids/attention_mask/labels columns.
    """
    def tokenize_function(examples):
        inputs = tokenizer(examples["input"], truncation=True, padding="max_length", max_length=512)
        labels = tokenizer(examples["target"], truncation=True, padding="max_length", max_length=512)
        # Mask padding positions with -100 (CrossEntropyLoss ignore index)
        # so the model is not trained to predict pad tokens.
        pad_id = tokenizer.pad_token_id
        inputs["labels"] = [
            [token_id if token_id != pad_id else -100 for token_id in seq]
            for seq in labels["input_ids"]
        ]
        return inputs

    dataset = Dataset.from_dict({"input": [item["input"] for item in data], "target": [item["target"] for item in data]})
    # Drop the raw string columns after tokenization; the data collator
    # can only batch numeric features.
    tokenized_dataset = dataset.map(tokenize_function, batched=True, remove_columns=["input", "target"])
    return tokenized_dataset

def train(model, tokenizer, tokenized_dataset, output_dir, eval_dataset=None):
    """Fine-tune `model` on `tokenized_dataset` and save it to `output_dir`.

    Args:
        model: The causal-LM model to fine-tune.
        tokenizer: Matching tokenizer (saved alongside the model by Trainer).
        tokenized_dataset: Training split with input_ids/labels columns.
        output_dir: Directory for checkpoints and the final model.
        eval_dataset: Optional evaluation split; when omitted, periodic
            evaluation is disabled (the original always requested
            evaluation_strategy="steps", which makes Trainer raise when no
            eval_dataset is provided).
    """
    training_args = TrainingArguments(
        output_dir=output_dir,
        per_device_train_batch_size=32,
        num_train_epochs=6,
        logging_dir="./logs",
        logging_steps=10,
        save_steps=500,
        save_total_limit=2,
        # Only evaluate when an eval set exists; "steps" without one is an error.
        evaluation_strategy="steps" if eval_dataset is not None else "no",
        eval_steps=500,
        warmup_steps=100,
        weight_decay=0.01,
        fp16=True,  # enable mixed-precision training
        push_to_hub=False,
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_dataset,
        eval_dataset=eval_dataset,
        tokenizer=tokenizer,
    )

    trainer.train()
    trainer.save_model(output_dir)

def main():
    """End-to-end pipeline: load data, preprocess, tokenize, fine-tune."""
    # Dataset path
    data_path = "dataset.json"
    model_name = "codellama/CodeLlama-7b-hf"
    # Output directory
    output_dir = "./codellama-7b-finetuned"

    # Load and prepare the raw records
    raw_records = load_data(data_path)
    examples = preprocess_data(raw_records)

    # Load model and tokenizer
    tokenizer, model = load_model_and_tokenizer(model_name)

    # Tokenize the data
    tokenized = tokenize_data(tokenizer, examples)

    # Train the model
    train(model, tokenizer, tokenized, output_dir)


if __name__ == "__main__":
    main()