import json
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments
from datasets import Dataset
import torch
from torch.cuda.amp import autocast, GradScaler  # 引入混合精度训练

# Select the compute device: prefer the GPU when CUDA is available.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
print(f"Using device: {device}")

# Load the question/answer records produced upstream.
# Explicit encoding: JSON is UTF-8 by spec; the platform default may not be.
with open('output.json', 'r', encoding='utf-8') as file:
    data = json.load(file)

# Split into training and evaluation sets:
# the first 50 records train, the remainder evaluate.
# (The original comments claimed a 2/1 split — the slices say otherwise.)
train_data = data[:50]
eval_data = data[50:]

# Convert each split into a Hugging Face Dataset with the parallel
# question/answer columns that tokenize_function expects.
train_dataset = Dataset.from_dict({
    "input_text": [item["question"] for item in train_data],
    "target_text": [item["answer"] for item in train_data]
})

eval_dataset = Dataset.from_dict({
    "input_text": [item["question"] for item in eval_data],
    "target_text": [item["answer"] for item in eval_data]
})

# Fetch the pre-trained checkpoint and its matching tokenizer.
model_name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Place the model on the selected device (GPU when available).
model.to(device)

# Tokenize the dataset
def tokenize_function(examples):
    """Tokenize a batch of question/answer pairs for fine-tuning.

    Questions become the model inputs; answers become the labels. Both are
    padded/truncated to 128 tokens. Padding positions in the labels are set
    to -100 so the causal-LM cross-entropy loss ignores them (otherwise the
    model is also trained to predict pad tokens).

    NOTE(review): for causal-LM fine-tuning the usual setup concatenates
    question + answer into one sequence and masks the question portion of
    the labels; here inputs and labels are tokenized independently — confirm
    this matches the intended training objective.
    """
    model_inputs = tokenizer(examples["input_text"], max_length=128, padding="max_length", truncation=True)
    labels = tokenizer(examples["target_text"], max_length=128, padding="max_length", truncation=True)
    # Replace pad-token ids with -100, the ignore index of the loss.
    pad_id = tokenizer.pad_token_id
    model_inputs["labels"] = [
        [tok if tok != pad_id else -100 for tok in seq]
        for seq in labels["input_ids"]
    ]
    return model_inputs

# Apply the tokenizer to both splits in batched mode.
tokenized_train_dataset, tokenized_eval_dataset = (
    split.map(tokenize_function, batched=True)
    for split in (train_dataset, eval_dataset)
)

# Define training arguments.
# NOTE(review): `evaluation_strategy` was renamed `eval_strategy` in newer
# transformers releases — confirm against the pinned version before upgrading.
training_args = TrainingArguments(
    output_dir="./results",
    evaluation_strategy="epoch",
    learning_rate=5.6e-5,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    num_train_epochs=3,
    weight_decay=0.01,
    # Mixed-precision (fp16) requires CUDA; enabling it unconditionally
    # raises at construction time on CPU-only machines.
    fp16=torch.cuda.is_available(),
    no_cuda=not torch.cuda.is_available(),
)

# Assemble the Trainer from the model, arguments and tokenized splits,
# then launch fine-tuning.
trainer_kwargs = dict(
    model=model,
    args=training_args,
    train_dataset=tokenized_train_dataset,
    eval_dataset=tokenized_eval_dataset,
)
trainer = Trainer(**trainer_kwargs)
trainer.train()