import json
from transformers import AutoModelForCausalLM, AutoTokenizer
from datasets import Dataset
import torch
from torch.cuda.amp import autocast, GradScaler
from torch.optim import AdamW
from torch.utils.data import DataLoader

# Check if CUDA is available
# Select the GPU when available; the model and batches are moved to this device later.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Load your dataset
# Expected format: a JSON list of objects with "question" and "answer" keys
# (inferred from the item["question"] / item["answer"] access below) —
# TODO confirm against the actual file.
with open('customer_service_qa.json', 'r') as file:
    data = json.load(file)

# Split data into training and evaluation sets
# NOTE(review): hard-coded split — first 2 records train, the rest eval;
# presumably a smoke-test configuration, verify before a real run.
train_data = data[:2]
eval_data = data[2:]

# Convert to a format suitable for training
# Build HF `datasets.Dataset` objects with parallel "input_text"/"target_text" columns.
train_dataset = Dataset.from_dict({
    "input_text": [item["question"] for item in train_data],
    "target_text": [item["answer"] for item in train_data]
})

eval_dataset = Dataset.from_dict({
    "input_text": [item["question"] for item in eval_data],
    "target_text": [item["answer"] for item in eval_data]
})

# Load the pre-trained model and tokenizer
# Downloads from the HF hub on first run; both must come from the same checkpoint.
model_name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Move the model to the GPU if available
model.to(device)

# Tokenize the dataset
def tokenize_function(examples):
    """Tokenize question/answer pairs into model inputs with loss-masked labels.

    Args:
        examples: batch dict with "input_text" and "target_text" string lists
            (as produced by `Dataset.map(..., batched=True)`).

    Returns:
        dict with "input_ids", "attention_mask", and "labels"; padded label
        positions are set to -100 so they are ignored by the loss.
    """
    model_inputs = tokenizer(examples["input_text"], max_length=128, padding="max_length", truncation=True, return_tensors="pt")
    labels = tokenizer(examples["target_text"], max_length=128, padding="max_length", truncation=True, return_tensors="pt")
    # Bug fix: padded label positions previously contributed real token ids to
    # the loss. -100 is the ignore_index of torch.nn.CrossEntropyLoss, which
    # HF causal-LM heads use internally.
    model_inputs["labels"] = labels["input_ids"].masked_fill(labels["attention_mask"] == 0, -100)
    # NOTE(review): for causal-LM fine-tuning the labels are usually the
    # concatenated (question + answer) ids, not a separately tokenized answer
    # aligned position-by-position with the question — confirm this is intended.
    return model_inputs

# Apply tokenization in batches; adds input_ids/attention_mask/labels columns.
tokenized_train_dataset = train_dataset.map(tokenize_function, batched=True)
tokenized_eval_dataset = eval_dataset.map(tokenize_function, batched=True)

# Convert datasets.Dataset to torch.utils.data.Dataset
class CustomDataset(torch.utils.data.Dataset):
    """Thin adapter exposing a `datasets.Dataset` through the torch Dataset API.

    On access, list-valued and scalar numeric fields are converted to
    tensors; every other value (e.g. raw strings) passes through unchanged.
    """

    def __init__(self, dataset):
        self.dataset = dataset

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        record = self.dataset[idx]
        converted = {}
        for key, value in record.items():
            if isinstance(value, (list, float, int)):
                converted[key] = torch.tensor(value)
            else:
                converted[key] = value
        return converted

# Wrap the tokenized splits so a torch DataLoader can index and batch them.
train_dataset_torch = CustomDataset(tokenized_train_dataset)
eval_dataset_torch = CustomDataset(tokenized_eval_dataset)

# Custom training loop
def custom_train_loop(model, train_dataset, eval_dataset, num_epochs, batch_size, learning_rate, gradient_accumulation_steps):
    """Train `model` with mixed precision (AMP) and gradient accumulation.

    Args:
        model: causal LM whose forward returns an object with a `.loss` attribute.
        train_dataset: torch Dataset yielding dicts of tensors (model kwargs).
        eval_dataset: torch Dataset for the per-epoch evaluation pass.
        num_epochs: number of passes over the training data.
        batch_size: per-micro-batch size; effective batch size is
            batch_size * gradient_accumulation_steps.
        learning_rate: AdamW learning rate.
        gradient_accumulation_steps: micro-batches accumulated per optimizer step.
    """
    scaler = GradScaler()
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    eval_dataloader = DataLoader(eval_dataset, batch_size=batch_size)
    optimizer = AdamW(model.parameters(), lr=learning_rate)

    # Enable gradient checkpointing to trade recompute for activation memory.
    model.gradient_checkpointing_enable()

    for epoch in range(num_epochs):
        model.train()
        total_loss = 0
        optimizer.zero_grad()  # start each epoch with clean gradients

        for step, batch in enumerate(train_dataloader):
            # Periodically release cached GPU memory (no-op when CUDA is unused).
            if step % 10 == 0:
                torch.cuda.empty_cache()

            batch = {k: v.to(device) if isinstance(v, torch.Tensor) else v for k, v in batch.items()}

            with autocast():
                outputs = model(**batch)
                loss = outputs.loss / gradient_accumulation_steps  # scale loss by accumulation steps

            scaler.scale(loss).backward()

            # Step the optimizer once every `gradient_accumulation_steps` micro-batches.
            if (step + 1) % gradient_accumulation_steps == 0:
                scaler.step(optimizer)
                scaler.update()
                optimizer.zero_grad()

            total_loss += loss.item() * gradient_accumulation_steps

            if step % 10 == 0:
                print(f"Epoch {epoch + 1}, Step {step}, Loss: {loss.item()}")

        # Bug fix: flush a trailing partial accumulation window. Previously,
        # when len(train_dataloader) was not a multiple of
        # gradient_accumulation_steps, the last batches' gradients were
        # accumulated but never stepped, then zeroed at the next epoch start.
        if len(train_dataloader) % gradient_accumulation_steps != 0:
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()

        # Evaluation phase — no gradient computation.
        model.eval()
        eval_loss = 0
        with torch.no_grad():
            for batch in eval_dataloader:
                torch.cuda.empty_cache()  # release cached memory between batches
                batch = {k: v.to(device) if isinstance(v, torch.Tensor) else v for k, v in batch.items()}
                outputs = model(**batch)
                eval_loss += outputs.loss.item()

        # max(..., 1) guards the averages against empty dataloaders.
        print(f"Epoch {epoch + 1}, Train Loss: {total_loss / max(len(train_dataloader), 1)}, Eval Loss: {eval_loss / max(len(eval_dataloader), 1)}")

# Use custom training loop
# Smoke-run configuration: one epoch, micro-batch of 1; effective batch size
# is batch_size * gradient_accumulation_steps = 4.
custom_train_loop(
    model=model,
    train_dataset=train_dataset_torch,
    eval_dataset=eval_dataset_torch,
    num_epochs=1,
    batch_size=1,
    learning_rate=5.6e-5,
    gradient_accumulation_steps=4
)
