import json
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextDataset, DataCollatorForLanguageModeling
from transformers import Trainer, TrainingArguments
# import os
# os.environ['HUGGINGFACE_HUB_CACHE'] = 'E:/ds-train-package'


def train_model(
    model_name: str = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
    data_path: str = "packJson/train.json",
    output_dir: str = "./results/xiaofu",
) -> None:
    """Fine-tune a causal language model on a JSON question/answer dataset.

    Args:
        model_name: Hugging Face Hub model id to load and fine-tune.
        data_path: Path to a JSON file containing a list of objects,
            each with "question" and "answer" keys.
        output_dir: Directory where checkpoints and the final model
            (plus tokenizer) are written.
    """
    # Load the pretrained model and its tokenizer.
    # model_name = "Qwen/Qwen2.5-7B-Instruct-1M"
    # model_name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    # Some causal-LM tokenizers ship without a pad token; padding=True below
    # would raise in that case, so fall back to the EOS token.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # Read the JSON dataset: a list of {"question": ..., "answer": ...} items.
    with open(data_path, 'r', encoding='utf-8') as file:
        data = json.load(file)

    # Flatten each item into a single prompt string (simple formatting;
    # a real project may prefer the model's chat template).
    train_texts = [f"问题: {item['question']} 答案: {item['answer']}" for item in data]

    # Tokenize the whole corpus up front, truncating and padding to a common length.
    train_encodings = tokenizer(train_texts, truncation=True, padding=True, return_tensors='pt')

    class CustomDataset(torch.utils.data.Dataset):
        """Map-style dataset over a BatchEncoding of pre-tokenized tensors."""

        def __init__(self, encodings):
            self.encodings = encodings

        def __getitem__(self, idx):
            # One row from every tensor (input_ids, attention_mask, ...).
            return {key: tensor[idx] for key, tensor in self.encodings.items()}

        def __len__(self):
            return len(self.encodings["input_ids"])

    train_dataset = CustomDataset(train_encodings)

    # mlm=False makes the collator copy input_ids into labels, i.e. standard
    # next-token (causal LM) training rather than masked-LM training.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm=False
    )

    # Training hyperparameters.
    training_args = TrainingArguments(
        output_dir=output_dir,
        overwrite_output_dir=True,
        num_train_epochs=3,
        per_device_train_batch_size=4,
        save_steps=10000,
        save_total_limit=2,
        prediction_loss_only=True,
    )

    # Build the Trainer and run fine-tuning.
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
    )

    trainer.train()
    trainer.save_model()
    # save_model() does not include the tokenizer; persist it too so the
    # output directory is self-contained and loadable with from_pretrained.
    tokenizer.save_pretrained(output_dir)


# Script entry point: run fine-tuning only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    train_model()