# -*- coding: utf-8 -*-
# time: 2025/5/10 15:08
# file: tf_small_微调.py
# author: hanson
import torch
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    AutoModelForQuestionAnswering,  # extractive question answering head
    TrainingArguments,
    Trainer,
    DataCollatorWithPadding,
    DefaultDataCollator,
)
from datasets import Dataset, load_dataset
from sklearn.metrics import accuracy_score
import numpy as np

# 1. Load the model and tokenizer (swap model_name for another checkpoint if desired)
model_name = "hfl/chinese-bert-wwm-ext"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)

# 2. Load the mini dataset. `load_dataset("json", data_files=<single file>)`
#    produces ONLY a "train" split, so indexing dataset["test"] would raise
#    KeyError — derive a held-out test split explicitly instead.
dataset = load_dataset("json", data_files="./mini_qa.json")
split = dataset["train"].train_test_split(test_size=0.2, seed=42)
dataset_train = split["train"]
dataset_ts = split["test"]

# 3. 数据预处理
def preprocess(examples):
    """Tokenize question/context pairs and attach answer-span labels.

    The original version returned only the tokenizer output; without
    ``start_positions``/``end_positions`` the QA head has no labels, the
    Trainer cannot compute a loss, and training fails. The labels are derived
    here from the character-level answer offsets, and ``offset_mapping`` /
    ``overflow_to_sample_mapping`` are popped so the collator only sees
    tensor-convertible fields.

    Assumes SQuAD-style annotations: examples["answers"] is a list of dicts
    with parallel "text" and "answer_start" lists — TODO confirm against
    mini_qa.json.
    """
    inputs = tokenizer(
        examples["question"],
        examples["context"],
        truncation="only_second",  # never truncate the question, only the context
        max_length=128,
        stride=32,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    offset_mapping = inputs.pop("offset_mapping")
    sample_map = inputs.pop("overflow_to_sample_mapping")
    answers = examples["answers"]
    start_positions = []
    end_positions = []

    for i, offsets in enumerate(offset_mapping):
        answer = answers[sample_map[i]]
        if not answer["text"]:
            # No annotated answer: point both labels at the [CLS] token.
            start_positions.append(0)
            end_positions.append(0)
            continue
        start_char = answer["answer_start"][0]
        end_char = start_char + len(answer["text"][0])
        sequence_ids = inputs.sequence_ids(i)

        # Token span that belongs to the context (sequence id == 1).
        context_start = sequence_ids.index(1)
        context_end = len(sequence_ids) - 1 - sequence_ids[::-1].index(1)

        if (offsets[context_start][0] > end_char
                or offsets[context_end][1] < start_char):
            # Answer falls entirely outside this (overflowed) window.
            start_positions.append(0)
            end_positions.append(0)
        else:
            # Walk inward from both ends to the tokens bounding the answer.
            idx = context_start
            while idx <= context_end and offsets[idx][0] <= start_char:
                idx += 1
            start_positions.append(idx - 1)
            idx = context_end
            while idx >= context_start and offsets[idx][1] >= end_char:
                idx -= 1
            end_positions.append(idx + 1)

    inputs["start_positions"] = start_positions
    inputs["end_positions"] = end_positions
    return inputs

# Tokenize and DROP the raw columns: the leftover string fields
# ("question", "context", ...) cannot be collated into tensors and would
# crash DefaultDataCollator at batch time.
tokenized_dataset_train = dataset_train.map(
    preprocess, batched=True, remove_columns=dataset_train.column_names
)
tokenized_data_ts = dataset_ts.map(
    preprocess, batched=True, remove_columns=dataset_ts.column_names
)

# 4. 训练配置（可在CPU上运行）
# 4. Training configuration (also runs on CPU; mixed precision only with CUDA)
use_fp16 = torch.cuda.is_available()
training_args = TrainingArguments(
    output_dir="./tiny_qa_model",     # checkpoints are written here
    per_device_train_batch_size=8,
    num_train_epochs=3,
    save_steps=100,                   # checkpoint every 100 optimizer steps
    fp16=use_fp16,                    # fp16 only when a GPU is available
)

# Wire model, data, and collator into a Trainer. DefaultDataCollator simply
# stacks the already max_length-padded features into batches.
collator = DefaultDataCollator()
trainer = Trainer(
    model=model,
    args=training_args,
    data_collator=collator,
    train_dataset=tokenized_dataset_train,
    eval_dataset=tokenized_data_ts,
)

# 5. Fine-tune (roughly 10 minutes on this tiny dataset)
trainer.train()

# 6. Persist the fine-tuned weights. The original called model.save_pretrained
#    twice on the same path (a duplicated line, the second call is a no-op);
#    save once, and also save the tokenizer so "./tiny_qa_final" is
#    self-contained and loadable with from_pretrained.
model.save_pretrained("./tiny_qa_final")
tokenizer.save_pretrained("./tiny_qa_final")