# -*- coding: utf-8 -*-
# time: 2025/5/10 15:08
# file: tf_small_微调.py
# author: hanson
import torch
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    TrainingArguments,
    Trainer,
    DataCollatorWithPadding
)
from datasets import Dataset
from sklearn.metrics import accuracy_score
import numpy as np

# 1. Build a tiny Chinese sentiment dataset (10 samples; replace with real data).
#    Each pair is (review text, label) with 1 = positive, 0 = negative.
_samples = [
    ("这家餐厅很好吃", 1),
    ("手机质量太差了", 0),
    ("物流速度很快", 1),
    ("客服态度恶劣", 0),
    ("性价比非常高", 1),
    ("包装破损严重", 0),
    ("使用体验很棒", 1),
    ("完全不值这个价", 0),
    ("会推荐给朋友", 1),
    ("售后服务差劲", 0),
]
data = {
    "text": [text for text, _ in _samples],
    "label": [label for _, label in _samples],
}
# 80/20 train/test split; fixed seed keeps the split reproducible across runs.
dataset = Dataset.from_dict(data).train_test_split(test_size=0.2, seed=42)

# 2. Load a very small pretrained Chinese BERT (~40MB checkpoint).
# NOTE(review): ckiplab models are published by CKIP Lab (Academia Sinica) —
# the original comment attributing this to Huawei was incorrect.
model_name = "ckiplab/bert-tiny-chinese"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# num_labels=2 attaches a freshly initialized binary classification head.
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)

# 3. Data preprocessing
def preprocess(examples):
    """Tokenize a batch of examples, truncating each text to 64 tokens.

    Padding is intentionally omitted here; it is handled per-batch by the
    DataCollatorWithPadding configured alongside this function.
    """
    texts = examples["text"]
    return tokenizer(texts, truncation=True, max_length=64)

# Tokenize the whole dataset up front; padding is deferred to the collator so
# each batch is padded only to its own longest sequence (dynamic padding).
tokenized_data = dataset.map(preprocess, batched=True)
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

# 4. Evaluation metric
def compute_metrics(eval_pred):
    """Compute classification accuracy for a Trainer evaluation pass.

    Args:
        eval_pred: ``(logits, labels)`` pair of numpy arrays as supplied by
            ``transformers.Trainer`` — logits shaped (n, num_labels), labels
            shaped (n,).

    Returns:
        dict mapping ``"accuracy"`` to a float in [0, 1].
    """
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    # A plain numpy mean equals sklearn.metrics.accuracy_score for 1-D integer
    # labels and avoids depending on sklearn for a one-line metric.
    return {"accuracy": float((preds == np.asarray(labels)).mean())}

# 5. Training configuration tuned for low-resource machines.
training_args = TrainingArguments(
    output_dir="./tiny_bert_results",
    per_device_train_batch_size=8,  # small batches to fit limited VRAM
    per_device_eval_batch_size=8,
    learning_rate=3e-5,
    num_train_epochs=5,             # tiny dataset, so extra epochs are cheap
    logging_steps=10,
    # NOTE(review): this kwarg was renamed to `eval_strategy` in newer
    # transformers releases — confirm the pinned version accepts this name.
    evaluation_strategy="epoch",
    save_strategy="epoch",
    report_to="none",               # disable wandb/tensorboard reporting
    fp16=torch.cuda.is_available()  # enable mixed precision only on CUDA
)

# 6. Wire model, data, collator, and metric into the Trainer and fine-tune.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_data["train"],
    eval_dataset=tokenized_data["test"],
    data_collator=data_collator,
    compute_metrics=compute_metrics,
)

trainer.train()

# 7. Save the fine-tuned model AND tokenizer to one directory so it can be
#    reloaded later with AutoModelForSequenceClassification / AutoTokenizer.
trainer.save_model("./tiny_chinese_sentiment_model")  # writes weights + config
# The tokenizer was not saved before, which made the saved directory
# unloadable as a standalone checkpoint; the duplicate model.save_pretrained
# to the same path was redundant and has been removed.
tokenizer.save_pretrained("./tiny_chinese_sentiment_model")

# 8. Inference smoke test.
test_text = "这个产品物超所值"
inputs = tokenizer(test_text, return_tensors="pt", truncation=True, max_length=64)
# Trainer leaves the model in train mode; switch to eval so dropout is off
# and predictions are deterministic.
model.eval()
# Move inputs to the model's device — the Trainer may have moved the model to
# GPU, and CPU tensors would raise a device-mismatch error.
inputs = {k: v.to(model.device) for k, v in inputs.items()}
with torch.no_grad():
    logits = model(**inputs).logits
pred = torch.argmax(logits).item()
print(f"测试文本: {test_text}")
print(f"预测结果: {'正面' if pred == 1 else '负面'}")