from transformers import BertTokenizer, BertForSequenceClassification, Trainer, TrainingArguments
from datasets import Dataset, load_metric
import numpy as np

# Download the model and tokenizer files locally first:
# https://huggingface.co/bert-base-uncased/tree/main

# Toy training / validation samples as (text, label) pairs.
_train_samples = [
    ("I love programming.", 1),
    ("The weather is nice today.", 0),
    ("How are you doing?", 1),
]
_val_samples = [
    ("I hate rain.", 0),
    ("I enjoy sunny days.", 1),
]
train_texts = [text for text, _ in _train_samples]
train_labels = [label for _, label in _train_samples]
val_texts = [text for text, _ in _val_samples]
val_labels = [label for _, label in _val_samples]

# Load the pretrained tokenizer and model (binary classification head).
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForSequenceClassification.from_pretrained('bert-base-uncased')


# Dataset preparation helper.
def tokenize_function(examples):
    """Tokenize a batch of examples.

    Truncates to the model's maximum length and pads every sequence to the
    longest one in the batch.
    """
    encoded = tokenizer(examples['text'], truncation=True, padding=True)
    return encoded


# Build the train/validation datasets, tokenize them, and expose the model
# inputs as torch tensors.
_raw_splits = {
    "train": {'text': train_texts, 'label': train_labels},
    "val": {'text': val_texts, 'label': val_labels},
}
train_dataset = Dataset.from_dict(_raw_splits["train"]).map(tokenize_function, batched=True)
val_dataset = Dataset.from_dict(_raw_splits["val"]).map(tokenize_function, batched=True)

_torch_columns = ['input_ids', 'attention_mask', 'label']
train_dataset.set_format(type='torch', columns=_torch_columns)
val_dataset.set_format(type='torch', columns=_torch_columns)

# Accuracy evaluation.
# NOTE(review): `datasets.load_metric` is deprecated and was removed in
# datasets>=3.0, so loading the "accuracy" metric crashes on current installs.
# Accuracy is computed directly with numpy instead; the return shape
# ({"accuracy": value}) is identical to the old metric's output.
def compute_metrics(eval_pred):
    """Compute accuracy for a Trainer evaluation pass.

    Args:
        eval_pred: ``(logits, labels)`` pair of numpy arrays supplied by
            ``Trainer`` during evaluation.

    Returns:
        dict: ``{"accuracy": float}`` — fraction of correct predictions.
    """
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    accuracy = float((np.asarray(predictions) == np.asarray(labels)).mean())
    return {"accuracy": accuracy}


# Training hyperparameters.
# NOTE(review): `evaluation_strategy` was renamed to `eval_strategy` in
# transformers v4.41 and the old name was removed in later releases —
# confirm which name the installed transformers version accepts.
training_args = TrainingArguments(
    output_dir='./results',
    num_train_epochs=3,  # adjust the number of epochs as appropriate
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    warmup_steps=500,
    weight_decay=0.01,
    logging_dir='./logs',
    evaluation_strategy="epoch",  # evaluate at the end of every epoch
)

# Assemble the Trainer and run fine-tuning.
_trainer_kwargs = {
    "model": model,
    "args": training_args,
    "train_dataset": train_dataset,
    "eval_dataset": val_dataset,
    "compute_metrics": compute_metrics,  # report accuracy at each evaluation
}
trainer = Trainer(**_trainer_kwargs)
trainer.train()
