import os
import pandas as pd
# NOTE(review): hard-coded local HTTP(S) proxy, presumably so model downloads
# from the Hugging Face Hub go through it. Set before the transformers imports
# below so the library sees it. Consider making this configurable — confirm
# the proxy address with the deployment environment.
os.environ['HTTP_PROXY'] = "http://127.0.0.1:7890"
os.environ['HTTPS_PROXY'] = "http://127.0.0.1:7890"
from transformers import DataCollatorWithPadding
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import TrainingArguments, Trainer
from datasets import Dataset

def preprocess_function(examples):
    """Tokenize a batch of examples for sequence classification.

    Relies on the module-level ``tokenizer`` created in ``__main__``.

    Args:
        examples: A batch from ``Dataset.map(..., batched=True)`` — a dict
            containing a ``"text"`` list of strings.

    Returns:
        A dict with ``input_ids`` / ``attention_mask`` lists, truncated to
        the model's maximum length.

    Note:
        Padding is deliberately NOT done here. The original padded every
        example to ``max_length``, which is redundant (and wasteful) because
        training uses a ``DataCollatorWithPadding`` that already pads each
        batch dynamically to its longest sequence. Truncation-only here plus
        dynamic padding at collation is the recommended pattern.
    """
    return tokenizer(examples["text"], truncation=True)

if __name__ == '__main__':
    # Build a tiny demo dataset of texts and binary labels.
    data = {
        'text': ['This is a sample text', 'This is another sample text'],
        'label': [1, 0]
    }
    df = pd.DataFrame(data)

    # Load the pretrained BERT tokenizer.
    tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased")
    # Derive the number of classes from the unique labels in the data.
    num_labels = len(df['label'].unique())
    # Load the pretrained BERT model with a fresh sequence-classification head.
    model = AutoModelForSequenceClassification.from_pretrained("bert-large-uncased", num_labels=num_labels)

    # Convert the pandas DataFrame into a Hugging Face Dataset.
    dataset = Dataset.from_pandas(df)
    # Tokenize the text column in batches.
    tokenized_dataset = dataset.map(preprocess_function, batched=True)

    # Expose only the columns the model consumes, as PyTorch tensors.
    # (The collator renames 'label' to 'labels' for the model's forward pass.)
    tokenized_dataset.set_format(type='torch', columns=['input_ids', 'attention_mask', 'label'])
    # Pad each batch dynamically to its longest sequence at collation time.
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    # Training hyperparameters: output directory, epochs, batch sizes, etc.
    training_args = TrainingArguments(
        output_dir='./finetuned_bert',
        num_train_epochs=3,
        per_device_train_batch_size=16,
        per_device_eval_batch_size=64,
        warmup_steps=500,
        weight_decay=0.01,
        logging_dir='./logs',
        logging_steps=10,
    )
    # Trainer drives the training loop over the model, args, and data.
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=tokenized_dataset,
        tokenizer=tokenizer,  # lets save_model() also export the tokenizer
    )

    # Fine-tune the model.
    trainer.train()
    # BUG FIX: the original saved only the tokenizer, discarding the
    # fine-tuned weights. Persist the model (weights + config) as well so
    # the run can actually be reloaded with from_pretrained().
    trainer.save_model('./finetuned_bert')
    tokenizer.save_pretrained('./finetuned_bert')