from transformers import BertTokenizer, TFBertModel, BertConfig
import tensorflow as tf
from tensorflow.keras import layers, models

# Load the pretrained BERT tokenizer and model.
# NOTE(review): downloads weights on first run — requires network access or a
# populated HuggingFace cache; both objects are module-level globals used below.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
bert_model = TFBertModel.from_pretrained('bert-base-uncased')

# Build a BERT-based classification model
class BertClassifier(models.Model):
    """BERT encoder + dropout + softmax head for sequence classification.

    NOTE(review): binds the module-level ``bert_model`` global rather than
    taking the encoder as a constructor argument, so all instances share one
    set of BERT weights.
    """

    def __init__(self, num_classes):
        """Args:
            num_classes: number of output classes for the softmax layer.
        """
        super().__init__()
        self.bert = bert_model
        self.dropout = layers.Dropout(0.1)
        self.classifier = layers.Dense(num_classes, activation='softmax')

    def call(self, inputs, training=None):
        """Encode with BERT, pool, apply dropout, and classify.

        Args:
            inputs: tokenizer output (input_ids / attention_mask / ...).
            training: Keras training flag. Forwarded explicitly so dropout
                (here and inside BERT) is active only during training — the
                original omitted it and relied on Keras's implicit
                propagation, which does not apply on direct calls.

        Returns:
            Tensor of shape (batch, num_classes) with softmax probabilities.
        """
        outputs = self.bert(inputs, training=training)
        # pooler_output is the [CLS]-token representation after BERT's pooler.
        pooled_output = outputs.pooler_output
        pooled_output = self.dropout(pooled_output, training=training)
        return self.classifier(pooled_output)

# Example: end-to-end text classification.
num_classes = 8
classifier = BertClassifier(num_classes)
classifier.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

# Toy training data.
texts = ["This is a sample text.", "BERT is powerful."]
labels = [0, 1]

# Tokenize: pad/truncate to a common length and return TensorFlow tensors.
encoded_inputs = tokenizer(texts, padding=True, truncation=True, return_tensors='tf')

# Train. Keras fit() expects a plain dict of tensors — not the tokenizer's
# BatchEncoding wrapper — and an array/tensor of labels rather than a raw
# Python list, so convert both before fitting.
classifier.fit(dict(encoded_inputs), tf.convert_to_tensor(labels), epochs=5, batch_size=2)