import torch
from torch import nn
from torch.utils.data import DataLoader
from transformers import BertTokenizer, BertForSequenceClassification, AdamW, get_scheduler
from datasets import load_dataset
from sklearn.metrics import accuracy_score
from tqdm import tqdm  # 用于显示进度条

# Pick the compute device: prefer the GPU when CUDA is available.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

# 1. Data preparation: fetch the IMDB movie-review dataset
#    (train/test splits) from the Hugging Face hub.
dataset = load_dataset("imdb")

# 2. Load the BERT tokenizer that turns raw text into model input tokens.
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

# 3. Batch-encoding function, used with `Dataset.map(batched=True)`.
def encode_dataset(d):
    """Tokenize a batch of examples for BERT.

    `d` is a batch dict containing a 'text' field. Every review is padded
    or truncated to exactly 512 tokens so all tensors in a batch share
    the same shape.
    """
    texts = d['text']
    return tokenizer(texts, padding='max_length', truncation=True, max_length=512)

def _encode_and_torchify(split):
    # Tokenize the split, then expose only the tensor columns the model needs.
    encoded = split.map(encode_dataset, batched=True)
    encoded.set_format(type='torch', columns=['input_ids', 'attention_mask', 'label'])
    return encoded

# 4./5. Encode both splits and switch them to PyTorch tensor format.
train_dataset = _encode_and_torchify(dataset['train'])
test_dataset = _encode_and_torchify(dataset['test'])

# 6. Mini-batch loaders; shuffle only during training.
train_dataloader = DataLoader(train_dataset, batch_size=16, shuffle=True)
test_dataloader = DataLoader(test_dataset, batch_size=16)

# 7. Load a pretrained BERT encoder with a fresh 2-class classification head.
model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
model.to(device)

# 8. Optimizer and learning-rate schedule.
# NOTE: `transformers.AdamW` is deprecated (and removed in recent transformers
# releases); the library recommends `torch.optim.AdamW` instead. We pass
# weight_decay=0.0 explicitly because torch's default is 0.01 while the old
# transformers.AdamW default was 0.0 — this keeps the training behavior identical.
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5, weight_decay=0.0)
num_epochs = 3
# One optimizer step per batch, so total steps = epochs * batches-per-epoch.
num_training_steps = num_epochs * len(train_dataloader)
# Linear decay from the initial LR down to 0 over all training steps, no warmup.
lr_scheduler = get_scheduler("linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps)

# 9. Training loop
def train_model(model, dataloader, optimizer, lr_scheduler, num_epochs=3):
    """Fine-tune `model` over `dataloader` and save the result.

    Per batch: forward pass with labels (the HF model computes the loss
    itself), zero the gradients, backprop, optimizer step, then scheduler
    step. Prints the mean loss after each epoch, and finally writes the
    trained weights plus the module-level `tokenizer` to "bert_imdb_model".
    """
    model.train()
    for epoch_idx in range(1, num_epochs + 1):
        running = 0.0
        bar = tqdm(dataloader, desc=f"Epoch {epoch_idx}/{num_epochs}")
        for batch in bar:
            ids = batch['input_ids'].to(device)
            mask = batch['attention_mask'].to(device)
            y = batch['label'].to(device)

            # Passing labels makes the model return a cross-entropy loss.
            out = model(ids, attention_mask=mask, labels=y)
            batch_loss = out.loss
            running += batch_loss.item()

            optimizer.zero_grad()
            batch_loss.backward()
            optimizer.step()
            lr_scheduler.step()

            bar.set_postfix(loss=batch_loss.item())

        print(f"Epoch {epoch_idx}, Loss: {running / len(dataloader):.2f}")

    # Persist the fine-tuned model together with its tokenizer.
    model.save_pretrained("bert_imdb_model")
    tokenizer.save_pretrained("bert_imdb_model")

# 10. Evaluation
def evaluate_model(model, dataloader):
    """Return the classification accuracy of `model` over `dataloader`.

    Runs inference with gradients disabled, takes the argmax over the
    logits as the predicted class, and compares against the 'label' column.
    """
    model.eval()
    predictions, true_labels = [], []

    with torch.no_grad():
        for batch in tqdm(dataloader, desc="Evaluating"):
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)

            outputs = model(input_ids, attention_mask=attention_mask)
            logits = outputs.logits
            preds = torch.argmax(logits, dim=1).cpu().numpy()
            predictions.extend(preds)
            # Labels never feed the model, only the CPU-side accuracy
            # computation — keep them on the CPU rather than doing a
            # device round-trip (`.to(device)` followed by `.cpu()`).
            true_labels.extend(batch['label'].numpy())

    accuracy = accuracy_score(true_labels, predictions)
    return accuracy

# Train the model, then evaluate it.
train_model(model, train_dataloader, optimizer, lr_scheduler)

# Measure performance on the held-out test set.
accuracy = evaluate_model(model, test_dataloader)
print(f"Test Accuracy: {accuracy * 100:.2f}%")