'''
Final Test Accuracy: 0.8882
Total training time: 334.97 seconds
'''
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from sklearn.metrics import accuracy_score
import os
import torch
import matplotlib.pyplot as plt
import time 

# Toggle: True loads an already fine-tuned model, False retrains from scratch.
skip_training = False

# Load the IMDB dataset fully offline from local Parquet shards
# (train / test / unsupervised splits).
dataset = load_dataset(
    "parquet",
    data_files={
        "train": "./imdb/plain_text/train-00000-of-00001.parquet",
        "test": "./imdb/plain_text/test-00000-of-00001.parquet",
        "unsupervised": "./imdb/plain_text/unsupervised-00000-of-00001.parquet",
    },
)

def preprocess_function(examples):
    """Tokenize the "text" field of a batch of examples.

    Relies on the module-level `tokenizer`; pads/truncates every
    sequence to a fixed length of 128 tokens.
    """
    tokenize_kwargs = {
        "truncation": True,
        "padding": "max_length",
        "max_length": 128,
    }
    return tokenizer(examples["text"], **tokenize_kwargs)

def compute_metrics(eval_pred):
    """Compute classification accuracy for the Trainer's evaluation loop.

    `eval_pred` is a (logits, labels) pair; the predicted class is the
    argmax over the logits of each sample.
    """
    logits, labels = eval_pred
    predicted_classes = logits.argmax(axis=1)
    return {"accuracy": accuracy_score(labels, predicted_classes)}

# Model locations: the offline pretrained base checkpoint, and where the
# fine-tuned model/tokenizer are written after training.
local_model_path = "./models/bert-base-uncased"  # offline BERT-base checkpoint
trained_model_path = "./results/trained_bert_base"  # final fine-tuned model output path

if not skip_training:
    # Retrain: load tokenizer and base model from the local (offline) path.
    tokenizer = AutoTokenizer.from_pretrained(local_model_path)  # offline tokenizer load
    encoded_dataset = dataset.map(preprocess_function, batched=True)
    model = AutoModelForSequenceClassification.from_pretrained(
        local_model_path, local_files_only=True, num_labels=2
    )  # offline model load with a fresh 2-class head

    # Take 100% of the training split (the `* 1` factor is a knob for
    # downsampling experiments — lower it to train on a fraction).
    train_dataset = encoded_dataset["train"].shuffle(seed=42).select(
        range(int(len(encoded_dataset["train"]) * 1))
    )

    # Take 100% of the test split as the evaluation set.
    eval_dataset = encoded_dataset["test"].shuffle(seed=42).select(
        range(int(len(encoded_dataset["test"]) * 1))
    )



    # Training configuration
    training_args = TrainingArguments(
        output_dir="./results",                 # scratch dir for training artifacts (no intermediate checkpoints)
        evaluation_strategy="epoch",            # evaluate once per epoch
        save_strategy="no",                     # do not save intermediate checkpoints
        logging_strategy="epoch",               # log once per epoch
        learning_rate=5e-5,                     # learning rate
        per_device_train_batch_size=64,         # per-device train batch size
        gradient_accumulation_steps=2,          # gradient accumulation (effective batch = 64 * 2)
        num_train_epochs=5,                     # number of training epochs
        weight_decay=0.01,                      # weight decay to curb overfitting
        fp16=True,                              # mixed precision: faster, lower GPU memory
        dataloader_num_workers=8,               # dataloader workers, uses multiple CPU cores
        gradient_checkpointing=True,            # trade recompute for lower GPU memory
        report_to="none",                       # disable online loggers (e.g. WandB)
    )
    
    # Build the Trainer for training + per-epoch evaluation
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,  # accuracy metric
    )

    # Silence the tokenizers fork/parallelism warning
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    # Record training start time
    time_start = time.time()
    # Train
    trainer.train()

    # Record training end time
    time_end = time.time()
    print("训练时间",time_end-time_start)
    # Save the final fine-tuned model and its tokenizer together
    model.save_pretrained(trained_model_path)
    tokenizer.save_pretrained(trained_model_path)
    print(f"最终模型已保存到: {trained_model_path}")

    # Final evaluation on the held-out test split
    results = trainer.evaluate(eval_dataset=eval_dataset)
    print(f"Final Test Accuracy: {results['eval_accuracy']:.4f}")
    print(f"训练总时间: {time_end - time_start:.2f} 秒")

    # Collect the per-epoch eval accuracy values from the Trainer's log history
    eval_accuracy = [log["eval_accuracy"] for log in trainer.state.log_history if "eval_accuracy" in log]

    # Directory for output figures
    output_dir = "picture"
    os.makedirs(output_dir, exist_ok=True)

    # Plot validation accuracy per epoch
    epochs = range(1, len(eval_accuracy) + 1)
    plt.figure(figsize=(10, 5))
    plt.plot(epochs, eval_accuracy, label="Validation Accuracy", marker="o")
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy")
    plt.title("Validation Accuracy")
    plt.legend()
    plt.grid()
    plt.savefig(os.path.join(output_dir, "accuracy_plot_BERT.png"))  # save the accuracy curve to disk

else:
    # Skip training: load the previously saved fine-tuned model and tokenizer
    model = AutoModelForSequenceClassification.from_pretrained(trained_model_path)
    tokenizer = AutoTokenizer.from_pretrained(trained_model_path)

    # Evaluate on the test split (tokenize it on the fly)
    eval_dataset = dataset["test"].map(preprocess_function, batched=True)
    trainer = Trainer(
        model=model,
        compute_metrics=compute_metrics,  # accuracy metric
    )
    results = trainer.evaluate(eval_dataset=eval_dataset)
    print(f"Final Test Accuracy: {results['eval_accuracy']:.4f}")


# Sanity check: classify a single review with the trained model.
text = "I absolutely loved this movie! It was amazing and so well acted."
inputs = tokenizer(
    text, return_tensors="pt", truncation=True, padding=True, max_length=128
)

# Move model and input tensors to the same device (CPU or GPU).
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
inputs = {key: value.to(device) for key, value in inputs.items()}

# Run the forward pass in inference mode: eval() guarantees dropout is
# disabled regardless of the model's current mode, and no_grad() skips
# building the autograd graph (saves memory and time).
model.eval()
with torch.no_grad():
    outputs = model(**inputs)
logits = outputs.logits
predicted_class = logits.argmax().item()
print("Predicted Sentiment:", "Positive" if predicted_class == 1 else "Negative")
