import time
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from sklearn.metrics import accuracy_score
import os
import torch
import matplotlib.pyplot as plt
import nlpaug.augmenter.word as naw  # 数据增强工具

# Toggle: True skips training and loads a previously trained model instead.
skip_training = False  # Set to True to skip training, False to retrain

# Load the local IMDB dataset (offline, from Parquet files).
data_files = {
    "train": "./imdb/plain_text/train-00000-of-00001.parquet",
    "test": "./imdb/plain_text/test-00000-of-00001.parquet",
}
dataset = load_dataset("parquet", data_files=data_files)

from datasets import concatenate_datasets
import nlpaug.augmenter.word as naw  # NOTE(review): duplicate of the import at the top of the file
import multiprocessing
# Synonym-replacement data augmentation for one batch of examples.
def augment_text_batch(batch):
    """Replace words with WordNet synonyms for every text in the batch.

    A fresh augmenter is created on each call (the function is used with
    multi-process ``datasets.map``). Augmented outputs are normalized to
    plain strings, since some nlpaug versions return token lists.
    """
    augmenter = naw.SynonymAug(aug_src="wordnet")
    augmented = augmenter.augment(batch["text"])
    normalized = []
    for item in augmented:
        if isinstance(item, list):
            normalized.append(" ".join(item))
        else:
            normalized.append(item)
    batch["text"] = normalized
    return batch

# Run the augmentation, parallelized across all CPU cores.
print("Applying data augmentation...")
augmented_dataset = dataset["train"].map(augment_text_batch, batched=True, num_proc=multiprocessing.cpu_count())
print("Data augmentation completed.")

# Double the training data: original examples plus their augmented copies.
# The test split is left untouched.
combined_train = concatenate_datasets([dataset["train"], augmented_dataset])
combined_dataset = {
    "train": combined_train,
    "test": dataset["test"],
}

print("Datasets combined successfully!")





# Batch tokenization used by `datasets.map` before training/evaluation.
def preprocess_function(examples):
    """Tokenize a batch of texts, truncated and padded to 128 tokens.

    Relies on the module-level ``tokenizer`` being initialized first.
    """
    raw_texts = examples["text"]
    encoding = tokenizer(raw_texts, truncation=True, padding="max_length", max_length=128)
    return encoding

# Accuracy metric hook for the Trainer.
def compute_metrics(eval_pred):
    """Return ``{"accuracy": fraction of correct predictions}``.

    ``eval_pred`` is the (logits, labels) pair of arrays handed over by
    ``transformers.Trainer`` during evaluation.
    """
    logits, labels = eval_pred
    predicted = logits.argmax(axis=1)  # predicted class per sample
    return {"accuracy": (predicted == labels).mean()}

# 配置模型路径和训练后模型存储路径
local_model_path = "./models/distilbert-base-uncased"
trained_model_path = "./results/trained_distilbert"

if not skip_training:
    # 如果需要重新训练，加载分词器和模型
    local_tokenizer_path = "./models/distilbert-base-uncased"
    tokenizer = AutoTokenizer.from_pretrained(local_tokenizer_path)
    encoded_dataset = combined_dataset["train"].map(preprocess_function, batched=True)
    eval_dataset = combined_dataset["test"].map(preprocess_function, batched=True)
    model = AutoModelForSequenceClassification.from_pretrained(
        local_model_path, local_files_only=True, num_labels=2
    )

    # 配置训练参数
    training_args = TrainingArguments(
        output_dir="./results",                 # 保存训练结果和模型的目录
        evaluation_strategy="epoch",            # 每个 epoch 进行一次评估
        save_strategy="no",                     # 不保存中间模型
        logging_strategy="epoch",               # 每个 epoch 记录一次日志
        logging_dir="./logs",                   # 日志保存目录
        learning_rate=5e-5,                     # 学习率
        per_device_train_batch_size=64,         # 每个设备的训练批量大小
        gradient_accumulation_steps=2,          # 梯度累计步数（等效于 batch_size * 2）
        num_train_epochs=5,                     # 训练的 epoch 数
        weight_decay=0.01,                      # 权重衰减率，防止过拟合
        fp16=True,                              # 启用混合精度训练，提升速度并减少显存占用
        dataloader_num_workers=8,               # 数据加载线程数，利用多核 CPU
        gradient_checkpointing=True,            # 启用梯度检查点，减少显存占用
        report_to="none",                       # 禁用在线日志报告工具（如 WandB）
    )

    # 自定义 Trainer 回调以记录验证集准确率
    eval_accuracies = []

    class CustomTrainer(Trainer):
        def evaluate(self, eval_dataset=None, ignore_keys=None, metric_key_prefix="eval"):
            eval_results = super().evaluate(eval_dataset=eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
            eval_accuracies.append(eval_results["eval_accuracy"])  # 记录验证集准确率
            return eval_results

    # 创建 Trainer 对象
    trainer = CustomTrainer(
        model=model,
        args=training_args,
        train_dataset=encoded_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,  # 添加计算准确率的函数
    )

    # 禁用分词器多线程日志
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    # 开始训练模型
    time_start = time.time()
    trainer.train()
    time_end = time.time()

    # 在验证集上评估模型性能
    results = trainer.evaluate()
    print(f"Final Test Accuracy: {results['eval_accuracy']:.4f}")
    print(f"训练时间: {time_end - time_start:.2f} 秒")

    # 保存最终模型
    os.makedirs(trained_model_path, exist_ok=True)
    model.save_pretrained(trained_model_path)
    tokenizer.save_pretrained(trained_model_path)

    # 绘制验证集准确率曲线
    epochs = range(1, len(eval_accuracies) + 1)
    plt.figure(figsize=(10, 5))
    plt.plot(epochs, eval_accuracies, label="Validation Accuracy", marker="o")
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy")
    plt.title("Validation Accuracy Curve")
    plt.legend()
    plt.grid()
    plt.savefig(os.path.join(trained_model_path, "data_stren.png"))  # 保存准确率曲线图
else:
    # 如果跳过训练，加载已保存的模型和分词器
    model = AutoModelForSequenceClassification.from_pretrained(trained_model_path)
    tokenizer = AutoTokenizer.from_pretrained(trained_model_path)

    # 在测试集上评估模型性能
    eval_dataset = dataset["test"].map(preprocess_function, batched=True)
    trainer = Trainer(
        model=model,
        compute_metrics=compute_metrics,  # 添加计算准确率的函数
    )
    results = trainer.evaluate(eval_dataset=eval_dataset)
    print(f"Final Test Accuracy: {results['eval_accuracy']:.4f}")


# Smoke test: classify a single hard-coded review with the trained model.
text = "I absolutely loved this movie! It was amazing and so well acted."
inputs = tokenizer(
    text, return_tensors="pt", truncation=True, padding=True, max_length=128
)

# Move model and input tensors onto the same device (GPU if available).
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
inputs = {key: value.to(device) for key, value in inputs.items()}

# Run inference: eval() ensures dropout is disabled, no_grad() skips autograd
# bookkeeping (the original forward pass tracked gradients needlessly).
model.eval()
with torch.no_grad():
    outputs = model(**inputs)
logits = outputs.logits
predicted_class = logits.argmax().item()  # index of the highest logit
print("Predicted Sentiment:", "Positive" if predicted_class == 1 else "Negative")
