from datasets import load_dataset
from transformers import AutoTokenizer, AutoModel, TrainingArguments, Trainer
from sklearn.metrics import accuracy_score
from torch import nn
import torch
import os
import time
import matplotlib.pyplot as plt
from torch.utils.data import Dataset

# Toggle: True = skip training and load a previously saved model, False = train.
skip_training = False

# Offline IMDb dataset: read the local train/test Parquet shards from disk.
data_files = {
    split: f"./imdb/plain_text/{split}-00000-of-00001.parquet"
    for split in ("train", "test")
}
dataset = load_dataset("parquet", data_files=data_files)

# Attach simulated IMDb metadata (a "rating" and a "genre") to one example.
def add_metadata(example):
    """Return the example extended with a random rating in [1, 10] and a genre.

    Existing keys in *example* take precedence over the generated metadata.
    """
    import random

    rating = random.uniform(1.0, 10.0)  # simulated score, 1..10
    genre = random.choice(["Drama", "Comedy", "Horror", "Action"])  # random class
    enriched = {"rating": rating, "genre": genre}
    enriched.update(example)  # original fields win on key collision
    return enriched

# Augment every train/test example with the simulated metadata fields.
dataset = dataset.map(add_metadata)

# Dataset wrapper pairing tokenized review text with its metadata features.
class MultimodalDataset(Dataset):
    """Yields per-sample dicts: text tensors plus rating, genre id, and label."""

    GENRES = ("Drama", "Comedy", "Horror", "Action")

    def __init__(self, dataset, tokenizer, max_length):
        self.dataset = dataset
        self.tokenizer = tokenizer
        self.max_length = max_length
        # Stable genre -> integer-id mapping used by __getitem__.
        self.genre_to_id = {name: idx for idx, name in enumerate(self.GENRES)}

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        sample = self.dataset[idx]

        # Tokenize to a fixed length so batches stack without a collator.
        encoded = self.tokenizer(
            sample["text"],
            truncation=True,
            padding="max_length",
            max_length=self.max_length,
            return_tensors="pt",
        )

        # Squeeze away the batch dim the tokenizer adds with return_tensors="pt".
        return {
            "input_ids": encoded["input_ids"].squeeze(0),
            "attention_mask": encoded["attention_mask"].squeeze(0),
            "rating": torch.tensor(sample["rating"], dtype=torch.float),
            "genre": torch.tensor(self.genre_to_id[sample["genre"]], dtype=torch.long),
            "label": torch.tensor(sample["label"], dtype=torch.long),
        }


# Classifier that fuses transformer text features with rating/genre metadata.
class MultimodalModel(nn.Module):
    """Text encoder plus metadata encoders feeding one joint linear head."""

    def __init__(self, text_model_path, num_labels=2, genre_vocab_size=4):
        super().__init__()
        # Backbone transformer, loaded strictly from a local checkpoint.
        self.text_model = AutoModel.from_pretrained(text_model_path, local_files_only=True)
        # Metadata encoders: 16-d embedding for genre, 16-d projection for rating.
        self.genre_embedding = nn.Embedding(genre_vocab_size, 16)
        self.fc_rating = nn.Linear(1, 16)
        # Head over concatenated [text | genre | rating] = hidden_size + 32 dims.
        self.classifier = nn.Linear(self.text_model.config.hidden_size + 32, num_labels)

    def forward(self, input_ids, attention_mask, rating, genre, labels=None):
        """Return {"loss", "logits"}; loss is None when labels are absent."""
        # Sentence-level text feature: the [CLS] (first-token) representation.
        encoder_out = self.text_model(input_ids=input_ids, attention_mask=attention_mask)
        cls_vec = encoder_out.last_hidden_state[:, 0, :]

        # Encode the two metadata channels.
        genre_vec = self.genre_embedding(genre)
        rating_vec = self.fc_rating(rating.unsqueeze(-1))

        # Fuse all features and classify.
        fused = torch.cat([cls_vec, genre_vec, rating_vec], dim=-1)
        logits = self.classifier(fused)

        if labels is None:
            return {"loss": None, "logits": logits}
        return {"loss": nn.CrossEntropyLoss()(logits, labels), "logits": logits}


# Model locations: base checkpoint for the encoder, output dir for artifacts.
local_model_path = "./models/bert-large-uncased"
trained_model_path = "./results/trained_multimodal"

if not skip_training:
    # Fresh run: load the tokenizer, wrap the splits, and build the model.
    tokenizer = AutoTokenizer.from_pretrained(local_model_path, local_files_only=True)
    train_dataset = MultimodalDataset(dataset["train"], tokenizer, max_length=128)
    eval_dataset = MultimodalDataset(dataset["test"], tokenizer, max_length=128)
    model = MultimodalModel(local_model_path)

    # Training configuration. NOTE(review): fp16=True assumes a CUDA device.
    training_args = TrainingArguments(
        output_dir="./results",
        evaluation_strategy="epoch",
        save_strategy="no",  # no intermediate checkpoints; we save once at the end
        logging_strategy="epoch",
        learning_rate=5e-5,
        per_device_train_batch_size=16,
        gradient_accumulation_steps=2,
        num_train_epochs=5,
        weight_decay=0.01,
        fp16=True,
        dataloader_num_workers=8,
        gradient_checkpointing=True,
        report_to="none",
    )

    # Validation accuracy per evaluation pass (filled in by CustomTrainer).
    eval_accuracies = []

    # Trainer subclass that records eval accuracy each time evaluate() runs.
    class CustomTrainer(Trainer):
        def evaluate(self, eval_dataset=None, ignore_keys=None, metric_key_prefix="eval"):
            eval_results = super().evaluate(
                eval_dataset=eval_dataset,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
            eval_accuracies.append(eval_results["eval_accuracy"])
            return eval_results

    trainer = CustomTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=lambda p: {"accuracy": accuracy_score(p.label_ids, p.predictions.argmax(axis=1))},
    )

    # Train and time the run.
    time_start = time.time()
    trainer.train()
    time_end = time.time()

    # Final held-out evaluation (this also appends one entry to eval_accuracies).
    results = trainer.evaluate()
    print(f"Final Test Accuracy: {results['eval_accuracy']:.4f}")
    print(f"训练时间: {time_end - time_start:.2f} 秒")

    # Persist the final model and tokenizer.
    # BUG FIX: MultimodalModel is a plain nn.Module, not a transformers
    # PreTrainedModel, so it has no save_pretrained(); save the state dict
    # with torch.save instead (matches torch.load in the skip-training branch).
    os.makedirs(trained_model_path, exist_ok=True)
    torch.save(model.state_dict(), os.path.join(trained_model_path, "pytorch_model.bin"))
    tokenizer.save_pretrained(trained_model_path)

    # BUG FIX: the final trainer.evaluate() above appended a duplicate of the
    # last epoch's accuracy; drop it so the curve has one point per epoch.
    epoch_accuracies = eval_accuracies[:-1] if len(eval_accuracies) > 1 else eval_accuracies

    # Plot the validation-accuracy curve and save it next to the model.
    epochs = range(1, len(epoch_accuracies) + 1)
    plt.figure(figsize=(10, 5))
    plt.plot(epochs, epoch_accuracies, label="Validation Accuracy", marker="o")
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy")
    plt.title("Validation Accuracy Curve")
    plt.legend()
    plt.grid()
    plt.savefig(os.path.join(trained_model_path, "accuracy_curve_multimedia.png"))
    # plt.show()


else:
    # Reuse a previously trained run.
    # BUG FIX: the original never defined `tokenizer` on this path, so the
    # single-example inference further below raised NameError; load the
    # tokenizer that tokenizer.save_pretrained() wrote next to the weights.
    tokenizer = AutoTokenizer.from_pretrained(trained_model_path, local_files_only=True)
    model = MultimodalModel(local_model_path)
    model.load_state_dict(torch.load(os.path.join(trained_model_path, "pytorch_model.bin")))

# Smoke-test the model on a single hand-written example.
text = "I absolutely loved this movie! It was amazing and so well acted."
rating = 8.5
genre = "Drama"

# BUG FIX: the original read train_dataset.genre_to_id, which is undefined when
# skip_training is True; rebuild the same mapping MultimodalDataset uses.
genre_to_id = {g: i for i, g in enumerate(["Drama", "Comedy", "Horror", "Action"])}

# Build the model inputs: tokenized text plus batched metadata tensors.
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=128)
inputs["rating"] = torch.tensor(rating).unsqueeze(0)
inputs["genre"] = torch.tensor(genre_to_id[genre]).unsqueeze(0)

model.eval()
with torch.no_grad():
    outputs = model(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        rating=inputs["rating"],
        genre=inputs["genre"],
    )
    # Class 1 is treated as the positive sentiment label.
    predicted_class = outputs["logits"].argmax().item()
    print("Predicted Sentiment:", "Positive" if predicted_class == 1 else "Negative")
