import os
import torch
import pandas as pd
from torch import nn, device
from torch.utils.data import Dataset, DataLoader
from transformers import BertModel, BertTokenizer
from transformers import get_linear_schedule_with_warmup
from torch.optim import AdamW
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm


# 1. Hyper-parameter and path configuration
class Config:
    """Central configuration: local model/data paths and training hyper-parameters."""
    BERT_MODEL_PATH = r"C:\Users\dell\Desktop\PythonProject\大作业\chinese\models--bert-base-chinese\snapshots\c30a6ed22ab4564dc1e3b2ecbf6e766b0611a33f"  # local path to the bert-base-chinese snapshot
    DATA_PATH = r"C:\Users\dell\Desktop\PythonProject\大作业\data\weibo_senti_100k.csv"  # local path to the Weibo sentiment CSV
    MAX_LEN = 128          # maximum token length; shorter texts are padded to this
    BATCH_SIZE = 32
    HIDDEN_SIZE = 256      # LSTM hidden size per direction
    NUM_LAYERS = 2         # stacked LSTM layers
    DROPOUT = 0.3
    LEARNING_RATE = 2e-5
    EPOCHS = 4
    SEED = 42
    TEXT_COL = "review"    # name of the text column in the CSV
    LABEL_COL = "label"    # name of the label column in the CSV


# 2. Seed the CPU and all CUDA RNGs for reproducible training runs.
torch.manual_seed(Config.SEED)
torch.cuda.manual_seed_all(Config.SEED)


# 3. Data preprocessing
class WeiboDataset(Dataset):
    """Map-style dataset that tokenizes one Weibo text per access.

    Each item is a dict with fixed-length ``input_ids`` / ``attention_mask``
    tensors (padded/truncated to ``max_len``) and a scalar long ``labels``.
    """

    def __init__(self, texts, labels, tokenizer, max_len):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_len = max_len

    def __len__(self):
        # One sample per raw text.
        return len(self.texts)

    def __getitem__(self, idx):
        # Coerce to str defensively (pandas may hand us NaN / numeric cells).
        sample = str(self.texts[idx])
        enc = self.tokenizer.encode_plus(
            sample,
            add_special_tokens=True,
            max_length=self.max_len,
            padding="max_length",
            truncation=True,
            return_attention_mask=True,
            return_tensors="pt",
        )
        label_tensor = torch.tensor(self.labels[idx], dtype=torch.long)
        # encode_plus returns (1, max_len) tensors; flatten to (max_len,).
        return {
            "input_ids": enc["input_ids"].flatten(),
            "attention_mask": enc["attention_mask"].flatten(),
            "labels": label_tensor,
        }


# 4. Model definition
class BertBiLSTM(nn.Module):
    """BERT encoder -> BiLSTM -> linear binary classifier.

    Bug fix: the original pooled ``lstm_output[:, -1, :]`` — the hidden state
    at the *last padded* position. Since every input is padded to MAX_LEN
    (``padding="max_length"``), that position is almost always a [PAD] token,
    so the classifier saw padding context instead of the sentence. We now
    mean-pool the LSTM outputs over real tokens only, using ``attention_mask``.
    """

    def __init__(self):
        super().__init__()
        # Load BERT weights from the local snapshot directory.
        self.bert = BertModel.from_pretrained(Config.BERT_MODEL_PATH)
        self.lstm = nn.LSTM(
            input_size=self.bert.config.hidden_size,
            hidden_size=Config.HIDDEN_SIZE,
            num_layers=Config.NUM_LAYERS,
            batch_first=True,
            bidirectional=True
        )
        self.dropout = nn.Dropout(Config.DROPOUT)
        # Bidirectional => 2 * HIDDEN_SIZE features; 2 output classes.
        self.classifier = nn.Linear(Config.HIDDEN_SIZE * 2, 2)

    def forward(self, input_ids, attention_mask):
        """Return (batch, 2) logits for a padded batch of token ids.

        Args:
            input_ids: (batch, seq_len) long tensor of token ids.
            attention_mask: (batch, seq_len) tensor, 1 for real tokens, 0 for padding.
        """
        bert_output = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            return_dict=False
        )
        sequence_output = bert_output[0]             # (batch, seq_len, bert_hidden)
        lstm_output, _ = self.lstm(sequence_output)  # (batch, seq_len, 2*HIDDEN_SIZE)
        # Mask-aware mean pooling: padded positions contribute nothing.
        mask = attention_mask.unsqueeze(-1).to(lstm_output.dtype)
        pooled = (lstm_output * mask).sum(dim=1) / mask.sum(dim=1).clamp(min=1e-9)
        pooled = self.dropout(pooled)
        return self.classifier(pooled)


# 5. Training and evaluation functions
def train_epoch(model, dataloader, optimizer, scheduler, device):
    """Run one training epoch; return (mean batch loss, accuracy).

    Fix: the loss criterion was re-instantiated inside the batch loop on
    every iteration; it is loop-invariant, so build it once up front.
    """
    model.train()
    criterion = nn.CrossEntropyLoss()
    total_loss = 0
    correct = 0

    for batch in tqdm(dataloader, desc="Training"):
        input_ids = batch["input_ids"].to(device)
        attention_mask = batch["attention_mask"].to(device)
        labels = batch["labels"].to(device)

        optimizer.zero_grad()
        outputs = model(input_ids, attention_mask)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        scheduler.step()  # per-step LR schedule (linear warmup/decay)

        total_loss += loss.item()
        _, preds = torch.max(outputs, dim=1)
        correct += (preds == labels).sum().item()

    # Mean loss per batch; accuracy over the whole dataset.
    return total_loss / len(dataloader), correct / len(dataloader.dataset)


def eval_model(model, dataloader, device):
    """Evaluate without gradients; return (mean batch loss, accuracy, preds, labels).

    ``preds`` / ``labels`` are flat Python lists of numpy scalars in dataset
    order, suitable for sklearn's classification_report / confusion_matrix.

    Fix: the loss criterion was re-instantiated every batch; build it once.
    """
    model.eval()
    criterion = nn.CrossEntropyLoss()
    total_loss = 0
    correct = 0
    all_preds = []
    all_labels = []

    with torch.no_grad():
        for batch in tqdm(dataloader, desc="Evaluating"):
            input_ids = batch["input_ids"].to(device)
            attention_mask = batch["attention_mask"].to(device)
            labels = batch["labels"].to(device)

            outputs = model(input_ids, attention_mask)
            loss = criterion(outputs, labels)

            total_loss += loss.item()
            _, preds = torch.max(outputs, dim=1)
            correct += (preds == labels).sum().item()
            all_preds.extend(preds.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

    return total_loss / len(dataloader), correct / len(dataloader.dataset), all_preds, all_labels


# 6. Main entry point
def main():
    """End-to-end pipeline: load data, train BertBiLSTM, evaluate, and plot.

    Side effects: writes ``best_model.pt``, ``training_curve.png`` and
    ``confusion_matrix.png`` to the working directory and opens plot windows.

    Fix: ``torch.load`` now passes ``map_location=device`` so the checkpoint
    restores correctly regardless of which device it was saved from.
    """
    # Pick GPU when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # Load the local CSV dataset.
    df = pd.read_csv(Config.DATA_PATH)
    print("数据列名:", df.columns.tolist())  # debug: confirm the column names

    # 80/20 train/test split, then 10% of train held out for validation.
    train_df, test_df = train_test_split(df, test_size=0.2, random_state=Config.SEED)
    train_df, val_df = train_test_split(train_df, test_size=0.1, random_state=Config.SEED)

    # Load the tokenizer from the local snapshot.
    tokenizer = BertTokenizer.from_pretrained(Config.BERT_MODEL_PATH)

    # Build datasets using the column names declared in Config.
    train_data = WeiboDataset(train_df[Config.TEXT_COL].values,
                              train_df[Config.LABEL_COL].values,
                              tokenizer, Config.MAX_LEN)
    val_data = WeiboDataset(val_df[Config.TEXT_COL].values,
                            val_df[Config.LABEL_COL].values,
                            tokenizer, Config.MAX_LEN)
    test_data = WeiboDataset(test_df[Config.TEXT_COL].values,
                             test_df[Config.LABEL_COL].values,
                             tokenizer, Config.MAX_LEN)

    # Data loaders (only training data is shuffled).
    train_loader = DataLoader(train_data, batch_size=Config.BATCH_SIZE, shuffle=True)
    val_loader = DataLoader(val_data, batch_size=Config.BATCH_SIZE)
    test_loader = DataLoader(test_data, batch_size=Config.BATCH_SIZE)

    # Model.
    model = BertBiLSTM().to(device)

    # Optimizer and linear LR schedule (no warmup steps).
    optimizer = AdamW(model.parameters(), lr=Config.LEARNING_RATE)
    total_steps = len(train_loader) * Config.EPOCHS
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=0,
        num_training_steps=total_steps
    )

    # Training loop: keep the checkpoint with the best validation accuracy.
    best_acc = 0
    train_losses, val_losses = [], []
    train_accs, val_accs = [], []

    for epoch in range(Config.EPOCHS):
        print(f"\nEpoch {epoch + 1}/{Config.EPOCHS}")
        train_loss, train_acc = train_epoch(model, train_loader, optimizer, scheduler, device)
        val_loss, val_acc, _, _ = eval_model(model, val_loader, device)

        train_losses.append(train_loss)
        val_losses.append(val_loss)
        train_accs.append(train_acc)
        val_accs.append(val_acc)

        print(f"Train Loss: {train_loss:.4f} | Train Acc: {train_acc:.4f}")
        print(f"Val Loss: {val_loss:.4f} | Val Acc: {val_acc:.4f}")

        if val_acc > best_acc:
            best_acc = val_acc
            torch.save(model.state_dict(), "best_model.pt")
            print("Saved best model!")

    # Plot loss/accuracy curves.
    plt.figure(figsize=(12, 5))
    plt.subplot(1, 2, 1)
    plt.plot(train_losses, label="Train")
    plt.plot(val_losses, label="Val")
    plt.title("Loss")
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.plot(train_accs, label="Train")
    plt.plot(val_accs, label="Val")
    plt.title("Accuracy")
    plt.legend()
    plt.savefig("training_curve.png")
    plt.show()

    # Evaluate the best checkpoint on the held-out test set.
    model.load_state_dict(torch.load("best_model.pt", map_location=device))
    test_loss, test_acc, preds, labels = eval_model(model, test_loader, device)
    print(f"\nTest Accuracy: {test_acc:.4f}")
    print(classification_report(labels, preds))

    # Confusion matrix heatmap.
    cm = confusion_matrix(labels, preds)
    plt.figure(figsize=(6, 6))
    sns.heatmap(cm, annot=True, fmt="d", cmap="Blues")
    plt.title("Confusion Matrix")
    plt.savefig("confusion_matrix.png")
    plt.show()


# Standard script entry guard: run the pipeline only when executed directly.
if __name__ == "__main__":
    main()