import re
from collections import Counter
from functools import lru_cache

import nltk
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from sklearn.model_selection import train_test_split
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader

# Pick the computation device (GPU when available).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"使用设备: {device}")

# Fetch the required NLTK resources on first run only.
for resource_path, resource_name in (('tokenizers/punkt', 'punkt'),
                                     ('corpora/stopwords', 'stopwords')):
    try:
        nltk.data.find(resource_path)
    except LookupError:
        nltk.download(resource_name)

# Text preprocessing
@lru_cache(maxsize=1)
def _sentiment_stop_words():
    """Build the stop-word set once: English stop words minus negation tokens.

    Negations flip sentiment polarity, so they must survive filtering.
    Because preprocess_text strips apostrophes before tokenizing,
    contractions reach the filter as e.g. "dont", never "don't" — both
    spellings are excluded from the stop list so the intent holds
    regardless of which forms the NLTK stop-word list contains.
    Cached so the NLTK corpus is only read once, not on every call.
    """
    negations = {
        'no', 'not',
        "don't", "doesn't", "didn't", "won't", "wouldn't",
        "can't", "couldn't", "shouldn't",
        # apostrophe-stripped variants actually produced by preprocess_text
        'dont', 'doesnt', 'didnt', 'wont', 'wouldnt',
        'cant', 'couldnt', 'shouldnt',
    }
    return set(stopwords.words('english')) - negations


def preprocess_text(text):
    """Tokenize *text* for sentiment analysis.

    Steps: lower-case; count exclamation marks (they carry sentiment);
    strip punctuation; remove stop words while keeping negations; append
    marker tokens describing exclamation usage.

    Returns a list of token strings.
    """
    text = text.lower()
    # Keep '!' through the first pass so it can be counted as a feature.
    text = re.sub(r'[^\w\s!]', '', text)
    exclamation_count = text.count('!')
    # Strip the remaining punctuation (including '!') before tokenizing.
    text = re.sub(r'[^\w\s]', '', text)

    stop_words = _sentiment_stop_words()
    tokens = [word for word in word_tokenize(text) if word not in stop_words]

    # Marker tokens let the model see exclamation usage.
    if exclamation_count > 0:
        tokens.append('has_exclamation')
        if exclamation_count > 2:
            tokens.append('multiple_exclamation')

    return tokens

# Vocabulary construction
class Vocabulary:
    """Bidirectional word/index mapping with a frequency cutoff.

    Index 0 is reserved for <PAD> and index 1 for <UNK>; real words are
    assigned indices from 2 upward in first-seen order.
    """

    def __init__(self, freq_threshold=1):
        # freq_threshold: a word must occur at least this many times to be kept
        self.itos = {0: "<PAD>", 1: "<UNK>"}
        self.stoi = {"<PAD>": 0, "<UNK>": 1}
        self.freq_threshold = freq_threshold
        self.idx = 2  # next free index (0 and 1 are reserved)

    def build_vocabulary(self, sentence_list):
        """Register every sufficiently frequent word from the token lists."""
        counts = Counter(word for sentence in sentence_list for word in sentence)
        for word, count in counts.items():
            if count >= self.freq_threshold:
                self.stoi[word] = self.idx
                self.itos[self.idx] = word
                self.idx += 1

    def numericalize(self, text):
        """Map a token list to index ids, falling back to <UNK>."""
        unk = self.stoi["<UNK>"]
        return [self.stoi.get(token, unk) for token in text]

# Custom dataset
class SentimentDataset(Dataset):
    """Dataset of raw sentiment texts with on-the-fly preprocessing.

    Each item is preprocessed and numericalized lazily in __getitem__,
    yielding (token_id_tensor, float_label_tensor).
    """

    def __init__(self, texts, labels, vocab):
        # texts: raw strings; labels: 0 = negative, 1 = positive; vocab: Vocabulary
        self.texts = texts
        self.labels = labels
        self.vocab = vocab

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, index):
        # Preprocess and numericalize on access rather than up-front.
        token_ids = self.vocab.numericalize(preprocess_text(self.texts[index]))
        label = self.labels[index]
        return torch.tensor(token_ids), torch.tensor(label, dtype=torch.float32)

# Collate function for the data loaders
def collate_batch(batch):
    """Collate (sequence, label) pairs into padded batch tensors.

    Pads the variable-length token-id sequences with 0 (the <PAD> index,
    matching the embedding's padding_idx) and stacks the scalar label
    tensors.  Both outputs are moved to the module-level `device`.

    Returns:
        texts: [batch, max_len] long tensor of padded token ids
        labels: [batch] float32 tensor
    """
    texts, labels = zip(*batch)

    # Pad to the longest sequence in the batch.
    texts = pad_sequence(list(texts), batch_first=True, padding_value=0)

    # Stack the 0-dim label tensors directly; torch.tensor(list_of_tensors)
    # is deprecated (emits a UserWarning) and forces an extra copy.
    labels = torch.stack(labels)

    return texts.to(device), labels.to(device)

# Bidirectional GRU model with dual attention
class GRUSentiment(nn.Module):
    """Bidirectional GRU sentiment classifier with two attention heads.

    forward() returns RAW LOGITS (no sigmoid).  The training code pairs
    this model with nn.BCEWithLogitsLoss — which applies sigmoid
    internally — and applies torch.sigmoid explicitly when computing
    accuracy.  The original implementation also applied sigmoid inside
    forward(), so probabilities were squashed twice in the loss and three
    times in the accuracy; that is the defect fixed here.
    """

    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers, dropout, bidirectional=True):
        """
        Args:
            vocab_size: vocabulary size (rows of the embedding table)
            embedding_dim: word-embedding dimension
            hidden_dim: GRU hidden size per direction
            output_dim: number of outputs (1 for binary sentiment)
            n_layers: number of stacked GRU layers
            dropout: dropout rate for the GRU (between layers) and FC head
            bidirectional: run the GRU in both directions when True
        """
        super(GRUSentiment, self).__init__()

        self.bidirectional = bidirectional
        self.directions = 2 if bidirectional else 1

        # Index 0 is <PAD>; padding_idx keeps its embedding at zero.
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)

        # Dropout directly on the embeddings for extra regularization.
        self.embed_dropout = nn.Dropout(0.3)

        # Inter-layer GRU dropout is only meaningful with >1 layer
        # (torch warns otherwise), hence the conditional.
        self.gru = nn.GRU(embedding_dim,
                          hidden_dim,
                          num_layers=n_layers,
                          bidirectional=bidirectional,
                          dropout=dropout if n_layers > 1 else 0,
                          batch_first=True)

        # Two independent attention heads over the GRU outputs.
        self.attention1 = nn.Linear(hidden_dim * self.directions, 1)
        self.attention2 = nn.Linear(hidden_dim * self.directions, 1)

        # Classifier head; input is the two attention contexts concatenated.
        self.fc = nn.Linear(hidden_dim * self.directions * 2, 128)
        self.bn = nn.BatchNorm1d(128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, output_dim)

        self.dropout = nn.Dropout(dropout)
        self.relu = nn.ReLU()

    def attention_net(self, gru_output, attention_layer):
        """Weighted sum of GRU outputs under one attention head.

        Args:
            gru_output: [batch, seq_len, hidden_dim * directions]
            attention_layer: linear layer producing one score per step
        Returns:
            context vector [batch, hidden_dim * directions]
        """
        attn_weights = attention_layer(gru_output)          # [batch, seq_len, 1]
        attn_weights = torch.tanh(attn_weights)
        attn_weights = torch.softmax(attn_weights, dim=1)   # normalize over seq_len

        # [batch, hid*dirs, seq_len] @ [batch, seq_len, 1] -> [batch, hid*dirs, 1]
        context = torch.bmm(gru_output.transpose(1, 2), attn_weights)
        return context.squeeze(2)

    def forward(self, text):
        """Run the classifier.

        Args:
            text: [batch, seq_len] tensor of token ids
        Returns:
            [batch, output_dim] raw logits (apply sigmoid for probabilities)
        """
        # Guard: an empty sequence cannot go through the GRU.  Zero logits
        # correspond to a neutral 0.5 probability after sigmoid.
        if text.size(1) == 0:
            return torch.zeros(text.size(0), self.fc3.out_features, device=text.device)

        embedded = self.embed_dropout(self.embedding(text))  # [batch, seq, emb]
        output, hidden = self.gru(embedded)                  # [batch, seq, hid*dirs]

        # Dual attention, contexts concatenated.
        context1 = self.attention_net(output, self.attention1)
        context2 = self.attention_net(output, self.attention2)
        combined_context = torch.cat((context1, context2), dim=1)

        dense1 = self.dropout(self.relu(self.bn(self.fc(combined_context))))
        dense2 = self.dropout(self.relu(self.fc2(dense1)))

        # No sigmoid here: BCEWithLogitsLoss in the training loop applies it;
        # returning probabilities would double-apply it.
        return self.fc3(dense2)

# One training epoch
def train(model, dataloader, optimizer, criterion, clip=1.0):
    """Run a single training epoch.

    Expects the model to emit raw logits: `criterion` is intended to be
    BCEWithLogitsLoss, and sigmoid is applied manually only for the
    accuracy computation.  Gradient norms are clipped to `clip` to guard
    against exploding gradients.

    Returns (mean loss, mean accuracy) averaged over batches.
    """
    model.train()
    total_loss = 0.0
    total_acc = 0.0

    for inputs, targets in dataloader:
        optimizer.zero_grad()

        logits = model(inputs).squeeze(1)
        loss = criterion(logits, targets)

        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()

        # Accuracy: threshold sigmoid probabilities at 0.5.
        hard_preds = (torch.sigmoid(logits) >= 0.5).float()
        batch_acc = (hard_preds == targets).float().sum() / len(targets)

        total_loss += loss.item()
        total_acc += batch_acc.item()

    n_batches = len(dataloader)
    return total_loss / n_batches, total_acc / n_batches

# Evaluation pass
def evaluate(model, dataloader, criterion):
    """Compute mean loss and accuracy over `dataloader`, gradients disabled.

    Mirrors train(): the model is expected to emit raw logits, so sigmoid
    is applied manually before thresholding for accuracy.

    Returns (mean loss, mean accuracy) averaged over batches.
    """
    model.eval()
    total_loss = 0.0
    total_acc = 0.0

    with torch.no_grad():
        for inputs, targets in dataloader:
            logits = model(inputs).squeeze(1)
            total_loss += criterion(logits, targets).item()

            hard_preds = (torch.sigmoid(logits) >= 0.5).float()
            batch_acc = (hard_preds == targets).float().sum() / len(targets)
            total_acc += batch_acc.item()

    n_batches = len(dataloader)
    return total_loss / n_batches, total_acc / n_batches

# Extended toy training data
def generate_sample_data():
    """Return (texts, labels) for the toy sentiment task.

    30 English reviews; label 1 = positive, 0 = negative.  The data is
    kept as (text, label) pairs so each label sits next to its text.
    """
    samples = [
        # original data
        ("I love this movie! It's amazing and the actors are great.", 1),
        ("This is a terrible film. The script is awful and the acting is poor.", 0),
        ("The graphics were stunning and the storyline was captivating.", 1),
        ("I was disappointed with the plot and found the characters boring.", 0),
        ("This book is fantastic! I couldn't put it down.", 1),
        ("What a waste of time. The book was dull and predictable.", 0),
        ("The restaurant has delicious food and excellent service.", 1),
        ("The food was cold and the staff was rude.", 0),
        ("The hotel room was clean and comfortable.", 1),
        ("The room was dirty and the bed was uncomfortable.", 0),

        # additional positive reviews
        ("I really enjoyed this movie, it was fantastic!", 1),
        ("This film exceeded all my expectations!", 1),
        ("What a wonderful experience, I highly recommend it!", 1),
        ("The best movie I've seen in years! Absolutely loved it.", 1),
        ("Great acting, amazing plot, and beautiful cinematography!", 1),
        ("This book changed my life! It's brilliantly written.", 1),
        ("The service was impeccable and the staff very friendly!", 1),
        ("Such an amazing product, works exactly as described.", 1),
        ("This is everything I hoped for and more! So happy with my purchase.", 1),
        ("Excellent quality and fast delivery. Will buy again!", 1),

        # additional negative reviews
        ("I didn't like this movie at all. Very disappointing.", 0),
        ("The worst experience ever. Don't waste your money.", 0),
        ("This product broke after one use. Poor quality.", 0),
        ("Terrible customer service, they didn't resolve my issue.", 0),
        ("The food was bland and overpriced. Won't go back.", 0),
        ("This book was so boring I couldn't finish it.", 0),
        ("Nothing works as advertised. Complete scam.", 0),
        ("Waited two hours for service and the food was cold.", 0),
        ("The movie plot made no sense and the ending was terrible.", 0),
        ("Slow shipping and the item arrived damaged.", 0),
    ]

    texts = [text for text, _ in samples]
    labels = [label for _, label in samples]
    return texts, labels

# Simulated pretrained word vectors
def load_pretrained_embeddings(vocab, embedding_dim=100):
    """Build a mock "pretrained" embedding matrix for `vocab`.

    Random-normal initialization, with row 0 (<PAD>) zeroed and a set of
    sentiment-bearing words nudged toward a shared positive (+0.1) or
    negative (-0.1) base vector plus small noise, so the model starts
    with polarized representations.  In a real application this would
    load GloVe or similar.

    Returns a [len(vocab.stoi), embedding_dim] float tensor.
    """
    emb = torch.randn(len(vocab.stoi), embedding_dim)

    # <PAD> must embed to the zero vector.
    emb[0] = torch.zeros(embedding_dim)

    sentiment_seeds = [
        (['good', 'great', 'excellent', 'love', 'amazing', 'fantastic', 'wonderful', 'best', 'enjoy', 'enjoyed'], 0.1),
        (['bad', 'terrible', 'awful', 'poor', 'disappointing', 'waste', 'worst', 'boring', 'cold', 'dirty'], -0.1),
    ]

    # Shared base per polarity, plus a little per-word noise.
    for words, base in sentiment_seeds:
        for word in words:
            row = vocab.stoi.get(word)
            if row is not None:
                emb[row] = torch.full((embedding_dim,), base) + torch.randn(embedding_dim) * 0.01

    return emb

# Main function
def main():
    """Train the GRU sentiment classifier on the toy dataset, checkpoint the
    best model by validation loss, then run a few qualitative predictions."""
    # Generate the (extended) example data
    texts, labels = generate_sample_data()

    # Preprocess every text into a token list
    processed_texts = [preprocess_text(text) for text in texts]

    # Build the vocabulary (threshold 1: keep every token — the dataset is tiny)
    vocab = Vocabulary(freq_threshold=1)
    vocab.build_vocabulary(processed_texts)

    print(f"词汇表大小: {len(vocab.stoi)}")

    # Get the simulated "pretrained" embedding matrix
    pretrained_weights = load_pretrained_embeddings(vocab, embedding_dim=100)

    # Stratified train/validation split on the raw texts
    X_train, X_val, y_train, y_val = train_test_split(texts, labels, test_size=0.2, random_state=42, stratify=labels)

    # Wrap the splits in datasets
    train_dataset = SentimentDataset(X_train, y_train, vocab)
    val_dataset = SentimentDataset(X_val, y_val, vocab)

    # Data loaders; collate_batch pads sequences and moves batches to `device`
    batch_size = 16  # comparatively large batch for this tiny dataset
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_batch)
    val_dataloader = DataLoader(val_dataset, batch_size=batch_size, collate_fn=collate_batch)

    # Model hyperparameters
    vocab_size = len(vocab.stoi)
    embedding_dim = 100  # must match the width of pretrained_weights above
    hidden_dim = 128  # modest hidden size to limit overfitting
    output_dim = 1
    n_layers = 2
    dropout = 0.3  # moderate dropout rate

    # Instantiate the model
    model = GRUSentiment(vocab_size, embedding_dim, hidden_dim, output_dim, n_layers, dropout).to(device)

    # Copy the pretrained vectors into the embedding layer
    model.embedding.weight.data.copy_(pretrained_weights)

    # Weighted binary cross-entropy with logits.
    # NOTE(review): per the torch docs, pos_weight scales the POSITIVE-class
    # term of BCEWithLogitsLoss, but the original comment here claimed it
    # up-weighted negatives — confirm which class was actually intended.
    # NOTE(review): BCEWithLogitsLoss applies sigmoid internally, so the model
    # must output raw logits; verify forward() does not also apply sigmoid,
    # or probabilities get squashed twice during training.
    pos_weight = torch.tensor([1.2]).to(device)
    criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weight)

    # AdamW: Adam with decoupled weight decay
    optimizer = optim.AdamW(model.parameters(), lr=0.001, weight_decay=1e-4)

    # Halve the LR when validation loss plateaus.
    # NOTE(review): `verbose` is deprecated in recent torch releases — confirm
    # the targeted torch version.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=2, verbose=True)

    # Training loop with early stopping on validation loss
    num_epochs = 50
    best_val_loss = float('inf')
    patience = 5  # epochs without improvement before stopping
    patience_counter = 0

    for epoch in range(num_epochs):
        # Train one epoch
        train_loss, train_acc = train(model, train_dataloader, optimizer, criterion)

        # Validate
        val_loss, val_acc = evaluate(model, val_dataloader, criterion)

        # Adjust the learning rate from the validation loss
        scheduler.step(val_loss)

        # Report progress
        print(f'Epoch {epoch+1}/{num_epochs}')
        print(f'\tTrain Loss: {train_loss:.4f} | Train Acc: {train_acc:.4f}')
        print(f'\tVal Loss: {val_loss:.4f} | Val Acc: {val_acc:.4f}')

        # Checkpoint the best model so far
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            torch.save(model.state_dict(), 'best_model.pt')
            print(f'\t模型已保存!')
            patience_counter = 0
        else:
            patience_counter += 1

        # Early stopping
        if patience_counter >= patience:
            print(f'Early stopping after {epoch+1} epochs')
            break

    # Reload the best checkpoint
    model.load_state_dict(torch.load('best_model.pt'))

    # Sample texts for a qualitative check
    test_texts = [
        "I really enjoyed this movie, it was fantastic!",  # clearly positive
        "This is the worst film I've ever seen.",          # clearly negative
        "The product was okay, not great but not bad.",    # neutral
        "I don't recommend this restaurant at all.",       # negation-heavy negative
        "This isn't as good as I expected, but it's not terrible." # mixed sentiment
    ]

    model.eval()
    for test_text in test_texts:
        test_processed = preprocess_text(test_text)
        print(f"处理后的词语: {test_processed}")
        test_numericalized = vocab.numericalize(test_processed)
        test_tensor = torch.tensor(test_numericalized).unsqueeze(0).to(device)

        with torch.no_grad():
            # NOTE(review): the code below treats the model output as a
            # probability in [0, 1]; if the model emits raw logits (the
            # BCEWithLogitsLoss convention), apply torch.sigmoid first —
            # confirm the model's output convention.
            prediction = model(test_tensor).squeeze(1)
            sentiment = "正面" if prediction.item() >= 0.5 else "负面"
            confidence = max(prediction.item(), 1 - prediction.item())
            print(f'测试文本: "{test_text}"')
            print(f'情感预测: {sentiment} (置信度: {confidence:.2f})')
            print(f'情感得分: {prediction.item():.4f}')
            print('-' * 50)

# Script entry point
if __name__ == "__main__":
    main()
