"""
1-gram深度学习分类器
使用预训练词向量进行句子分类，展示不同的pooling策略
"""

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report
from collections import defaultdict
import gensim.downloader as api
import re
import jieba
from typing import List, Tuple, Dict
import matplotlib.pyplot as plt
import seaborn as sns

class TextDataset(Dataset):
    """Dataset mapping raw text to fixed-length matrices of word vectors.

    Each item is tokenized with jieba, looked up in the pretrained
    ``word_vectors`` (out-of-vocabulary words map to zero vectors), and
    padded/truncated to ``max_length`` rows.
    """

    def __init__(self, texts: List[str], labels: List[int], word_vectors, max_length: int = 50):
        self.texts = texts
        self.labels = labels
        # assumes word_vectors exposes gensim's KeyedVectors-style interface
        # (vector_size, __contains__, __getitem__) — see SimpleWordVectors fallback
        self.word_vectors = word_vectors
        self.max_length = max_length
        self.vector_dim = word_vectors.vector_size

    def __len__(self) -> int:
        return len(self.texts)

    def __getitem__(self, idx):
        """Return (FloatTensor (max_length, vector_dim), LongTensor shape (1,))."""
        text = self.texts[idx]
        label = self.labels[idx]

        # Preallocate the padded matrix once in float32 (what FloatTensor wants)
        # instead of appending float64 zero rows and converting afterwards.
        # Untouched rows double as zero padding and as OOV embeddings.
        matrix = np.zeros((self.max_length, self.vector_dim), dtype=np.float32)

        # Tokenize and fill one row per known word, truncating at max_length.
        words = jieba.lcut(text.lower())
        for row, word in enumerate(words[:self.max_length]):
            if word in self.word_vectors:
                matrix[row] = self.word_vectors[word]

        return torch.FloatTensor(matrix), torch.LongTensor([label])

class PoolingLayer(nn.Module):
    """Collapse a (batch, seq_len, dim) tensor to (batch, dim).

    Supported strategies: 'mean', 'max', 'sum', 'first', 'last'.
    """

    def __init__(self, pooling_type: str = 'mean'):
        super(PoolingLayer, self).__init__()
        self.pooling_type = pooling_type

    def forward(self, x):
        """Pool x of shape (batch_size, seq_len, hidden_dim) over dim 1."""
        strategy = self.pooling_type
        if strategy == 'mean':
            # Average every position's features.
            return x.mean(dim=1)
        if strategy == 'max':
            # Element-wise maximum across positions.
            return x.max(dim=1).values
        if strategy == 'sum':
            # Accumulate features across positions.
            return x.sum(dim=1)
        if strategy == 'first':
            # Keep only the first position.
            return x[:, 0, :]
        if strategy == 'last':
            # Keep only the last position.
            return x[:, -1, :]
        raise ValueError(f"Unsupported pooling type: {strategy}")

class AttentionPooling(nn.Module):
    """Attention-weighted pooling over the sequence dimension."""

    def __init__(self, hidden_dim: int):
        super(AttentionPooling, self).__init__()
        # One learned scalar score per time step.
        self.attention = nn.Linear(hidden_dim, 1)

    def forward(self, x):
        """Map x of shape (batch_size, seq_len, hidden_dim) to (batch_size, hidden_dim)."""
        scores = self.attention(x)              # (batch_size, seq_len, 1)
        # Normalize scores across the sequence so weights sum to 1.
        weights = F.softmax(scores, dim=1)
        # Weighted sum of positions, broadcasting weights over hidden_dim.
        return (weights * x).sum(dim=1)

class OneGramDeepClassifier(nn.Module):
    """Bag-of-words ("1-gram") sentence classifier over word vectors.

    Each word vector goes through a shared two-layer MLP, the per-word
    features are pooled into one sentence vector, and a small MLP head
    produces the class logits. Word order is ignored entirely.
    """

    def __init__(self, vector_dim: int, num_classes: int, hidden_dim: int = 128,
                 pooling_type: str = 'mean', dropout_rate: float = 0.3):
        super(OneGramDeepClassifier, self).__init__()

        self.vector_dim = vector_dim
        self.num_classes = num_classes
        self.pooling_type = pooling_type

        # Shared per-word feature extractor (applied position-wise).
        self.feature_extractor = nn.Sequential(
            nn.Linear(vector_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
        )

        # Sequence -> single sentence vector.
        self.pooling = (AttentionPooling(hidden_dim)
                        if pooling_type == 'attention'
                        else PoolingLayer(pooling_type))

        # Sentence vector -> class logits.
        self.classifier = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(hidden_dim // 2, num_classes),
        )

    def forward(self, x):
        """Map x of shape (batch_size, seq_len, vector_dim) to logits (batch_size, num_classes)."""
        n_batch, n_seq, _ = x.shape

        # Flatten so the shared MLP runs on every word vector at once,
        # then restore the (batch, seq, hidden) layout for pooling.
        flat = x.view(-1, self.vector_dim)
        per_word = self.feature_extractor(flat).view(n_batch, n_seq, -1)

        sentence = self.pooling(per_word)       # (batch_size, hidden_dim)
        return self.classifier(sentence)        # (batch_size, num_classes)

class OneGramTrainer:
    """Training/evaluation harness for the 1-gram classifier.

    Records per-epoch loss and accuracy for both the training and
    validation loaders so they can be plotted afterwards.
    """

    def __init__(self, model, device='cpu'):
        self.model = model
        self.device = device
        self.model.to(device)

        # Per-epoch history, appended to by train().
        self.train_losses = []
        self.train_accuracies = []
        self.val_losses = []
        self.val_accuracies = []

    def train_epoch(self, train_loader, optimizer, criterion):
        """Run one optimization pass over train_loader.

        Returns:
            (average batch loss, accuracy in percent) for the epoch.
        """
        self.model.train()
        total_loss = 0
        correct = 0
        total = 0

        for batch_idx, (data, target) in enumerate(train_loader):
            # The dataset yields labels of shape (1,), so loaders stack them
            # to (batch, 1). Use view(-1) rather than squeeze(): squeeze()
            # turns a final batch of size 1 into a 0-dim tensor, which
            # CrossEntropyLoss rejects for 2-D logits.
            data, target = data.to(self.device), target.view(-1).to(self.device)

            optimizer.zero_grad()
            output = self.model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            pred = output.argmax(dim=1)
            correct += pred.eq(target).sum().item()
            total += target.size(0)

        avg_loss = total_loss / len(train_loader)
        accuracy = 100. * correct / total

        return avg_loss, accuracy

    def validate(self, val_loader, criterion):
        """Evaluate on val_loader without gradient tracking.

        Returns:
            (average batch loss, accuracy in percent).
        """
        self.model.eval()
        total_loss = 0
        correct = 0
        total = 0

        with torch.no_grad():
            for data, target in val_loader:
                # Same size-1-batch fix as in train_epoch.
                data, target = data.to(self.device), target.view(-1).to(self.device)
                output = self.model(data)
                loss = criterion(output, target)

                total_loss += loss.item()
                pred = output.argmax(dim=1)
                correct += pred.eq(target).sum().item()
                total += target.size(0)

        avg_loss = total_loss / len(val_loader)
        accuracy = 100. * correct / total

        return avg_loss, accuracy

    def train(self, train_loader, val_loader, epochs=10, lr=0.001):
        """Full training loop: Adam + cross-entropy for ``epochs`` epochs."""
        optimizer = optim.Adam(self.model.parameters(), lr=lr)
        criterion = nn.CrossEntropyLoss()

        print(f"开始训练，使用设备: {self.device}")
        print(f"模型参数数量: {sum(p.numel() for p in self.model.parameters())}")
        print(f"Pooling策略: {self.model.pooling_type}")
        print("-" * 50)

        for epoch in range(epochs):
            # One optimization pass, then record the metrics.
            train_loss, train_acc = self.train_epoch(train_loader, optimizer, criterion)
            self.train_losses.append(train_loss)
            self.train_accuracies.append(train_acc)

            # Evaluate on the held-out loader.
            val_loss, val_acc = self.validate(val_loader, criterion)
            self.val_losses.append(val_loss)
            self.val_accuracies.append(val_acc)

            print(f'Epoch {epoch+1}/{epochs}:')
            print(f'  训练 - Loss: {train_loss:.4f}, Acc: {train_acc:.2f}%')
            print(f'  验证 - Loss: {val_loss:.4f}, Acc: {val_acc:.2f}%')
            print()

    def plot_training_history(self):
        """Plot the loss and accuracy curves collected during training."""
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))

        # Loss curves
        ax1.plot(self.train_losses, label='训练损失')
        ax1.plot(self.val_losses, label='验证损失')
        ax1.set_title('损失曲线')
        ax1.set_xlabel('Epoch')
        ax1.set_ylabel('Loss')
        ax1.legend()
        ax1.grid(True)

        # Accuracy curves
        ax2.plot(self.train_accuracies, label='训练准确率')
        ax2.plot(self.val_accuracies, label='验证准确率')
        ax2.set_title('准确率曲线')
        ax2.set_xlabel('Epoch')
        ax2.set_ylabel('Accuracy (%)')
        ax2.legend()
        ax2.grid(True)

        plt.tight_layout()
        plt.show()

def create_sample_data():
    """Build a tiny three-class Chinese sentiment dataset.

    Returns:
        (texts, labels): 30 strings and their integer labels,
        ordered positive (1), negative (0), neutral (2), 10 each.
    """
    # Insertion order (1, 0, 2) fixes the output ordering below.
    samples_by_label = {
        1: [  # positive
            "这个电影真的很好看，我很喜欢",
            "服务态度非常好，推荐大家来",
            "质量很棒，物超所值",
            "今天心情很好，阳光明媚",
            "这家餐厅的菜品很美味",
            "工作进展顺利，很满意",
            "朋友们都很友善，感谢大家",
            "这本书写得很精彩",
            "天气很好，适合出游",
            "产品功能强大，使用方便",
        ],
        0: [  # negative
            "这个产品质量太差了",
            "服务态度很恶劣，不推荐",
            "价格太贵，性价比不高",
            "今天心情不好，很烦躁",
            "这家店的食物很难吃",
            "工作压力很大，很累",
            "遇到了很多困难和挫折",
            "这部电影很无聊",
            "天气很糟糕，下雨了",
            "软件bug太多，体验很差",
        ],
        2: [  # neutral
            "今天去了超市买东西",
            "明天有个会议要参加",
            "这是一个普通的产品",
            "天气预报说明天多云",
            "我在学习新的技术",
            "公司在市中心的位置",
            "这本书有300页",
            "电影时长两个小时",
            "产品有多种颜色可选",
            "会议定在下午三点",
        ],
    }

    texts, labels = [], []
    for label, group in samples_by_label.items():
        texts.extend(group)
        labels.extend([label] * len(group))

    return texts, labels

def compare_pooling_strategies(texts, labels, word_vectors):
    """Train one classifier per pooling strategy and compare accuracies.

    Args:
        texts: raw input strings.
        labels: integer class labels aligned with ``texts``.
        word_vectors: pretrained embeddings (gensim KeyedVectors-like).

    Returns:
        Dict mapping pooling type to {'final_accuracy', 'train_history',
        'val_history'}.
    """
    pooling_strategies = ['mean', 'max', 'sum', 'attention']
    results = {}

    # Stratified split keeps the class proportions in both halves.
    X_train, X_test, y_train, y_test = train_test_split(
        texts, labels, test_size=0.3, random_state=42, stratify=labels
    )

    # The datasets and loaders do not depend on the pooling strategy,
    # so build them once instead of rebuilding inside the loop.
    train_dataset = TextDataset(X_train, y_train, word_vectors)
    test_dataset = TextDataset(X_test, y_test, word_vectors)

    train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=8, shuffle=False)

    for pooling_type in pooling_strategies:
        print(f"\n{'='*20} 测试 {pooling_type} pooling {'='*20}")

        # Fresh model per strategy so runs don't share parameters.
        model = OneGramDeepClassifier(
            vector_dim=word_vectors.vector_size,
            num_classes=3,
            hidden_dim=64,
            pooling_type=pooling_type,
            dropout_rate=0.2
        )

        # NOTE(review): the test split doubles as the validation set here.
        trainer = OneGramTrainer(model)
        trainer.train(train_loader, test_loader, epochs=15, lr=0.001)

        final_val_acc = trainer.val_accuracies[-1]
        results[pooling_type] = {
            'final_accuracy': final_val_acc,
            'train_history': trainer.train_accuracies,
            'val_history': trainer.val_accuracies
        }

        print(f"{pooling_type} pooling 最终验证准确率: {final_val_acc:.2f}%")

    return results

def main():
    """Run the demo: load embeddings, build data, compare pooling strategies."""
    print("1-gram深度学习分类器演示")
    print("=" * 50)

    # 1. Load pretrained word vectors.
    print("正在加载预训练词向量...")
    try:
        # NOTE(review): the sample texts are Chinese but this loads the
        # English Google News vectors, so most jieba tokens will be OOV
        # and fall back to zero vectors — consider a Chinese model.
        word_vectors = api.load('word2vec-google-news-300')
        print("成功加载 Word2Vec 词向量")
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of silently triggering the fallback.
        print("无法加载预训练词向量，将使用随机初始化")
        # Build a random-vector fallback restricted to the demo vocabulary.
        vocab = set()
        texts, _ = create_sample_data()
        for text in texts:
            vocab.update(jieba.lcut(text.lower()))

        class SimpleWordVectors:
            """Minimal KeyedVectors-like stand-in with random vectors."""

            def __init__(self, vocab, vector_size=100):
                self.vector_size = vector_size
                # Unseeded random vectors: results vary between runs.
                self.vectors = {word: np.random.randn(vector_size) for word in vocab}

            def __contains__(self, word):
                return word in self.vectors

            def __getitem__(self, word):
                return self.vectors.get(word, np.zeros(self.vector_size))

        word_vectors = SimpleWordVectors(vocab)

    # 2. Build the toy dataset.
    print("\n创建示例数据集...")
    texts, labels = create_sample_data()
    print(f"数据集大小: {len(texts)} 条文本")
    print(f"类别分布: {dict(zip(*np.unique(labels, return_counts=True)))}")

    # 3. Train one model per pooling strategy.
    print("\n开始比较不同pooling策略...")
    results = compare_pooling_strategies(texts, labels, word_vectors)

    # 4. Print the summary table.
    print("\n" + "=" * 50)
    print("不同Pooling策略性能对比:")
    print("-" * 50)

    for pooling_type, result in results.items():
        print(f"{pooling_type:>10} pooling: {result['final_accuracy']:>6.2f}%")

    # 5. Plot the comparison figure.
    plt.figure(figsize=(12, 8))

    # Final-accuracy bar chart.
    plt.subplot(2, 2, 1)
    pooling_types = list(results.keys())
    accuracies = [results[pt]['final_accuracy'] for pt in pooling_types]
    bars = plt.bar(pooling_types, accuracies)
    plt.title('不同Pooling策略的最终准确率对比')
    plt.ylabel('准确率 (%)')
    plt.ylim(0, 100)

    # Numeric label above each bar.
    for bar, acc in zip(bars, accuracies):
        plt.text(bar.get_x() + bar.get_width()/2, bar.get_height() + 1, 
                f'{acc:.1f}%', ha='center', va='bottom')

    # Validation-accuracy curves per strategy.
    plt.subplot(2, 2, 2)
    for pooling_type, result in results.items():
        plt.plot(result['val_history'], label=f'{pooling_type} pooling')
    plt.title('验证准确率训练曲线')
    plt.xlabel('Epoch')
    plt.ylabel('准确率 (%)')
    plt.legend()
    plt.grid(True)

    # Free-text explanation panel (requires a CJK font such as SimHei).
    plt.subplot(2, 1, 2)
    plt.axis('off')
    explanation = """
Pooling策略说明:

• Mean Pooling: 对序列中所有词向量求平均，保留全局信息
• Max Pooling: 取序列中每个维度的最大值，突出显著特征  
• Sum Pooling: 对序列中所有词向量求和，累积特征信息
• Attention Pooling: 使用注意力机制加权求和，自动学习重要性权重

深度学习优势:
1. 自动特征学习：无需手工设计特征，模型自动学习文本表示
2. 非线性建模：通过多层神经网络捕获复杂的语义关系
3. 端到端训练：从原始文本到分类结果的统一优化
4. 泛化能力强：在大规模数据上训练的模型具有更好的泛化性能
    """
    plt.text(0.05, 0.95, explanation, transform=plt.gca().transAxes, 
             fontsize=10, verticalalignment='top', fontfamily='SimHei')

    plt.tight_layout()
    plt.show()

    print("\n实验完成！")
    print("\n关键概念总结:")
    print("1. 1-gram: 将文本看作独立词汇的集合，不考虑词序")
    print("2. 预训练词向量: 使用大规模语料预训练的词向量，包含丰富语义信息")
    print("3. Pooling: 将变长序列转换为固定长度表示的技术")
    print("4. 深度学习: 通过多层神经网络自动学习特征表示")

# Run the demo only when executed directly, not on import.
if __name__ == "__main__":
    main()