import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import numpy as np
import re
from collections import Counter
import random
import json
import os
from typing import List, Tuple, Dict, Set

# Local English stop-word list (same as in the original file).
# Mirrors the NLTK English stop-word set so no NLTK download is required.
ENGLISH_STOP_WORDS = {
    'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", 
    "you've", "you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 
    'him', 'his', 'himself', 'she', "she's", 'her', 'hers', 'herself', 'it', 
    "it's", 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 
    'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', 
    'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 
    'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 
    'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 
    'for', 'with', 'through', 'during', 'before', 'after', 'above', 'below', 
    'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 
    'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 
    'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 
    'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 
    's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 
    'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', "aren't", 
    'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn', 
    "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 
    'mightn', "mightn't", 'mustn', "mustn't", 'needn', "needn't", 'shan', 
    "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", 
    'won', "won't", 'wouldn', "wouldn't"
}

def simple_tokenize(text: str) -> List[str]:
    """Lowercase *text* and return its alphabetic word tokens."""
    return re.findall(r'\b[a-z]+\b', text.lower())

def extract_ngrams(tokens: List[str], n: int) -> List[str]:
    """Return all contiguous n-grams of *tokens*, joined with underscores.

    Returns an empty list when there are fewer than *n* tokens.
    """
    if len(tokens) < n:
        return []
    return ['_'.join(tokens[start:start + n])
            for start in range(len(tokens) - n + 1)]

class NGramFeatureExtractorPyTorch:
    """N-gram feature extractor producing PyTorch tensors, with optional TF-IDF weights."""

    # Stop words that still carry sentiment and therefore must survive filtering.
    IMPORTANT_WORDS = {'not', 'no', 'never', 'very', 'really', 'quite', 'extremely'}
    # Punctuation marks the tokenizer deliberately keeps as sentiment cues.
    PUNCT_TOKENS = {'!', '?', '.', ','}

    def __init__(self, n_range=(1, 3), max_features=10000, min_freq=2, use_tfidf=True):
        # Inclusive (min_n, max_n) range of n-gram sizes.
        self.n_range = n_range
        # Keep at most this many of the most frequent n-grams.
        self.max_features = max_features
        # Drop n-grams seen fewer than this many times in the corpus.
        self.min_freq = min_freq
        self.use_tfidf = use_tfidf
        self.feature_to_idx = {}
        self.vocab_size = 0
        self.stop_words = ENGLISH_STOP_WORDS
        self.document_frequencies = {}
        self.total_documents = 0

    def preprocess_text(self, text: str) -> List[str]:
        """Lowercase *text*, tokenize into words plus sentiment punctuation, drop stop words."""
        text = text.lower()
        # Keep useful punctuation marks as standalone tokens.
        text = re.sub(r'[^\w\s!?.,]', ' ', text)
        tokens = re.findall(r'\b[a-z]+\b|[!?.,]', text)
        # Filter stop words but keep important sentiment words.  NOTE: the
        # previous `len(w) > 1` filter silently discarded the punctuation
        # tokens the regex above deliberately extracts, so punctuation is now
        # explicitly exempted from the length filter.
        return [w for w in tokens
                if (w not in self.stop_words or w in self.IMPORTANT_WORDS)
                and (len(w) > 1 or w in self.PUNCT_TOKENS)]

    def extract_ngrams(self, tokens: List[str]) -> List[str]:
        """Collect n-grams of *tokens* for every n in the configured range."""
        all_ngrams = []
        for n in range(self.n_range[0], self.n_range[1] + 1):
            all_ngrams.extend(extract_ngrams(tokens, n))
        return all_ngrams

    def build_vocabulary(self, texts: List[str]):
        """Build the feature vocabulary (and document frequencies) from *texts*."""
        print("正在构建词汇表...")
        ngram_counts = Counter()
        document_ngrams = []

        # Collect the n-grams of every document.
        for text in texts:
            tokens = self.preprocess_text(text)
            text_ngrams = self.extract_ngrams(tokens)
            ngram_counts.update(text_ngrams)
            document_ngrams.append(set(text_ngrams))

        # Document frequencies in a single pass over the document sets instead
        # of the previous O(vocabulary * documents) per-ngram scan.
        if self.use_tfidf:
            self.total_documents = len(texts)
            df_counts = Counter()
            for doc_ngrams in document_ngrams:
                df_counts.update(doc_ngrams)
            self.document_frequencies = dict(df_counts)

        # Drop low-frequency n-grams.
        filtered_ngrams = {ngram: count for ngram, count in ngram_counts.items()
                           if count >= self.min_freq}

        # Keep the most frequent features.
        top_ngrams = sorted(filtered_ngrams.items(), key=lambda x: x[1], reverse=True)
        top_ngrams = top_ngrams[:self.max_features]

        self.feature_to_idx = {ngram: idx for idx, (ngram, _) in enumerate(top_ngrams)}
        self.vocab_size = len(self.feature_to_idx)

        print(f"词汇表大小: {self.vocab_size}")
        print(f"使用TF-IDF: {self.use_tfidf}")
        print(f"前10个特征: {list(self.feature_to_idx.keys())[:10]}")

    def text_to_features(self, text: str) -> torch.Tensor:
        """Convert one text into an L2-normalized feature vector of size vocab_size."""
        tokens = self.preprocess_text(text)
        text_ngrams = self.extract_ngrams(tokens)

        feature_vector = torch.zeros(self.vocab_size, dtype=torch.float32)
        ngram_counts = Counter(text_ngrams)
        total_ngrams = len(text_ngrams)

        for ngram, count in ngram_counts.items():
            idx = self.feature_to_idx.get(ngram)
            if idx is None:
                continue
            if self.use_tfidf and total_ngrams > 0:
                # TF-IDF weight; unseen n-grams fall back to df=1 to avoid
                # division by zero.
                tf = count / total_ngrams
                df = self.document_frequencies.get(ngram, 1)
                idf = np.log(self.total_documents / df)
                feature_vector[idx] = tf * idf
            else:
                feature_vector[idx] = count

        # L2 normalization keeps feature magnitudes comparable across texts.
        norm = torch.norm(feature_vector)
        if norm > 0:
            feature_vector = feature_vector / norm

        return feature_vector

    def texts_to_features(self, texts: List[str]) -> torch.Tensor:
        """Convert a list of texts into a (len(texts), vocab_size) feature matrix."""
        print("正在提取特征...")
        # Guard: torch.stack on an empty list raises, so return an empty matrix.
        if not texts:
            return torch.zeros((0, self.vocab_size), dtype=torch.float32)

        features = []
        batch_size = 500  # process in batches so progress can be reported
        for i in range(0, len(texts), batch_size):
            features.extend(self.text_to_features(t) for t in texts[i:i + batch_size])
            if i % 1000 == 0:
                print(f"处理进度: {i}/{len(texts)}")

        return torch.stack(features)


class MaxEntClassifierPyTorch(nn.Module):
    """Maximum-entropy (multinomial logistic regression) classifier in PyTorch.

    A single linear layer over the n-gram features, with dropout on the input
    for regularization and Xavier-initialized weights.
    """

    def __init__(self, num_classes: int, feature_dim: int, dropout_rate: float = 0.3):
        super().__init__()
        self.dropout = nn.Dropout(dropout_rate)
        self.linear = nn.Linear(feature_dim, num_classes)
        # Xavier initialization keeps the initial logits well scaled.
        nn.init.xavier_uniform_(self.linear.weight)
        nn.init.zeros_(self.linear.bias)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply input dropout then the linear layer; returns raw logits."""
        return self.linear(self.dropout(x))


class TextDataset(Dataset):
    """Thin PyTorch Dataset over pre-computed feature and label tensors."""

    def __init__(self, features: torch.Tensor, labels: torch.Tensor):
        self.features, self.labels = features, labels

    def __len__(self):
        # One sample per label.
        return len(self.labels)

    def __getitem__(self, idx):
        # The (feature_row, label) pair at position idx.
        return self.features[idx], self.labels[idx]


def create_demo_dataset():
    """Create the demo sentiment dataset (same content as the original file).

    Returns:
        (texts, labels): shuffled parallel lists with 225 samples per class;
        labels are 1 = positive, 0 = negative, 2 = neutral.
    """
    print("创建演示数据集...")
    
    positive_texts = [
        "I love this movie it's amazing and wonderful",
        "This is the best film I have ever seen fantastic",
        "Excellent acting and great story highly recommended",
        "Beautiful cinematography and outstanding performance",
        "Absolutely brilliant work of art love it",
        "Perfect movie with great direction and script",
        "Amazing storyline and excellent character development",
        "Outstanding film with beautiful visuals and music",
        "Incredible performance by all actors wonderful",
        "Magnificent movie experience truly enjoyed it",
        "Superb acting and direction really impressed me",
        "Fantastic movie with great plot and characters",
        "Wonderful film highly recommend to everyone",
        "Excellent cinematography and brilliant acting",
        "Amazing story with outstanding visual effects"
    ] * 15
    
    negative_texts = [
        "This movie is terrible and boring waste of time",
        "Awful acting and poor story completely disappointed",
        "Worst film ever made absolutely horrible",
        "Bad direction and terrible script not recommended",
        "Completely boring and uninteresting movie",
        "Poor performance by actors and weak plot",
        "Disappointing movie with bad cinematography",
        "Terrible waste of time and money avoid it",
        "Awful storyline and poor character development",
        "Horrible movie experience completely disappointed",
        "Terrible acting and boring plot disaster",
        "Worst movie ever seen complete waste time",
        "Awful film with terrible direction and script",
        "Poor quality movie with bad acting",
        "Disappointing film avoid at all costs"
    ] * 15
    
    neutral_texts = [
        "The movie was okay nothing special but watchable",
        "Average film with decent acting and story",
        "It's an ordinary movie not bad not great",
        "The film has some good moments but overall average",
        "Acceptable movie with reasonable story and acting",
        "Not the best but not the worst either",
        "Average performance by actors and decent plot",
        "Ordinary film with standard cinematography",
        "The movie is fine but nothing outstanding",
        "Decent movie experience neither good nor bad",
        "Acceptable film with average story and acting",
        "Standard movie nothing particularly special",
        "Ordinary film with decent but not great plot",
        "Average movie experience could be better",
        "Fair movie with reasonable story development"
    ] * 15
    
    # Pair each text with its class label, then shuffle the pairs in place.
    samples = []
    for class_texts, label in ((positive_texts, 1), (negative_texts, 0), (neutral_texts, 2)):
        samples.extend((text, label) for text in class_texts)
    random.shuffle(samples)

    texts, labels = zip(*samples)
    return list(texts), list(labels)


def load_sst5_dataset(data_dir: str) -> Tuple[Tuple[List[str], List[int]], Tuple[List[str], List[int]], Tuple[List[str], List[int]]]:
    """Load the SST-5 dataset splits from JSONL files.

    Args:
        data_dir: directory containing train.jsonl, dev.jsonl and test.jsonl.

    Returns:
        ((train_texts, train_labels), (dev_texts, dev_labels), (test_texts, test_labels))

    Raises:
        FileNotFoundError: if any of the three split files is missing.
    """
    def read_split(path: str) -> Tuple[List[str], List[int]]:
        """Read a single JSONL split into parallel text/label lists."""
        if not os.path.exists(path):
            raise FileNotFoundError(f"文件不存在: {path}")

        texts: List[str] = []
        labels: List[int] = []
        with open(path, 'r', encoding='utf-8') as fh:
            for raw_line in fh:
                raw_line = raw_line.strip()
                if not raw_line:
                    continue  # skip blank lines
                record = json.loads(raw_line)
                texts.append(record['text'])
                labels.append(record['label'])
        return texts, labels

    print("正在加载SST-5数据集...")

    # Load the train, validation and test splits.
    train_texts, train_labels = read_split(os.path.join(data_dir, 'train.jsonl'))
    dev_texts, dev_labels = read_split(os.path.join(data_dir, 'dev.jsonl'))
    test_texts, test_labels = read_split(os.path.join(data_dir, 'test.jsonl'))

    print(f"训练集: {len(train_texts)} 样本")
    print(f"验证集: {len(dev_texts)} 样本")
    print(f"测试集: {len(test_texts)} 样本")

    # Report the label distribution of each split.
    print("\n标签分布:")
    print("训练集:", Counter(train_labels))
    print("验证集:", Counter(dev_labels))
    print("测试集:", Counter(test_labels))

    # Sanity-check the label range (SST-5 uses labels 0..4).
    observed_labels = set(train_labels) | set(dev_labels) | set(test_labels)
    expected_labels = {0, 1, 2, 3, 4}
    if observed_labels != expected_labels:
        print(f"警告: 发现意外的标签值 {observed_labels - expected_labels}")

    return (train_texts, train_labels), (dev_texts, dev_labels), (test_texts, test_labels)


def train_model(model: MaxEntClassifierPyTorch, train_loader: DataLoader, val_loader: DataLoader,
                epochs: int, learning_rate: float, regularization: float, device: torch.device):
    """Train the classifier with LR scheduling and early stopping.

    Args:
        model: classifier to train (moved to *device* in place).
        train_loader / val_loader: batched (features, labels) loaders.
        epochs: maximum number of training epochs.
        learning_rate: Adam learning rate.
        regularization: L2 weight decay passed to Adam.
        device: device to run training on.

    Returns:
        The model with the weights of the best validation epoch restored
        (previously the last epoch's weights were returned even when early
        stopping fired after the model had degraded).
    """
    criterion = nn.CrossEntropyLoss(reduction='mean')
    # weight_decay implements the L2 regularization of the max-ent objective.
    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=regularization)

    # Halve the learning rate when validation accuracy plateaus.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=5)

    model.to(device)

    print(f"开始训练，设备: {device}")

    best_val_acc = 0
    best_state = None  # snapshot of the best-performing weights
    patience_counter = 0
    patience = 10  # early-stopping patience (epochs without improvement)

    for epoch in range(epochs):
        model.train()
        total_loss = 0
        correct_predictions = 0
        total_samples = 0

        for features, labels in train_loader:
            features, labels = features.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(features)
            loss = criterion(outputs, labels)
            loss.backward()

            # Gradient clipping guards against exploding updates.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)

            optimizer.step()

            # Accumulate sample-weighted loss so the epoch average is exact
            # even with a ragged final batch.
            total_loss += loss.item() * labels.size(0)
            _, predicted = torch.max(outputs.data, 1)
            total_samples += labels.size(0)
            correct_predictions += (predicted == labels).sum().item()

        avg_train_loss = total_loss / total_samples
        train_accuracy = correct_predictions / total_samples

        # Validation pass.
        model.eval()
        val_loss = 0
        val_correct_predictions = 0
        val_total_samples = 0
        with torch.no_grad():
            for features, labels in val_loader:
                features, labels = features.to(device), labels.to(device)
                outputs = model(features)
                loss = criterion(outputs, labels)

                val_loss += loss.item() * labels.size(0)
                _, predicted = torch.max(outputs.data, 1)
                val_total_samples += labels.size(0)
                val_correct_predictions += (predicted == labels).sum().item()

        avg_val_loss = val_loss / val_total_samples
        val_accuracy = val_correct_predictions / val_total_samples

        # Scheduler steps on validation accuracy (mode='max').
        scheduler.step(val_accuracy)

        # Early-stopping bookkeeping; snapshot the best weights so the
        # returned model is the best one, not merely the last one.
        if val_accuracy > best_val_acc:
            best_val_acc = val_accuracy
            best_state = {k: v.detach().clone() for k, v in model.state_dict().items()}
            patience_counter = 0
        else:
            patience_counter += 1

        if (epoch + 1) % 10 == 0 or epoch == 0:
            current_lr = optimizer.param_groups[0]['lr']
            print(f"Epoch {epoch + 1}/{epochs}, Train Loss: {avg_train_loss:.4f}, Train Acc: {train_accuracy:.4f}, "
                  f"Val Loss: {avg_val_loss:.4f}, Val Acc: {val_accuracy:.4f}, LR: {current_lr:.6f}")

        # Early stopping.
        if patience_counter >= patience:
            print(f"早停触发，在第{epoch + 1}轮停止训练")
            break

    # Restore the best validation weights before returning.
    if best_state is not None:
        model.load_state_dict(best_state)

    return model


def evaluate_model(model: MaxEntClassifierPyTorch, test_loader: DataLoader, device: torch.device, class_names: List[str]):
    """Evaluate the classifier on the test set and print per-class metrics.

    Returns:
        Overall test accuracy as a float in [0, 1].
    """
    model.eval()
    model.to(device)

    hits = 0
    seen = 0
    all_predictions = []
    all_labels = []

    with torch.no_grad():
        for features, labels in test_loader:
            features, labels = features.to(device), labels.to(device)
            predicted = torch.argmax(model(features), dim=1)

            seen += labels.size(0)
            hits += (predicted == labels).sum().item()

            all_predictions.extend(predicted.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

    accuracy = hits / seen
    print(f"测试准确率: {accuracy:.4f}")

    # Per-class precision / recall / F1.
    num_classes = len(class_names)
    print("\n各类别详细指标:")
    print("-" * 50)

    from sklearn.metrics import precision_recall_fscore_support
    precision, recall, f1, _ = precision_recall_fscore_support(
        all_labels, all_predictions, average=None, labels=range(num_classes))

    for cls in range(num_classes):
        print(f"{class_names[cls]}: Precision={precision[cls]:.4f}, Recall={recall[cls]:.4f}, F1={f1[cls]:.4f}")

    return accuracy


def hyperparameter_tuning_pytorch(X_train_tensor, y_train_tensor, X_val_tensor, y_val_tensor, feature_dim, num_classes=5):
    """Tune hyper-parameters over a small curated set of combinations.

    Args:
        X_train_tensor / y_train_tensor: training features and labels.
        X_val_tensor / y_val_tensor: validation features and labels.
        feature_dim: input feature dimensionality of the classifier.
        num_classes: number of output classes (default 5 for SST-5).

    Returns:
        (best_params, results): the best parameter dict and a list of
        (lr, reg, batch_size, dropout, val_accuracy) tuples, one per trial.
    """
    print("开始PyTorch超参数调优...")

    epochs_per_trial = 20  # fewer epochs per trial to speed up the search

    best_accuracy = 0
    best_params = {}
    results = []

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Curated (lr, weight_decay, batch_size, dropout) combinations instead of
    # an exhaustive grid.  (The original declared full per-parameter grids
    # that were never used; those dead lists have been removed.)
    param_combinations = [
        (0.005, 0.001, 64, 0.3),   # balanced combination
        (0.001, 0.0001, 128, 0.2), # conservative combination
        (0.01, 0.01, 64, 0.5),     # aggressive combination
        (0.001, 0.001, 64, 0.3),   # original combination
        (0.005, 0.0001, 128, 0.3), # tuned combination
    ]

    for lr, reg, bs, dropout in param_combinations:
        print(f"测试 lr={lr}, reg={reg}, batch_size={bs}, dropout={dropout}")

        train_loader = DataLoader(TextDataset(X_train_tensor, y_train_tensor), batch_size=bs, shuffle=True)
        val_loader = DataLoader(TextDataset(X_val_tensor, y_val_tensor), batch_size=bs, shuffle=False)

        model = MaxEntClassifierPyTorch(num_classes=num_classes, feature_dim=feature_dim, dropout_rate=dropout)

        # Train the trial model.
        trained_model = train_model(model, train_loader, val_loader,
                                    epochs_per_trial, lr, reg, device)

        # Measure validation accuracy of the trained trial model.
        trained_model.eval()
        val_correct_predictions = 0
        val_total_samples = 0
        with torch.no_grad():
            for features, labels in val_loader:
                features, labels = features.to(device), labels.to(device)
                outputs = trained_model(features)
                _, predicted = torch.max(outputs.data, 1)
                val_total_samples += labels.size(0)
                val_correct_predictions += (predicted == labels).sum().item()

        val_accuracy = val_correct_predictions / val_total_samples
        results.append((lr, reg, bs, dropout, val_accuracy))

        if val_accuracy > best_accuracy:
            best_accuracy = val_accuracy
            best_params = {'learning_rate': lr, 'regularization': reg, 'batch_size': bs, 'dropout_rate': dropout}

        print(f"验证准确率: {val_accuracy:.4f}")
        print("-" * 50)

    print(f"\n最佳参数: {best_params}")
    print(f"最佳验证准确率: {best_accuracy:.4f}")

    return best_params, results


def main_pytorch():
    """Main entry point - full PyTorch machine-learning pipeline demo on SST-5.

    Loads the dataset, extracts TF-IDF weighted n-gram features, tunes
    hyper-parameters, trains the final model, evaluates it, and runs a few
    demo predictions. Returns (final_model, feature_extractor).
    """
    print("=" * 60)
    print("基于N-gram的最大熵分类器 - PyTorch SST-5五分类任务 (改进版)")
    print("=" * 60)
    
    # 1. Load the SST-5 dataset (path is relative to the working directory).
    sst5_data_dir = "nlp/dataset/sst5"
    (train_texts, train_labels), (dev_texts, dev_labels), (test_texts, test_labels) = load_sst5_dataset(sst5_data_dir)
    
    class_names = ['Very Negative', 'Negative', 'Neutral', 'Positive', 'Very Positive']
    
    print(f"数据集大小: 训练集{len(train_texts)}, 验证集{len(dev_texts)}, 测试集{len(test_texts)}")
    
    # 2. Improved feature extraction.
    feature_extractor = NGramFeatureExtractorPyTorch(
        n_range=(1, 3),
        max_features=15000,  # larger feature budget
        min_freq=2,          # low minimum frequency to retain more features
        use_tfidf=True       # use TF-IDF weighting
    )
    
    # Build the vocabulary (training set only, to avoid leakage).
    feature_extractor.build_vocabulary(train_texts)
    
    # Convert texts to feature matrices (PyTorch tensors).
    X_train_tensor = feature_extractor.texts_to_features(train_texts)
    X_val_tensor = feature_extractor.texts_to_features(dev_texts)
    X_test_tensor = feature_extractor.texts_to_features(test_texts)
    
    y_train_tensor = torch.tensor(train_labels, dtype=torch.long)
    y_val_tensor = torch.tensor(dev_labels, dtype=torch.long)
    y_test_tensor = torch.tensor(test_labels, dtype=torch.long)
    
    print(f"特征矩阵形状: {X_train_tensor.shape}")
    
    # 3. Hyper-parameter tuning on the validation set.
    best_params, tuning_results = hyperparameter_tuning_pytorch(
        X_train_tensor, y_train_tensor, X_val_tensor, y_val_tensor, feature_extractor.vocab_size, num_classes=5
    )
    
    # 4. Train the final model with the best parameters.
    print("\n使用最佳参数训练最终模型...")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    
    final_model = MaxEntClassifierPyTorch(
        num_classes=5,  # five-way SST-5 classification
        feature_dim=feature_extractor.vocab_size,
        dropout_rate=best_params['dropout_rate']
    )
    
    train_dataset = TextDataset(X_train_tensor, y_train_tensor)
    train_loader = DataLoader(train_dataset, batch_size=best_params['batch_size'], shuffle=True)
    val_dataset = TextDataset(X_val_tensor, y_val_tensor)
    val_loader = DataLoader(val_dataset, batch_size=best_params['batch_size'], shuffle=False)
    test_dataset = TextDataset(X_test_tensor, y_test_tensor)
    test_loader = DataLoader(test_dataset, batch_size=best_params['batch_size'], shuffle=False)
    
    final_model = train_model(final_model, train_loader, val_loader, 
                              epochs=50,  # more epochs for the final run
                              learning_rate=best_params['learning_rate'], 
                              regularization=best_params['regularization'], 
                              device=device)
    
    # 5. Evaluate the final model on the held-out test set.
    print("\n" + "=" * 50)
    print("最终模型评估")
    print("=" * 50)
    
    test_accuracy = evaluate_model(final_model, test_loader, device, class_names)
    
    # 6. Demo predictions on hand-written examples.
    print("\n" + "=" * 50)
    print("预测演示")
    print("=" * 50)
    
    demo_texts = [
        "This movie is absolutely fantastic and amazing",
        "The film was terrible and completely boring",
        "It's an okay movie nothing too special",
        "One of the worst films I have ever seen",
        "A masterpiece of cinema truly outstanding"
    ]
    
    final_model.eval()
    final_model.to(device)
    with torch.no_grad():
        for text in demo_texts:
            features = feature_extractor.text_to_features(text).unsqueeze(0).to(device) # add batch dimension
            outputs = final_model(features)
            probabilities = torch.softmax(outputs, dim=1).squeeze(0).cpu().numpy()
            prediction = torch.argmax(outputs, dim=1).item()
            
            print(f"文本: {text}")
            print(f"预测类别: {class_names[prediction]}")
            print(f"概率分布: {dict(zip(class_names, probabilities))}")
            print("-" * 50)
    
    print("\nPyTorch版本训练完成！")
    print(f"最终测试准确率: {test_accuracy:.4f}")
    
    return final_model, feature_extractor


if __name__ == "__main__":
    # scikit-learn is required for the evaluation metrics.
    try:
        import sklearn
    except ImportError:
        print("请安装scikit-learn: pip install scikit-learn")
        # Exit with a non-zero status; the bare `exit()` used previously is a
        # site-module convenience and reported success (status 0) on failure.
        raise SystemExit(1)

    main_pytorch()