"""
垃圾邮件检测器
使用scikit-learn构建的机器学习模型来识别垃圾邮件
"""
import pandas as pd
import numpy as np
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
import warnings
warnings.filterwarnings('ignore')

# Download required NLTK data on first run (no-op if already present).
try:
    # data.find() raises LookupError when a resource is missing locally.
    nltk.data.find('tokenizers/punkt')
    nltk.data.find('corpora/stopwords')
except LookupError:
    # NOTE: if either resource is missing, both are (re)downloaded.
    nltk.download('punkt')
    nltk.download('stopwords')

class SpamDetector:
    """Spam email classifier built on scikit-learn.

    Pipeline: text preprocessing (lowercasing, stopword removal,
    stemming) -> TF-IDF features -> model selection across several
    classifiers -> hyperparameter tuning of the selected model.
    """

    def __init__(self):
        # Fitted TF-IDF vectorizer; set by prepare_features() or load_model().
        self.vectorizer = None
        # Trained classifier; set by train_models()/optimize_model()/load_model().
        self.model = None
        self.stemmer = PorterStemmer()
        self.stop_words = set(stopwords.words('english'))

    def preprocess_text(self, text):
        """Normalize raw email text for vectorization.

        Lowercases, strips non-letter characters, removes English
        stopwords and tokens shorter than 3 characters, and applies
        Porter stemming.

        Args:
            text: raw email body.

        Returns:
            Space-joined string of processed tokens.
        """
        text = text.lower()

        # Keep only letters and whitespace.
        text = re.sub(r'[^a-zA-Z\s]', '', text)

        words = text.split()

        # Drop stopwords and very short tokens, then stem the remainder.
        words = [self.stemmer.stem(word) for word in words
                 if word not in self.stop_words and len(word) > 2]

        return ' '.join(words)

    def load_data(self, file_path=None):
        """Load a labeled dataset.

        Args:
            file_path: optional CSV path; expected to have 'text' and
                'label' columns. When omitted, a small built-in sample
                dataset is used.

        Returns:
            pandas.DataFrame with 'text' and 'label' columns.
        """
        if file_path:
            # latin-1 matches the encoding of the commonly used SMS spam corpus.
            data = pd.read_csv(file_path, encoding='latin-1')
        else:
            data = self.create_sample_dataset()

        return data

    def create_sample_dataset(self):
        """Build a tiny in-memory spam/ham dataset for demos and smoke tests."""
        spam_emails = [
            "URGENT! You have won $1000000! Claim now by calling 123-456-7890!",
            "FREE VIAGRA! Buy now! Limited time offer! Call immediately!",
            "Congratulations! You've been selected for a cash prize! Click here!",
            "LOSE WEIGHT FAST! Amazing diet pills! Order now!",
            "Make money from home! Work from anywhere! Guaranteed income!",
            "FREE LOTTERY TICKET! Win big! Don't miss out!",
            "CLICK HERE FOR FREE MONEY! Limited time only!",
            "Amazing discount! 90% off! Buy now or never!",
            "You've inherited money from a Nigerian prince! Contact us!",
            "FREE CREDIT REPORT! Check your score now!",
            "CASINO BONUS! Play now and win! Free spins!",
            "Debt consolidation! Lower your payments! Apply now!",
            "HOME LOAN APPROVED! No credit check required!",
            "MIRACLE CURE! Doctors hate this one trick!",
            "FREE GIFT CARD! Claim your $500 reward now!"
        ]

        ham_emails = [
            "Hi John, how are you doing today? Let's meet for coffee tomorrow.",
            "Meeting scheduled for 3 PM in conference room A. Please bring your reports.",
            "Your package has been delivered successfully. Thank you for your order.",
            "Reminder: Your dentist appointment is tomorrow at 2 PM.",
            "Happy birthday! Hope you have a wonderful day with family and friends.",
            "The project deadline has been extended to next Friday. Please update your timeline.",
            "Thank you for your presentation today. It was very informative and well-prepared.",
            "Flight confirmation: Your flight to New York departs at 8 AM tomorrow.",
            "Weekly team meeting notes are attached. Please review before our next meeting.",
            "Your subscription will expire next month. Would you like to renew?",
            "Great job on the quarterly report. The client was very impressed.",
            "Lunch reservation confirmed for tomorrow at 12:30 PM at the Italian restaurant.",
            "Please find the requested documents attached to this email.",
            "Your order has been processed and will be shipped within 2 business days.",
            "The conference call with the client went well. They approved the proposal."
        ]

        emails = spam_emails + ham_emails
        labels = ['spam'] * len(spam_emails) + ['ham'] * len(ham_emails)

        return pd.DataFrame({
            'text': emails,
            'label': labels
        })

    def prepare_features(self, data):
        """Vectorize the dataset into TF-IDF features.

        Fits self.vectorizer on the preprocessed text column.

        Args:
            data: DataFrame with 'text' and 'label' columns.

        Returns:
            (X, y): sparse TF-IDF matrix and integer labels (ham=0, spam=1).
        """
        print("正在预处理文本...")
        data['processed_text'] = data['text'].apply(self.preprocess_text)

        self.vectorizer = TfidfVectorizer(
            max_features=5000,   # cap vocabulary size
            ngram_range=(1, 2),  # unigrams and bigrams
            min_df=1,            # keep terms seen in at least one document
            max_df=0.95          # drop near-ubiquitous terms
        )

        print("正在创建TF-IDF特征...")
        X = self.vectorizer.fit_transform(data['processed_text'])

        y = data['label'].map({'ham': 0, 'spam': 1})

        return X, y

    def train_models(self, X, y):
        """Train several classifiers and keep the best by mean CV accuracy.

        Sets self.model to the winner.

        Returns:
            (results, X_test, y_test): per-model metrics dict plus the
            held-out split used for evaluation.
        """
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.2, random_state=42, stratify=y
        )

        # probability=True so predict_email() can call predict_proba()
        # even when the SVM wins model selection (previously crashed).
        models = {
            'Naive Bayes': MultinomialNB(),
            'SVM': SVC(kernel='linear', probability=True, random_state=42),
            'Random Forest': RandomForestClassifier(n_estimators=100, random_state=42),
            'Logistic Regression': LogisticRegression(random_state=42, max_iter=1000)
        }

        results = {}

        print("正在训练和评估模型...")
        for name, model in models.items():
            print(f"\n训练 {name}...")

            model.fit(X_train, y_train)
            y_pred = model.predict(X_test)
            accuracy = accuracy_score(y_test, y_pred)

            # Cross-validate on the training split only, to keep the
            # test split untouched.
            cv_scores = cross_val_score(model, X_train, y_train, cv=5)

            results[name] = {
                'model': model,
                'accuracy': accuracy,
                'cv_mean': cv_scores.mean(),
                'cv_std': cv_scores.std(),
                'y_test': y_test,
                'y_pred': y_pred
            }

            print(f"{name} - 准确率: {accuracy:.4f}")
            print(f"{name} - 交叉验证: {cv_scores.mean():.4f} (+/- {cv_scores.std() * 2:.4f})")

        # Select by cross-validation mean, not test accuracy, to reduce
        # overfitting to the single held-out split.
        best_model_name = max(results.keys(), key=lambda k: results[k]['cv_mean'])
        self.model = results[best_model_name]['model']

        print(f"\n最佳模型: {best_model_name}")

        return results, X_test, y_test

    def optimize_model(self, X, y):
        """Grid-search hyperparameters for the currently selected model.

        Bug fix: this method previously tuned a fresh MultinomialNB
        unconditionally, silently discarding whatever train_models()
        had selected as best. It now tunes the selected model itself,
        falling back to MultinomialNB when no model has been chosen yet.

        Returns:
            (X_test, y_test): held-out split for evaluation.
        """
        print("正在优化模型参数...")

        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.2, random_state=42, stratify=y
        )

        base_model = self.model if self.model is not None else MultinomialNB()

        # Small per-model grids; an unknown model type gets a
        # defaults-only fit (empty grid is valid for GridSearchCV).
        param_grids = {
            MultinomialNB: {'alpha': [0.1, 0.5, 1.0, 2.0, 5.0]},
            SVC: {'C': [0.1, 1.0, 10.0]},
            RandomForestClassifier: {'n_estimators': [100, 200]},
            LogisticRegression: {'C': [0.1, 1.0, 10.0]},
        }
        param_grid = param_grids.get(type(base_model), {})

        grid_search = GridSearchCV(
            base_model,
            param_grid,
            cv=5,
            scoring='accuracy',
            n_jobs=-1
        )

        grid_search.fit(X_train, y_train)

        self.model = grid_search.best_estimator_

        print(f"最佳参数: {grid_search.best_params_}")
        print(f"最佳交叉验证分数: {grid_search.best_score_:.4f}")

        return X_test, y_test

    def evaluate_model(self, X_test, y_test):
        """Report held-out metrics and save a confusion-matrix plot.

        Writes 'confusion_matrix.png' to the working directory.

        Returns:
            Predicted labels for X_test.
        """
        y_pred = self.model.predict(X_test)

        print("\n=== 模型评估结果 ===")
        print(f"准确率: {accuracy_score(y_test, y_pred):.4f}")
        print("\n分类报告:")
        print(classification_report(y_test, y_pred, target_names=['Ham', 'Spam']))

        plt.figure(figsize=(8, 6))
        cm = confusion_matrix(y_test, y_pred)
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                    xticklabels=['Ham', 'Spam'], yticklabels=['Ham', 'Spam'])
        plt.title('混淆矩阵')
        plt.xlabel('预测标签')
        plt.ylabel('真实标签')
        plt.tight_layout()
        plt.savefig('confusion_matrix.png', dpi=300, bbox_inches='tight')
        plt.show()
        # Release the figure so repeated evaluations don't accumulate memory.
        plt.close()

        return y_pred

    def predict_email(self, email_text):
        """Classify a single email.

        Args:
            email_text: raw email body.

        Returns:
            dict with 'prediction' ('Spam'/'Ham'), 'confidence',
            'spam_probability', and 'ham_probability'.

        Raises:
            ValueError: if the model/vectorizer are not trained or loaded.
        """
        if self.model is None or self.vectorizer is None:
            raise ValueError("模型尚未训练！请先调用 train() 方法。")

        processed_text = self.preprocess_text(email_text)
        text_vector = self.vectorizer.transform([processed_text])

        prediction = self.model.predict(text_vector)[0]
        probability = self.model.predict_proba(text_vector)[0]

        # Label encoding fixed in prepare_features(): ham=0, spam=1.
        return {
            'prediction': 'Spam' if prediction == 1 else 'Ham',
            'confidence': max(probability),
            'spam_probability': probability[1],
            'ham_probability': probability[0]
        }

    def save_model(self, model_path='spam_detector_model.pkl', vectorizer_path='vectorizer.pkl'):
        """Persist the trained model and vectorizer with pickle."""
        with open(model_path, 'wb') as f:
            pickle.dump(self.model, f)

        with open(vectorizer_path, 'wb') as f:
            pickle.dump(self.vectorizer, f)

        print(f"模型已保存到: {model_path}")
        print(f"向量化器已保存到: {vectorizer_path}")

    def load_model(self, model_path='spam_detector_model.pkl', vectorizer_path='vectorizer.pkl'):
        """Load a previously saved model and vectorizer.

        SECURITY NOTE: pickle.load executes arbitrary code during
        deserialization — only load files from trusted sources.
        """
        with open(model_path, 'rb') as f:
            self.model = pickle.load(f)

        with open(vectorizer_path, 'rb') as f:
            self.vectorizer = pickle.load(f)

        print("模型和向量化器已成功加载！")

    def train(self, data_path=None):
        """Run the full training pipeline.

        Loads data, extracts features, selects the best of several
        models, tunes its hyperparameters, and evaluates the result.

        Args:
            data_path: optional CSV path forwarded to load_data().

        Returns:
            self, for call chaining.
        """
        print("=== 开始训练垃圾邮件检测器 ===")

        data = self.load_data(data_path)
        print(f"数据集大小: {len(data)}")
        print(f"垃圾邮件数量: {sum(data['label'] == 'spam')}")
        print(f"正常邮件数量: {sum(data['label'] == 'ham')}")

        X, y = self.prepare_features(data)

        # Model selection sets self.model to the CV winner.
        results, X_test, y_test = self.train_models(X, y)

        # Tune the winner's hyperparameters (updates self.model).
        X_test, y_test = self.optimize_model(X, y)

        self.evaluate_model(X_test, y_test)

        print("\n=== 训练完成 ===")

        return self


def main():
    """Demo entry point: train, persist, and exercise the spam detector."""
    detector = SpamDetector()

    # Full training pipeline, then persist the fitted artifacts.
    detector.train()
    detector.save_model()

    print("\n=== 测试垃圾邮件检测 ===")

    # A handful of unseen messages: a mix of obvious spam and normal mail.
    test_emails = [
        "Congratulations! You've won $1,000,000! Click here to claim your prize now!",
        "Hi Sarah, are you free for lunch tomorrow? Let me know what time works for you.",
        "URGENT: Your account will be closed! Act now to avoid fees!",
        "Meeting rescheduled to 3 PM in conference room B. Please confirm attendance.",
        "FREE MONEY! No strings attached! Click here now!"
    ]

    for number, message in enumerate(test_emails, start=1):
        verdict = detector.predict_email(message)
        print(f"\n邮件 {number}: {message[:50]}...")
        print(f"预测结果: {verdict['prediction']}")
        print(f"置信度: {verdict['confidence']:.4f}")
        print(f"垃圾邮件概率: {verdict['spam_probability']:.4f}")


if __name__ == "__main__":
    main()