import os
import re
import csv
import email
import chardet
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from email.header import decode_header
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

# 1. 邮件解析与标签整合
def load_labels(index_path):
    """Load a TREC-style index file mapping email paths to spam labels.

    Each line has the form ``<label> <relative-path>`` where label is
    ``ham`` or ``spam`` (case-insensitive).

    Args:
        index_path: Path to the index file.

    Returns:
        dict mapping relative email path -> 0 (ham) or 1 (spam).
        Empty dict if the file cannot be read.
    """
    labels = {}
    try:
        # latin1 + errors='ignore' tolerates arbitrary bytes in the index.
        with open(index_path, 'r', encoding='latin1', errors='ignore') as f:
            for line in f:
                parts = line.strip().split()
                if len(parts) >= 2:
                    label = parts[0].lower()
                    # Paths may contain spaces; rejoin everything after the label.
                    path = ' '.join(parts[1:])
                    # Anything that is not explicitly 'ham' is treated as spam.
                    labels[path] = 0 if label == 'ham' else 1
    except OSError as e:
        # Only I/O failures (missing/unreadable file) are expected here.
        print(f"加载标签错误: {str(e)}")
    return labels

# 2. HTML/特殊字符清洗
def clean_html(text):
    """Strip HTML tags from *text* and collapse whitespace runs to single spaces."""
    if not text:
        return ""
    plain = BeautifulSoup(text, "html.parser").get_text(separator=" ")
    return re.sub(r'\s+', ' ', plain).strip()

def decode_mime(header):
    """Decode a MIME-encoded email header (RFC 2047) into plain text.

    Args:
        header: Raw header value, possibly containing encoded words
            like ``=?utf-8?B?...?=``. May be empty or None.

    Returns:
        The decoded header as a str; falls back to ``str(header)`` if
        decoding fails entirely.
    """
    if not header:
        return ""
    try:
        decoded = decode_header(header)
        result = []
        for part, encoding in decoded:
            if isinstance(part, bytes):
                try:
                    # Header may declare no charset; default to utf-8.
                    result.append(part.decode(encoding or 'utf-8', 'ignore'))
                except (LookupError, UnicodeDecodeError):
                    # Unknown/invalid codec name: latin1 never fails.
                    result.append(part.decode('latin1', 'ignore'))
            else:
                result.append(part)
        return ''.join(result)
    except Exception:
        # decode_header can raise on malformed input; best-effort fallback.
        return str(header)

# 3. 提取元数据
def parse_email(file_path):
    """Parse a raw email file into a metadata dict.

    Args:
        file_path: Path to the raw RFC 822 email file.

    Returns:
        dict with keys 'from', 'to', 'subject', 'date', 'body'
        (headers MIME-decoded, HTML bodies stripped to text),
        or None if the file cannot be parsed at all.
    """
    try:
        with open(file_path, 'rb') as f:
            raw = f.read()
        msg = email.message_from_bytes(raw)

        meta = {
            'from': decode_mime(msg.get('From', '')),
            'to': decode_mime(msg.get('To', '')),
            'subject': decode_mime(msg.get('Subject', '')),
            'date': msg.get('Date', ''),
            'body': ''
        }

        # chardet scans the whole message, which is expensive; detect
        # lazily, only if some text part declares no charset.
        detected = None
        body_parts = []
        for part in msg.walk():
            content_type = part.get_content_type()
            if content_type not in ('text/plain', 'text/html'):
                continue
            try:
                payload = part.get_payload(decode=True)
            except Exception:
                # Skip undecodable parts; keep the rest of the message.
                continue
            if not payload:
                continue

            charset = part.get_content_charset()
            if not charset:
                if detected is None:
                    detected = chardet.detect(raw)['encoding'] or 'latin1'
                charset = detected
            try:
                text = payload.decode(charset, 'ignore')
            except (LookupError, UnicodeDecodeError):
                # Unknown codec name: latin1 accepts any byte sequence.
                text = payload.decode('latin1', 'ignore')

            if content_type == 'text/html':
                text = clean_html(text)
            body_parts.append(text)

        meta['body'] = ' '.join(body_parts)
        return meta

    except Exception as e:
        print(f"解析邮件错误 {file_path}: {str(e)}")
        return None

# 4. 数据分批处理
def batch_process(base_dir, labels, batch_size=1000):
    """Generator yielding parsed emails in batches.

    Args:
        base_dir: Root directory of the corpus.
        labels: dict mapping relative email path -> label (from load_labels).
        batch_size: Maximum number of parsed emails per yielded batch.

    Yields:
        Lists of metadata dicts (as returned by parse_email, plus a
        'label' key), each at most *batch_size* long.
    """
    all_data = []
    processed = 0
    total = len(labels)

    for rel_path, label in labels.items():
        # Normalise Windows separators and strip parent-dir escapes.
        rel_path = rel_path.replace('\\', '/').replace('../', '')
        abs_path = os.path.join(base_dir, rel_path)

        if not os.path.exists(abs_path):
            # Fall back to a "data" subdirectory layout.
            abs_path = os.path.join(base_dir, "data", rel_path)
            if not os.path.exists(abs_path):
                print(f"文件未找到: {rel_path}")
                continue

        parsed = parse_email(abs_path)
        if parsed:
            parsed['label'] = label
            all_data.append(parsed)
            processed += 1

            # Emit a full batch and start a fresh one.
            if len(all_data) >= batch_size:
                yield all_data
                all_data = []
                print(f"已处理: {processed}/{total} 封邮件")

    # Flush the final partial batch.
    if all_data:
        yield all_data
    print(f"总共处理: {processed}/{total} 封邮件")

# 主处理流程
def process_dataset(index_path, base_dir, output_csv):
    """Parse the labelled corpus and stream the results into a CSV file."""
    labels = load_labels(index_path)
    if not labels:
        print("未加载到标签，请检查index文件路径")
        return

    total_processed = 0
    for batch_no, batch in enumerate(batch_process(base_dir, labels, batch_size=500)):
        frame = pd.DataFrame(batch)
        total_processed += len(frame)

        # First batch creates the file with a header; later ones append.
        if batch_no == 0:
            frame.to_csv(output_csv, index=False)
        else:
            frame.to_csv(output_csv, mode='a', header=False, index=False)

        print(f"已保存批次: {len(batch)} 封邮件, 总计: {total_processed}")

# 机器学习垃圾邮件识别
def spam_detection(csv_path):
    """Train and evaluate a TF-IDF + logistic-regression spam classifier.

    Args:
        csv_path: Path to a CSV with at least 'body' and 'label' columns
            (as produced by process_dataset). The file is rewritten in
            place with an added 'prediction' column.

    Returns:
        The DataFrame with predictions, or None if the CSV cannot be read.
    """
    try:
        df = pd.read_csv(csv_path)
    except Exception:
        # Narrowed from a bare except: never swallow KeyboardInterrupt.
        print("CSV文件读取失败，请检查路径")
        return None

    print(f"数据集大小: {len(df)}")

    # Drop rows with no body; the remaining values are coerced to str
    # (dropna already removed NaN bodies, so no fillna is needed).
    df = df.dropna(subset=['body'])
    df['body'] = df['body'].astype(str)

    # Hold out 20% for evaluation; fixed seed for reproducibility.
    X_train, X_test, y_train, y_test = train_test_split(
        df['body'], df['label'], test_size=0.2, random_state=42
    )

    # NOTE(review): stop_words='english' has no effect on a Chinese corpus
    # such as trec06c — kept for behavioral compatibility.
    vectorizer = TfidfVectorizer(max_features=5000, stop_words='english')
    X_train_vec = vectorizer.fit_transform(X_train)
    X_test_vec = vectorizer.transform(X_test)

    model = LogisticRegression(max_iter=1000, n_jobs=-1)
    model.fit(X_train_vec, y_train)

    # Accuracy on the held-out split.
    y_pred = model.predict(X_test_vec)
    acc = accuracy_score(y_test, y_pred)
    print(f"模型准确率: {acc:.4f}")

    # Predict on the full dataset (train rows included) and persist.
    df['prediction'] = model.predict(vectorizer.transform(df['body']))
    df.to_csv(csv_path, index=False)
    return df

# 示例用法
if __name__ == "__main__":
    # Windows path handling: use a raw string (r-prefix) or forward slashes.
    base_dir = r"D:\桌面\trec"  # Replace with your actual corpus root.

    # Derive the index and output paths from the corpus root.
    index_path = os.path.join(base_dir, "full", "index")
    output_csv = os.path.join(base_dir, "trec06c_processed.csv")

    # Verify the index file exists before processing.
    if not os.path.exists(index_path):
        print(f"index文件不存在: {index_path}")
        # Fall back to the alternative trec06c directory layout.
        index_path = os.path.join(base_dir, "trec06c", "full", "index")

    print(f"使用index路径: {index_path}")
    print(f"输出CSV路径: {output_csv}")

    # Stage 1: parse the corpus and write the processed CSV.
    process_dataset(index_path, base_dir, output_csv)

    # Stage 2: train/evaluate the classifier on the processed CSV.
    if os.path.exists(output_csv):
        final_df = spam_detection(output_csv)
        if final_df is not None:
            print("\n处理后的数据集样例:")
            print(final_df[['from', 'subject', 'label', 'prediction']].head(3))
    else:
        print("CSV文件未创建，无法进行垃圾邮件检测")
