import os
import joblib
import pandas as pd
import re
import zhconv
import jieba.posseg as psg
from sklearn.feature_extraction.text import CountVectorizer
import pickle
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
import numpy as np

# Pin the working directory to this script's own directory so that the
# relative 'data/...' paths used below resolve no matter where the
# script is launched from.
os.chdir(os.path.dirname(os.path.abspath(__file__)))


def clean_data(content):
    """Normalize one email body into a space-separated token string.

    Pipeline: drop every character outside the CJK unified ideograph
    range, convert traditional to simplified Chinese, POS-tag with
    jieba, and keep only nouns (n/nr/ns/nt), verbs (v) and adjectives
    (a). Tokens are joined with single spaces so CountVectorizer can
    split them back apart.

    Args:
        content: raw email text (any string).

    Returns:
        str: filtered tokens joined by spaces; may be empty if no
        Chinese content survives the filters.
    """
    # Keep Chinese characters only.
    text = re.sub(r'[^\u4e00-\u9fa5]', '', content)
    # Traditional -> simplified Chinese.
    text = zhconv.convert(text, 'zh-cn')
    # POS whitelist; set membership behaves identically to the original
    # list lookup but reads as the closed set it is.
    kept_pos = {'n', 'nr', 'ns', 'nt', 'v', 'a'}
    tokens = [word for word, flag in psg.cut(text) if flag in kept_pos]
    return ' '.join(tokens)


def train_model():
    """Fit a multinomial Naive Bayes spam classifier and persist it.

    Loads the pre-vectorized training set from
    'data/03_模型训练数据.pkl' (a dict with feature matrix 'x' and
    labels 'y'), fits MultinomialNB, and writes the fitted model to
    'data/04_垃圾邮件分类模型.pkl' with joblib.
    """
    # 'with' guarantees the pickle file handle is closed even on error.
    with open('data/03_模型训练数据.pkl', 'rb') as fh:
        dataset = pickle.load(fh)

    classifier = MultinomialNB()
    classifier.fit(dataset['x'], dataset['y'])

    # Persist with the standard .pkl extension; evaluate() reads this path.
    joblib.dump(classifier, 'data/04_垃圾邮件分类模型.pkl')
    print("模型训练完成并保存")


def evaluate():
    """Evaluate the persisted spam classifier on the raw test set.

    Re-applies the training-time cleaning (clean_data) and vocabulary
    to 'data/01_原始测试集.csv', predicts with the saved model, prints
    a classification report and returns it as a dict.

    Returns:
        dict: sklearn classification_report with output_dict=True
        (per-class precision/recall/f1 plus accuracy and averages).

    Raises:
        FileNotFoundError: if the model/feature/test files are missing.
    """
    # 1. Raw test data; expects 'content' (email text) and 'label' columns.
    test_data = pd.read_csv('data/01_原始测试集.csv')

    # 2. Vocabulary captured at training time, so the test feature
    #    columns line up with what the model was trained on.
    with open('data/03_模型训练特征.pkl', 'rb') as f:
        vocab = pickle.load(f)
    transfer = CountVectorizer(vocabulary=vocab)

    # 3. Load the model. BUG FIX: train_model() saves the model as
    #    '...模型.pkl', but this function loaded '...模型.pth', which
    #    raised FileNotFoundError after a fresh training run. The two
    #    paths now agree.
    model = joblib.load('data/04_垃圾邮件分类模型.pkl')

    # 4. Clean each email exactly once; drop rows that become empty
    #    after cleaning (no Chinese text / no whitelisted POS tokens),
    #    keeping labels aligned with the kept rows.
    cleaned_contents = []
    true_labels = []
    for content, label in zip(test_data['content'].values, test_data['label'].values):
        cleaned = clean_data(content)
        if cleaned.strip():
            cleaned_contents.append(cleaned)
            true_labels.append(label)

    # 5. Vectorize the whole batch at once; keep the sparse matrix
    #    (MultinomialNB.predict accepts it directly).
    x_test = transfer.transform(cleaned_contents)

    # 6. Predict and report.
    y_pred = model.predict(x_test)
    print(classification_report(true_labels, y_pred))

    # Dict form so callers can read individual metrics programmatically.
    return classification_report(true_labels, y_pred, output_dict=True)

if __name__ == '__main__':
    # Uncomment to retrain before evaluating.
    # train_model()
    report = evaluate()

    # Summarize headline metrics from the report dict.
    print("\n关键指标:")
    print(f"准确率: {report['accuracy']:.2f}")
    aggregate_keys = ('accuracy', 'macro avg', 'weighted avg')
    for label, metrics in report.items():
        if label in aggregate_keys:
            continue
        print(
            f"类别 {label} - 精确率: {metrics['precision']:.2f}, 召回率: {metrics['recall']:.2f}, F1: {metrics['f1-score']:.2f}")

#
#
# import os
# import joblib
# import pandas as pd #pandas 数据处理模块
# import codecs #处理文本编码与解码的核心工具
# import re #正则表达式操作模块
# import zhconv #中文简繁体转换的工具
# import jieba #分词器
# import jieba.posseg as psg #分词器子模块 能够同时返回分词结果和每个词的词性标签
# from sklearn.feature_extraction.text import CountVectorizer #通过词频统计将文本数据转换为数值矩阵
# import pickle #Python 标准库中用于对象序列化和反序列化
#
# from sklearn.metrics import classification_report
# from sklearn.naive_bayes import MultinomialNB
#
# os.chdir(os.path.dirname(os.path.abspath(__file__)))
#
# if __name__ == '__main__':
#     # 加载数据集
#     train_data = pickle.load(open('data/03_模型训练数据.pkl', 'rb'))
#
#
# def train():
#     model = MultinomialNB()
#     model.fit(train_data['x'], train_data['y'])
#     joblib.dump(model, 'data/04_垃圾邮件分类模型.pth')
#
# def clean_data(content):
#     # 取出中文
#     content = re.sub(r'[^\u4e00-\u9fa5]]','',content)
#     # 繁体转简体
#     content = zhconv.convert(content,locale='zh-cn')
#     return content
#     # 分词，过滤词性
#     content_pos = psg.cut(content)
#     # 需要留存的词性
#     allow_pos = ['n', 'nr', 'ns', 'nt', 'v', 'a']
#
#     words = []
#     for word,pos in content_pos:
#         if pos in allow_pos:
#             words.append(word)
#
#     return ' '.join(words)
#
# def evaluate():
#     # 加载数据
#     test_data = pd.read_csv('data/01_原始测试集.csv')
#     # 特征提取器
#     vocab = pickle.load(open('data/03_模型训练特征.pkl', 'rb'))
#     transfer = CountVectorizer(vocabulary=vocab)
#
#     # 加载模型
#     model = joblib.load('data/04_垃圾邮件分类模型.pth')
#
#     # 测试集评估
#     x_test = []
#     y_test = []
#
#     for content,label in zip(test_data['content'].values,test_data['label'].values):
#         if(len(clean_data(content)) == 0):
#             continue
#         x_test.append(transfer.transform([clean_data(content)]).toarray().tolist()[0]) #
#         y_test.append(label) #
#
#     y_pre = model.predict(x_test)
#
#     print(classification_report(y_test, y_pre))
#
# # train()
# evaluate()