import re
import classes as classes
import jieba as jieba
import numpy as np
import pandas as pd
import imageio
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix, classification_report
# 支持向量机
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from matplotlib.font_manager import FontProperties
# 随机森林
from sklearn.ensemble import RandomForestClassifier


# Configure matplotlib to render Chinese text (axis labels / titles below use
# Chinese strings, which would otherwise show as empty boxes).
# NOTE(review): assumes SimHei.ttf exists in the working directory — confirm
# the font file ships with the project, otherwise FontProperties raises.
font = FontProperties(fname="SimHei.ttf")
plt.rcParams['font.family'] = font.get_name()


# 1. Load the dataset: ~800k rows of [label, content].
#    Label 0 = normal SMS (ham), 1 = spam. Column 0 of the CSV is used as index.
data = pd.read_csv('message80W.csv', encoding='utf-8', index_col=0, header=None)
# Assign column names (category, message)
data.columns = ['类别', '短信']
# Show the class distribution.
# Fix: the original evaluated `data.类别.value_counts()` as a bare expression,
# which is a no-op in a script (only a REPL/notebook would display it).
print(data.类别.value_counts())

# 2. Text preprocessing
temp = data.短信
# Report missing values.
# Fix: the original evaluated `temp.isnull().sum()` as a bare expression,
# which is a no-op in a script.
print(temp.isnull().sum())

# Drop duplicate messages
data_dup = temp.drop_duplicates()

# De-anonymisation: the corpus masks sensitive characters with the letter 'x';
# strip those placeholder characters.
# NOTE(review): this removes *every* 'x', including any legitimate ones in
# URLs or English words — confirm this is acceptable for the corpus.
l1 = data_dup.astype('str').apply(lambda x: len(x)).sum()
data_qumin = data_dup.astype('str').apply(lambda x: re.sub('x', '', x))
l2 = data_qumin.astype('str').apply(lambda x: len(x)).sum()
# print(data_qumin.head(10))
# print('减少了' + str(l1-l2) + '个字符')

# Load a user dictionary so that domain-specific terms in the messages
# survive word segmentation intact.
jieba.load_userdict('newdic1.txt')
# Tokenise each message with jieba
data_cut = data_qumin.astype('str').apply(lambda x: list(jieba.cut(x)))
# print(data_cut)

# Remove stop words ('ooo' is a separator that never occurs in the file,
# so each line is read as a single token)
stopword = pd.read_csv('stopword.txt', sep='ooo', encoding='gbk', header=None, engine='python')
stopword = [' '] + list(stopword[0])
l3 = data_cut.astype('str').apply(lambda x: len(x)).sum()
# Fix: membership tests against a list are O(len(stopword)) per token;
# a set gives O(1) lookups with identical semantics.
stopword_set = set(stopword)
data_qustop = data_cut.apply(lambda x: [i for i in x if i not in stopword_set])
l4 = data_qustop.astype('str').apply(lambda x: len(x)).sum()
# print(l3)
# print(l4)
# print(data_qustop)
# print('减少了' + str(l3-l4) + '个字符')

# Keep only messages that still contain tokens.
# Fix: the original looped over the index with repeated scalar lookups
# (`data_qustop[i] != []` per element); a vectorised boolean mask is
# equivalent and avoids the per-element indexing.
data_qustop = data_qustop[data_qustop.apply(len) > 0]
# print(data_qustop)
# data_qustop.to_csv('output.csv', index=False)

# 3. Word-frequency statistics
# Look up the category for every message that survived preprocessing,
# keeping the result aligned to data_qustop's index.
lab = list(data['类别'].loc[data_qustop.index])
lab1 = pd.Series(lab, index=data_qustop.index)


def cipin(data_qustop, num=10):
    """Count word frequencies over a collection of token lists.

    Joins every token list into one whitespace-separated corpus, counts
    each token's occurrences, and returns only the tokens whose count is
    strictly greater than `num` (default 10) as a pandas Series.
    """
    # One big space-separated string covering the whole corpus
    corpus = ' '.join(' '.join(tokens) for tokens in data_qustop)
    # Frequency of every token, descending
    freq = pd.Series(corpus.split()).value_counts()
    # Keep only tokens above the threshold
    return freq[freq > num]


# Partition the corpus by label: spam messages (label 1) ...
data_gar = data_qustop.loc[lab1 == 1]
# ... and normal messages (label 0)
data_nor = data_qustop.loc[lab1 == 0]

# Word-frequency tables: keep spam words seen more than 5 times,
# normal words seen more than 30 times.
data_gar1 = cipin(data_gar, num=5)
data_nor1 = cipin(data_nor, num=30)

# 绘制垃圾短信词云图
# back_pic = imageio.imread('background.jpg')
# wc = WordCloud(font_path='simhei.ttf',  # 字体
#                background_color='white',    # 背景颜色
#                max_words=2000,   # 最大词数
#                mask=back_pic,   # 背景图片
#                max_font_size=200,  # 字体大小
#                random_state=1234)  # 设置随机配色方案
#
# # fit_words方法的参数是一个字典，键为单词，值为出现的次数
# gar_wordcloud = wc.fit_words(data_gar1)
# plt.figure(figsize=(16, 8))
# plt.imshow(gar_wordcloud)
# plt.axis('off')
# plt.savefig('spam.jpg')
# plt.show()
#
# # 绘制非垃圾短信词云图
# nor_wordcloud = wc.fit_words(data_nor1)
# plt.figure(figsize=(16, 8))
# plt.imshow(nor_wordcloud)
# plt.axis('off')
# plt.savefig('non-spam.jpg')
# plt.show()


# 4. Sampling: simple random sample of 10,000 spam and 10,000 ham messages.
num = 10000
# Random sample of spam messages
adata = data_gar.sample(num, random_state=123)
# Random sample of normal messages
bdata = data_nor.sample(num, random_state=123)

# Stack the two samples; spam rows come first, then ham rows
data_sample = pd.concat([adata, bdata])
# print(data_sample)

# Join each token list back into a single whitespace-separated string
cdata = data_sample.apply(' '.join)

# Label column: the first `num` rows (spam) get 1, the remaining `num` get 0
lab = pd.DataFrame([1] * num + [0] * num, index=cdata.index)

# Column-wise merge of message text and label
my_data = pd.concat([cdata, lab], axis=1)
my_data.columns = ['message', 'label']
# print(my_data)


# Split into training and test sets (80% / 20%, fixed seed for reproducibility)
features = my_data.message
targets = my_data.label
x_train, x_test, y_train, y_test = train_test_split(
    features, targets, test_size=0.2, random_state=123)

# # 训练集
# cv = CountVectorizer()  # 将文本中的词语转化为词频矩阵
# train_cv = cv.fit_transform(x_train)  # 拟合数据，再将数据转化为标准化格式
# train_cv.toarray()
# train_cv.shape  # 查看数据大小
# cv.vocabulary_  # 查看词库内容
# print(train_cv.shape)
# print(cv.vocabulary_)
#
# # 测试集
# cv1 = CountVectorizer(vocabulary=cv.vocabulary_)
# test_cv = cv1.fit_transform(x_test)
# test_cv.shape
# print(test_cv.shape)
# print(cv1)

# # 定义朴素贝叶斯模型
# nb = MultinomialNB()  # 朴素贝叶斯分类器
#
# num_iterations = 10  # 指定迭代次数
#
# train_scores = []  # 记录训练集准确率
# test_scores = []  # 记录测试集准确率
# iterations = []   # 记录迭代次数
#
# # 模型训练与评估，记录指标的变化
# for i in range(num_iterations):
#     # 训练模型
#     nb.fit(train_cv, y_train)
#
#     # 计算训练集准确率
#     train_score = nb.score(train_cv, y_train)
#     train_scores.append(train_score)
#
#     # 计算测试集准确率
#     test_score = nb.score(test_cv, y_test)
#     test_scores.append(test_score)
#
#     # 记录迭代次数
#     iterations.append(i)
#
#     # 输出当前迭代的训练集准确率和测试集准确率
#     print(f"Iteration {i+1}: Train Accuracy = {train_score}, Test Accuracy = {test_score}")
#
# # 绘制训练指标和测试指标的折线图
# plt.plot(iterations, train_scores, label='Train')
# plt.plot(iterations, test_scores, label='Test')
# plt.xlabel('Iteration')
# plt.ylabel('Accuracy')
# plt.title('Model Performance')
# plt.legend()
# plt.show()

# # 朴素贝叶斯
# nb = MultinomialNB()   # 朴素贝叶斯分类器
# nb.fit(train_cv, y_train)   # 训练分类器
# pre = nb.predict(test_cv)  # 预测
#
# print("预测结果:", pre)
#
# # 评价
# # 计算真实类别和预测类别之间的混淆矩阵
# cm = confusion_matrix(y_test, pre)
# # 生成分类报告
# cr = classification_report(y_test, pre)
# print(cm)
# print(cr)
#
#
# # 绘制训练指标和测试指标的图表
# train_score = nb.score(train_cv, y_train)
# test_score = nb.score(test_cv, y_test)
#
# plt.bar(['Train', 'Test'], [train_score, test_score])
# plt.xlabel('Dataset')
# plt.ylabel('Accuracy')
# plt.title('Model Performance')
# plt.show()


# Feature extraction: bag-of-words term counts.
# The vocabulary is learned from the training split only; the test split is
# encoded with that same vocabulary (no fitting on test data).
cv = CountVectorizer()  # converts text into a term-frequency matrix
train_cv = cv.fit_transform(x_train)  # fit vocabulary + transform training texts
test_cv = cv.transform(x_test)

# 支持向量机分类器
# svm = SVC()
# svm.fit(train_cv, y_train)
#
# # 定义参数网格
# param_grid = {'C': [0.1, 1, 10],
#               'kernel': ['linear', 'rbf']}
#
# # 网格搜索
# grid_search = GridSearchCV(svm, param_grid, cv=5)
# grid_search.fit(train_cv, y_train)
#
# # 最佳参数
# best_params = grid_search.best_params_
# print('最佳参数:', best_params)
#
# # 获取参数组合及其对应的模型性能
# results = grid_search.cv_results_
# param_combinations = results['params']
# mean_scores = results['mean_test_score']
#
# # 绘制参数组合与准确率之间的折线图
# labels = [str(params) for params in param_combinations]
# plt.plot(labels, mean_scores)
# plt.xticks(rotation=45)
# plt.xlabel('参数组合')
# plt.ylabel('准确率')
# plt.title('不同参数组合的准确率')
# plt.show()

# # 在测试集上进行预测
# svm_pred = svm.predict(test_cv)
#
# # 计算准确率
# svm_accuracy = accuracy_score(y_test, svm_pred)
# print('支持向量机分类器准确率:', svm_accuracy)
#
# # 输出混淆矩阵
# cm = confusion_matrix(y_test, svm_pred)
# print('混淆矩阵:\n', cm)

# 输出分类报告
# cr = classification_report(y_test, svm_pred)
# print('分类报告:\n', cr)


# Random forest classifier
rf = RandomForestClassifier()
rf.fit(train_cv, y_train)

# Parameter grid for hyper-parameter tuning
param_grid = {'n_estimators': [50, 100, 200],
              'max_depth': [None, 5, 10]}

# Grid search with 5-fold cross-validation
grid_search = GridSearchCV(rf, param_grid, cv=5)
grid_search.fit(train_cv, y_train)

# Best parameter combination
best_params = grid_search.best_params_
print('最佳参数:', best_params)

# Predict on the test set with the tuned model.
# Bug fix: the original predicted with the default (untuned) `rf`, so the
# grid-search result never influenced the reported metrics; use the
# refitted best estimator instead.
rf_pred = grid_search.best_estimator_.predict(test_cv)

# Test-set accuracy
rf_accuracy = accuracy_score(y_test, rf_pred)
print('随机森林分类器准确率:', rf_accuracy)

# Confusion matrix of true vs. predicted labels
cm = confusion_matrix(y_test, rf_pred)
print('混淆矩阵:\n', cm)

# Per-class precision / recall / F1 report
cr = classification_report(y_test, rf_pred)
print('分类报告:\n', cr)

# Mean cross-validated accuracy for every parameter combination
results = grid_search.cv_results_
param_combinations = results['params']
mean_scores = results['mean_test_score']

# Line plot: parameter combination vs. cross-validated accuracy
labels = [str(params) for params in param_combinations]
plt.plot(labels, mean_scores)
plt.xticks(rotation=45)
plt.xlabel('参数组合')
plt.ylabel('准确率')
plt.title('不同参数组合的准确率')
plt.show()

