import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import jieba
from sklearn.feature_extraction.text import CountVectorizer

# Pandas display: show every column and never wrap wide frames.
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)

# Subplot grid bookkeeping: rows, columns, running subplot index.
spr = 1
spc = 1
spn = 0
plt.figure(figsize=(6, 6))

# 4. Read zhoukao.txt and run the naive-Bayes workflow below.
# (1) Data preprocessing
# (1a) Use the sentence column as the feature, the status column as the label.
df = pd.read_csv(r'../../../../../large_data/ML2/zhoukao.txt')
print(df.head())
x = df.iloc[:, 0]
y = df.iloc[:, 1]

# Encode the string labels as integers 0..k-1.
from sklearn.preprocessing import LabelEncoder
y = LabelEncoder().fit_transform(y)

# (1b) Tokenize each sentence with jieba (precise mode, not full mode).
x = x.map(lambda sentence: jieba.lcut(sentence, cut_all=False))
print(x.head())

# (1c) Re-join the tokens with spaces so CountVectorizer can build a
# term-frequency matrix from them.
x = x.map(' '.join)
print(x[:5])
# The token pattern keeps ASCII words and CJK character runs; the default
# pattern would discard single-character Chinese tokens.
# NOTE: renamed from `model` — the original rebinding was clobbered by the
# classifier assignment further down, shadowing the vectorizer.
vectorizer = CountVectorizer(token_pattern='[a-zA-Z\u4e00-\u9fa5]+')
tf = vectorizer.fit_transform(x)

# (1d) Print the learned vocabulary and each sample's per-term counts.
# FIX: get_feature_names() was removed in scikit-learn 1.2 — use
# get_feature_names_out(); the sparse `.A` shortcut is deprecated in
# SciPy — use toarray().
print(vectorizer.get_feature_names_out())
print(tf.toarray())

# (2) Model training
from sklearn.naive_bayes import MultinomialNB

# (2a/2b) Build and fit the multinomial naive-Bayes classifier in one
# expression (fit returns the estimator itself, so the calls chain).
model = MultinomialNB().fit(tf, y)

# (2c) In-sample predictions: hard labels and P(class == 1) for each row.
h = model.predict(tf)
proba = model.predict_proba(tf)[:, 1]

# (2d) Report five classification metrics (computed on the training data).
from sklearn.metrics import (accuracy_score, precision_score, recall_score,
                             f1_score, roc_auc_score)

# ROC-AUC is scored from probabilities; the other four from hard labels.
for name, score in [('accuracy_score', accuracy_score(y, h)),
                    ('precision_score', precision_score(y, h)),
                    ('recall_score', recall_score(y, h)),
                    ('f1_score', f1_score(y, h)),
                    ('roc_auc_score', roc_auc_score(y, proba))]:
    print(f'{name} = {score}')

# (2e) Draw a word cloud from the first (space-joined) sample.
from wordcloud import WordCloud

text = x[0]
print(text)
cloud = WordCloud(background_color='white',
                  width=600,
                  height=600,
                  margin=10).generate(text)
spn += 1
plt.subplot(spr, spc, spn)
plt.imshow(cloud)

# Finally show all plotting
plt.show()
