import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Widen pandas console output: show every column and never wrap wide frames onto multiple lines.
pd.set_option('display.max_columns', None, 'display.expand_frame_repr', False)


def sep(label=''):
    """Print a horizontal separator line with *label* embedded in the middle."""
    bar = '-' * 32
    print(f'{bar}{label}{bar}')


# 1. Exercises on the week_exam.txt dataset
# (1) Load the dataset
# Read the data and print the first five rows.
sep('读取数据，打印前5行信息')
df = pd.read_csv(r'../../../large_data/ML2/week_exam.txt')
print(df.head())

# Split into features and label — the label is the last column.
x, y = df.iloc[:, :-1], df.iloc[:, -1]

# (2) Data preprocessing
from sklearn.preprocessing import LabelEncoder, StandardScaler

# Encode the string class labels as integers 0, 1, 2.
label_encoder = LabelEncoder()
y = label_encoder.fit_transform(y)

# Standardize the features (zero mean, unit variance).
scaler = StandardScaler()
x = scaler.fit_transform(x)

# Split the dataset into train/test with a 7:3 ratio (fixed seed for reproducibility).
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, random_state=666)

# (3) Model validation
# Gaussian naive Bayes baseline: report train and test accuracy.
sep('使用朴素贝叶斯模型，打印模型得分')
from sklearn.naive_bayes import GaussianNB

nb = GaussianNB()
nb.fit(x_train, y_train)
print(f'Training score = {nb.score(x_train, y_train)}')
print(f'Testing score = {nb.score(x_test, y_test)}')

# ②	使用knn算法模型，配合网格搜索交叉验证，验证k值为3,4,5时的最优得分和最优参数
sep('使用knn算法模型，配合网格搜索交叉验证，验证k值为3,4,5时的最优得分和最优参数')
best_score = None
best_params = None
best_model = None
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier

estimator = KNeighborsClassifier()
params = dict(n_neighbors=[3, 4, 5])
grid = GridSearchCV(estimator, params, cv=5, iid=True)
grid.fit(x_train, y_train)
print(f'Best score = {grid.best_score_}')
print(f'Best params = {grid.best_params_}')
if best_score is None or grid.best_score_ > best_score:
    best_score = grid.best_score_
    best_params = grid.best_params_
    best_model = type(estimator)

# ③	使用逻辑回归算法模型，配合网格搜索交叉验证，验证正则系数为0.1,0.5,1，和L1，L2正则情况下的最优得分和最优参数
sep('使用逻辑回归算法模型，配合网格搜索交叉验证，验证正则系数为0.1,0.5,1，和L1，L2正则情况下的最优得分和最优参数')
from sklearn.linear_model import LogisticRegression

estimator = LogisticRegression()
params = dict(solver=['liblinear'],
              multi_class=['auto'],
              penalty=['l1', 'l2'],
              C=[0.1, 0.5, 1])
grid = GridSearchCV(estimator, params, cv=5, iid=True)
grid.fit(x_train, y_train)
print(f'Best score = {grid.best_score_}')
print(f'Best params = {grid.best_params_}')
if best_score is None or grid.best_score_ > best_score:
    best_score = grid.best_score_
    best_params = grid.best_params_
    best_model = type(estimator)

# ④	使用决策树算法模型，配合网格搜索交叉验证，验证最大深度分别为3,4,5情况下的最优得分和最优参数
sep('使用决策树算法模型，配合网格搜索交叉验证，验证最大深度分别为3,4,5情况下的最优得分和最优参数')
from sklearn.tree import DecisionTreeClassifier

estimator = DecisionTreeClassifier()
params = dict(max_depth=[3, 4, 5])
grid = GridSearchCV(estimator, params, cv=5, iid=True)
grid.fit(x_train, y_train)
print(f'Best score = {grid.best_score_}')
print(f'Best params = {grid.best_params_}')
if best_score is None or grid.best_score_ > best_score:
    best_score = grid.best_score_
    best_params = grid.best_params_
    best_model = type(estimator)

# (4) Model assessment
# Rebuild a fresh estimator from the overall best model class and parameters, then retrain it.
sep('使用上面结果中最优得分的参数模型重新构建模型，训练模型')
print(f'Best score = {best_score}')
print(f'Best params = {best_params}')
print(f'Best model = {best_model}')
model = best_model(**best_params)
model.fit(x_train, y_train)

# Report accuracy, the classification report and the confusion matrix on the held-out test set.
sep('打印模型的准确率，分类报告，混淆矩阵信息')
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix

y_pred = model.predict(x_test)
print(f'模型的准确率: {accuracy_score(y_test, y_pred)}')
print('模型的分类报告: ')
print(classification_report(y_test, y_pred))
print('模型的混淆矩阵信息: ')
print(confusion_matrix(y_test, y_pred))

# 2. jieba segmentation and word-cloud exercises
import jieba
from wordcloud import WordCloud

# Task sentence to analyze.
text = '王老师饿了一天没吃东西，看到其他老师桌子上有苹果，以帮人洗苹果为由，吃别人的苹果'

# (1) Word segmentation
# Segment the sentence with jieba in precise (non-full) mode.
sep('将题目中的语句使用jieba进行分词')
words = jieba.lcut(text, cut_all=False)
print('/'.join(words))

# Drop words whose length is <= 1 character.
sep('去除单词长度小于等于1的词')
words = [w for w in words if len(w) > 1]
print('/'.join(words))

# (2) Word cloud
# Render the word frequencies of the filtered words as a word cloud.
sep('将题目处理后的语句的词频进行词云可视化处理')
cloud = WordCloud(background_color='white',
                  width=800,
                  height=800,
                  margin=5).generate(' '.join(words))
plt.imshow(cloud)
plt.axis('off')
plt.show()
