# encoding: utf-8

import re
from functools import lru_cache

import jieba
import pandas as pd

from sklearn.cluster import KMeans
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.manifold import TSNE
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer

from utils.data import load_txt, split_text

# Path to the labelled training corpus.
txt_path = "../datas/train.txt"

texts, labels = split_text(load_txt(txt_path))

# One DataFrame row per sample: raw text under "内容", its label under "类型".
data = pd.DataFrame(
    [{"内容": text, "类型": label} for text, label in zip(texts, labels)]
)

# print(data.shape)
# print(data["类型"].value_counts())
"""
(300, 2)
A    92
B    89
D    64
E    24
F     7
G     7
J     7
C     4
H     4
I     2
Name: 类型, dtype: int64
"""


@lru_cache(maxsize=None)
def _load_stopwords(path='../datas/stopwords.txt'):
    """Load the stop-word file (one word per line) once per path and cache it.

    The original code re-opened and re-parsed the file on every call to
    chinese_word_cut, i.e. once per document.
    """
    with open(path, encoding='utf-8') as f:
        # Strip the trailing newline from each line.
        return frozenset(line.rstrip('\n') for line in f)


def chinese_word_cut(mytext):
    """Tokenise *mytext* for vectorisation.

    Keeps only CJK characters, segments them with jieba, then drops stop
    words and single-character tokens.  Returns a space-joined token
    string suitable for CountVectorizer.
    """
    # Pre-processing: extract only Chinese character runs; digits,
    # punctuation and Latin letters are discarded.  (The original re.S
    # flag was a no-op for a character-class-only pattern.)
    chinese_runs = re.findall(r'[\u4e00-\u9fa5]+', mytext)
    cleaned = " ".join(chinese_runs)

    # NOTE(review): cut_all=True is jieba's *full* mode (overlapping
    # segments), although the original variable name "seg_list_exact"
    # suggests exact mode was intended.  Kept as-is to preserve output;
    # switch to cut_all=False for exact-mode segmentation.
    segments = jieba.cut(cleaned, cut_all=True)

    stop_words = _load_stopwords()  # cached: file read at most once
    # Drop stop words and single-character tokens.
    return " ".join(
        word for word in segments
        if word not in stop_words and len(word) > 1
    )


# Segment every raw text; the space-joined tokens become the corpus
# fed to CountVectorizer below.
data['分词结果'] = data['内容'].apply(chinese_word_cut)
# print(data.head())
"""
                                          内容 类型               分词结果
0  HG标准镀锌4.8级全螺纹螺柱带加厚螺母平垫弹垫，M39-4.0*150，8套/箱  A  标准 镀锌 螺纹 螺柱 加厚 螺母
1  HG标准镀锌4.8级全螺纹螺柱带加厚螺母平垫弹垫，M39-4.0*160，8套/箱  A  标准 镀锌 螺纹 螺柱 加厚 螺母
2  HG标准镀锌4.8级全螺纹螺柱带加厚螺母平垫弹垫，M39-4.0*170，8套/箱  A  标准 镀锌 螺纹 螺柱 加厚 螺母
3  HG标准镀锌4.8级全螺纹螺柱带加厚螺母平垫弹垫，M39-4.0*180，8套/箱  A  标准 镀锌 螺纹 螺柱 加厚 螺母
4  HG标准镀锌4.8级全螺纹螺柱带加厚螺母平垫弹垫，M39-4.0*190，8套/箱  A  标准 镀锌 螺纹 螺柱 加厚 螺母
"""

# 构建TF-IDF模型

# vectorizer = CountVectorizer()
# transformer = TfidfTransformer()
# tfidf = transformer.fit_transform(vectorizer.fit_transform(data['分词结果']))
# tfidf_weight = tfidf.toarray()

# print(tfidf_weight)
# print(tfidf_weight.shape)
"""
[[0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 ...
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]]
(300, 72)
"""
# 由于使用上述代码最后会造成词袋长度过大，导致维度灾难，所以对上述代码进行修改，加上了降维的操作，使得离散的特征能集中化，也能提高最后模型分类的准确率。

# Build TF-IDF on the raw term counts FIRST, then reduce with LSA
# (TruncatedSVD + L2 normalisation).  The original order applied
# TfidfTransformer to the SVD/Normalizer output, treating dense latent
# components (which may be negative) as term counts — not meaningful.
vectorizer = CountVectorizer()
counts = vectorizer.fit_transform(data['分词结果'])

transformer = TfidfTransformer()
tfidf = transformer.fit_transform(counts)

svd = TruncatedSVD(50)  # reduce to 50 latent dimensions
normalizer = Normalizer(copy=False)  # L2-normalise rows for cosine-like KMeans distance
lsa = make_pipeline(svd, normalizer)

# Dense (n_samples, 50) feature matrix consumed by KMeans below.
tfidf_weight = lsa.fit_transform(tfidf)
print(tfidf_weight.shape)  # (300, 50)

# KMeans clustering.
# The data set is known (from the value_counts above) to contain 10 label
# classes, so K is set to 10.
# random_state fixes centroid initialisation so repeated runs reproduce the
# same clustering; n_init=10 pins the classic sklearn default explicitly
# (newer sklearn changed the default and warns otherwise).
kmeans = KMeans(n_clusters=10, n_init=10, random_state=42)
kmeans.fit(tfidf_weight)
# Uncomment to inspect the cluster centres / per-sample assignments:
# print(kmeans.cluster_centers_)
# for index, label in enumerate(kmeans.labels_, 1):
#     print("index: {}, label: {}".format(index, label))
# inertia_ = sum of squared distances of samples to their nearest centre;
# lower is better.  Useful for tuning n_clusters (elbow method).
print("效果评估值：")
print("inertia: {}".format(kmeans.inertia_))

"""
效果评估值：
inertia: 30.540635282789573

"""

# # Use T-SNE to reduce the weights to 2-D for plotting (more accurate than
# # PCA but slower).  NOTE: enabling this block also requires
# # `import matplotlib.pyplot as plt`, which this file does not import.
# tsne = TSNE(n_components=2)
# decomposition_data = tsne.fit_transform(tfidf_weight)
#
# x = []
# y = []
#
# for i in decomposition_data:
#     x.append(i[0])
#     y.append(i[1])
#
# fig = plt.figure(figsize=(10, 10))
# ax = plt.axes()
# plt.scatter(x, y, c=kmeans.labels_, marker="x")
# plt.xticks(())
# plt.yticks(())
# plt.show()
