from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.decomposition import PCA

# 1. Load the stop-word list (one word per line, UTF-8).
# Raw string: the backslashes in the original "G:\信息抽取\..." literal are not
# valid escape sequences and trigger a SyntaxWarning on Python 3.12+.
stopword_path = r"G:\信息抽取\实验3\stop_word.txt"
# `with` guarantees the file handle is closed (the original left it open).
with open(stopword_path, 'r', encoding='utf-8') as file2:
    stop_words = [line.strip() for line in file2]

# Sanity check: print every 200th stop word.
print("停用词:\n")
for word in stop_words[::200]:
    print(word)

# 2. Load the corpus: one document per line, presumably already word-segmented
# (filename "cut_word2" suggests segmenter output) — TODO confirm with producer.
# Raw-string path avoids invalid-escape warnings; the dead `corpus = []`
# initializer (immediately overwritten) has been dropped.
with open(r"G:\信息抽取\实验3\cut_word2.txt", encoding='utf-8') as file2:
    corpus = [line.strip() for line in file2]

# Feature extraction

# 3.1 Turn the corpus into a bag-of-words model (raw term frequency as the
# statistic). Passing the stop-word list makes the vectorizer drop those
# words while building its vocabulary.
countVectorizer = CountVectorizer(stop_words=stop_words, analyzer="word")
count_v = countVectorizer.fit_transform(corpus)

# Vocabulary learned from the corpus
print(countVectorizer.get_feature_names_out())
# Dense term-frequency matrix (one row per document)
print(count_v.toarray())

# 3.2 Re-weight the raw counts with TF-IDF. This step is optional — which
# statistic to use depends on the concrete task.
tfidfTransformer = TfidfTransformer()
tfidf = tfidfTransformer.fit_transform(count_v)
print(tfidf.toarray())

# 3.3 Reduce the dimensionality of the TF-IDF vectors (optional; done here
# because the clustering below copes badly with very high-dimensional data).
# PCA projection down to 2 components.
pca = PCA(n_components=2)
pca_weights = pca.fit_transform(tfidf.toarray())
print("降维后的数据")
print(pca_weights)

# Use the elbow method to pick a suitable k: fit MiniBatchKMeans for
# k = 2..9, record each model's inertia (within-cluster sum of squared
# distances), and look for the "elbow" in the curve.
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans

inertias = []
for k in range(2, 10):
    model = MiniBatchKMeans(n_clusters=k)
    model.fit(pca_weights)
    inertias.append(model.inertia_)

# Axis labels and markers make the elbow readable (the original plot had none).
plt.plot(range(2, 10), inertias, marker="o")
plt.xlabel("k (number of clusters)")
plt.ylabel("inertia")
plt.show()

# Cluster the 2-D projection into 3 groups and visualise the assignment.
# (matplotlib / MiniBatchKMeans were already imported above; the bare `y`
# expression from the original — a no-op notebook leftover — is removed.)
minibatch = MiniBatchKMeans(n_clusters=3)
y = minibatch.fit_predict(pca_weights)
plt.scatter(pca_weights[:, 0], pca_weights[:, 1], c=y)
plt.show()  # without this the scatter never appears when run as a script

# Within-cluster sum of squared distances for the chosen k.
print(minibatch.inertia_)
# Group document indices by cluster label:
# result maps "cluster_<label>" -> list of corpus row indices in that cluster.
result = {}
for text_idx, label_idx in enumerate(y):
    # setdefault replaces the original's manual `if key not in result` branch.
    result.setdefault("cluster_{}".format(label_idx), []).append(text_idx)

# Print each cluster followed by the texts it contains.
for clu_k, clu_v in result.items():
    print("\n", "~" * 170)
    print(clu_k)
    print(clu_v)
    for i in clu_v:
        print(corpus[i], "\n===============================>")

# Compare how batch_size affects MiniBatchKMeans, side by side.
# (matplotlib / MiniBatchKMeans were already imported above.)
plt.rcParams['font.sans-serif'] = ['SimHei']  # needed to render Chinese titles

plt.subplot(121)
minibatch = MiniBatchKMeans(n_clusters=5, batch_size=10, random_state=42)
y1 = minibatch.fit_predict(pca_weights)
plt.scatter(pca_weights[:, 0], pca_weights[:, 1], c=y1)
# Original title "批量抽取={10}" printed the braces literally — fixed.
plt.title("批量抽取=10")

plt.subplot(122)
minibatch = MiniBatchKMeans(n_clusters=5, batch_size=50, random_state=42)
y2 = minibatch.fit_predict(pca_weights)
plt.scatter(pca_weights[:, 0], pca_weights[:, 1], c=y2)
plt.title("批量抽取=50")
plt.show()  # the original never displayed this figure
