import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.decomposition import PCA
from sklearn.cluster import DBSCAN
# 1. Read the raw text data, capped at the first 500 lines/documents.
with open('test1.txt', 'r', encoding='utf-8') as f:
    documents = list(f)[:500]

# 2. Load the stop-word list, one word per line.
stopword_path = "stop_words.txt"
with open(stopword_path, 'r', encoding='utf-8') as f:
    stop_words = list(map(str.strip, f))


# 3. Segment each document with jieba and rejoin the tokens with spaces,
#    producing the whitespace-delimited corpus sklearn's vectorizers expect.
corpus = [' '.join(jieba.cut(doc.strip())) for doc in documents]
print(corpus)
# 4.特征提取

# 4.1 Convert the corpus into a bag-of-words model (term frequency as the
#     statistic). Stop words are filtered out while the vocabulary is built.
vectorizer = CountVectorizer(stop_words=stop_words, analyzer="word")
count_v = vectorizer.fit_transform(corpus)

# Vocabulary learned from the corpus.
print("词袋中的词语：\n")
print(vectorizer.get_feature_names_out())
# Term-frequency matrix: one row per document, one column per vocabulary word.
print("词频向量：\n")
print(count_v.toarray())

# 4.2 Re-weight the raw counts as tf-idf scores (optional — which statistic
#     to use depends on the concrete business case).
tfidf = TfidfTransformer().fit_transform(count_v)
print("tf-idf统计指标")
print(tfidf.toarray())

# 4.3 Reduce the tf-idf vectors to 2 dimensions via principal component
#     analysis so DBSCAN operates on a compact 2-D embedding.
pca_weights = PCA(n_components=2).fit_transform(tfidf.toarray())
print("降维后的词频向量")
print(pca_weights)

# 5. Cluster the 2-D embedding with DBSCAN.
#    Each entry of `y` is the cluster id of the corresponding document;
#    label -1 is DBSCAN's noise "cluster" (all outlier points land there).
dbscan = DBSCAN(eps=0.16, min_samples=2)
y = dbscan.fit_predict(pca_weights)
print("result")
print(y)

# 6. Group document indices by cluster label, then print every cluster
#    followed by the text of each member document.
result = {}
for text_idx, label_idx in enumerate(y):
    # Bucket the document index under its cluster key, e.g. "cluster_0".
    result.setdefault("cluster_{}".format(label_idx), []).append(text_idx)

for clu_k, clu_v in result.items():
    print("\n", "~" * 170)
    print(clu_k)
    print(clu_v)

    for i in clu_v:
        # BUG FIX: the original referenced `text_list`, a name that is never
        # defined in this file (NameError at runtime); the loaded texts are
        # stored in `documents`.
        print(documents[i], "\n===============================>")


