from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import DBSCAN
from sklearn.metrics.pairwise import cosine_distances
import numpy as np
from tn import TemporalNetwork
import pandas as pd
import jieba

import os

if __name__ == "__main__":
    # Cluster subtitle/interval texts of one TV channel with TF-IDF + DBSCAN
    # and print the resulting clusters.
    dir_path = "E:\\projects\\江苏数据"

    # NOTE(review): `tn` is constructed but never used below — kept in case
    # TemporalNetwork.__init__ has side effects; confirm and delete if not.
    tn = TemporalNetwork(tn_top_k=200, max_path=10, min_sim=0.7,
                         tn_max_step=6,
                         max_iou=1e-5, min_length=4)

    # Load the stopword list (one word per line). A set gives O(1) membership
    # tests instead of the O(n) scan a list would cost per token.
    with open("..\\auto_cut\\data\\stopwords.txt", 'r', encoding='utf-8') as f:
        stopwords = {line.strip() for line in f}

    for channel in os.listdir(dir_path):
        if channel != "227":  # only channel 227 is processed for now
            continue
        cut_csv = os.path.join(dir_path, channel, "interval.csv")
        df = pd.read_csv(cut_csv)
        emb = np.load(os.path.join(dir_path, channel, "00-00-00-1.npy"))

        # One space-joined token string per row. Joining with ' ' (instead of
        # '') lets TfidfVectorizer's default tokenizer see jieba's word
        # boundaries; ''.join would collapse each document back into a single
        # unbroken CJK run and defeat the word-level TF-IDF entirely.
        texts = []
        for _, row in df.iterrows():
            content = row['content']
            if pd.isna(content):  # empty CSV cells read back as NaN (a float)
                texts.append('')
            else:
                words = jieba.lcut(content)  # segment Chinese text with jieba
                texts.append(' '.join(w for w in words if w not in stopwords))

        # TF-IDF features -> pairwise cosine distance matrix.
        vectorizer = TfidfVectorizer()
        X = vectorizer.fit_transform(texts)
        distances = cosine_distances(X)

        # DBSCAN over the precomputed distance matrix; min_samples=1 means
        # every point joins some cluster (no noise label -1 is produced).
        eps = 0.99
        min_samples = 1
        dbscan = DBSCAN(eps=eps, min_samples=min_samples, metric='precomputed')
        clusters = dbscan.fit_predict(distances)

        # Group texts by their assigned cluster label.
        cluster_texts = {}
        for i, label in enumerate(clusters):
            cluster_texts.setdefault(label, []).append(texts[i])

        # Print each cluster label followed by its member texts.
        for label, texts_in_cluster in cluster_texts.items():
            print(f"聚类标签: {label}")
            for text in texts_in_cluster:
                print(f"  - {text}")
            print("\n")