from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
import numpy as np
import re
def extract_text_from_txt(txt_path):
    """Read a UTF-8 text file and return its full contents.

    Args:
        txt_path: Path of the ``.txt`` file to read.

    Returns:
        str: The entire file contents.

    Note:
        ``f.read()`` on a text-mode handle always returns ``str``, so the
        original ``isinstance``/``str()`` coercion was dead code and has
        been removed.
    """
    with open(txt_path, encoding="utf-8") as f:
        return f.read()

def topic_based_chunk(text, num_topics=3):
    """Split *text* into line-based chunks and label each with an LDA topic.

    Each non-blank line is treated as one "sentence"/chunk. An LDA model is
    fitted over the lines' term-frequency vectors, and every line is tagged
    with the topic it most probably belongs to.

    Args:
        text: The raw document text to chunk.
        num_topics: Number of LDA topics to extract. A hyperparameter —
            tune it to the content being processed.

    Returns:
        list[tuple[str, str]]: ``(topic_label, line)`` pairs, where the
        label lists the topic's top-5 keywords,
        e.g. ``"Topic 1: word_a, word_b, ..."``.
    """
    # Split on newlines and drop blank lines: blank lines would become
    # all-zero rows, and an all-blank input would make CountVectorizer
    # raise "empty vocabulary".
    sentences = [s for s in re.split(r'\n', text) if s.strip()]

    # Term-frequency matrix: one row per line/sentence.
    vectorizer = CountVectorizer()
    sentence_vectors = vectorizer.fit_transform(sentences)

    # Fit the topic model; fixed random_state keeps results reproducible.
    lda = LatentDirichletAllocation(n_components=num_topics, random_state=42)
    lda.fit(sentence_vectors)

    # Build a human-readable label per topic from its 5 strongest keywords.
    vocabulary = vectorizer.get_feature_names_out()
    topics = []
    for topic_idx, topic in enumerate(lda.components_):
        top_words_idx = topic.argsort()[:-6:-1]  # indices of the 5 largest weights
        topic_keywords = [vocabulary[i] for i in top_words_idx]
        topics.append(f"Topic {topic_idx + 1}: {', '.join(topic_keywords)}")

    # Assign each line the topic with the highest probability. One batched
    # transform over the already-computed sentence_vectors replaces the
    # original per-line vectorize+transform loop (n model calls -> 1) and
    # yields identical assignments.
    doc_topic = lda.transform(sentence_vectors)
    assigned = np.argmax(doc_topic, axis=1)
    chunks_with_topics = [
        (topics[topic_id], sentence)
        for topic_id, sentence in zip(assigned, sentences)
    ]
    return chunks_with_topics

# Example usage
if __name__ == "__main__":
    path = r'Chunk\topic_chunking\test_topic_chunking.txt'
    text = extract_text_from_txt(path)
    # num_topics is a hyperparameter: the number of topics to extract.
    # Adjust it to fit the actual text content and requirements.
    topic_chunks = topic_based_chunk(text, num_topics=3)
    for chunk_id, (topic_label, chunk_text) in enumerate(topic_chunks):
        print(f"\nChunk ID: {chunk_id} - {topic_label}:\n{chunk_text}\n")