from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer
import jieba

# Load the blog post to be summarized.
with open("blog.txt", "r", encoding="utf-8") as fh:
    raw_text = fh.read()

# Segment the Chinese text with jieba and rejoin the tokens with spaces so
# sumy receives whitespace-delimited "words".
# NOTE(review): sumy's Tokenizer("chinese") may already segment via jieba
# internally, which would make this pre-segmentation redundant — confirm.
segmented_text = " ".join(jieba.lcut(raw_text))

# Build a sumy document parser over the segmented text.
parser = PlaintextParser.from_string(segmented_text, Tokenizer("chinese"))

# Extract a 3-sentence summary using latent semantic analysis and print it.
summarizer = LsaSummarizer()
for sentence in summarizer(parser.document, 3):
    print(sentence)