import jieba

# Load a user-defined dictionary so domain-specific terms are kept as
# single tokens during segmentation.
# NOTE(review): assumes "userdict.txt" exists in the current working
# directory — jieba raises if the file is missing; confirm deployment layout.
jieba.load_userdict("userdict.txt")

# Path to the stopword list (expected: one word per line, UTF-8),
# consumed by load_stopwords().
stopwords_path = "stopwords.txt"


# 分词函数
def segment_text(text):
    """Segment Chinese text with jieba and remove stopwords.

    Args:
        text: The input string to segment.

    Returns:
        A single string of the surviving tokens joined by spaces.
    """
    # Precise mode (cut_all=False): non-overlapping segmentation, the
    # usual choice for text analysis.
    seg_list = jieba.cut(text, cut_all=False)

    # Load the stopword set once and cache it on the function object;
    # the original re-read and re-parsed the file on every call.
    if not hasattr(segment_text, "_stopwords"):
        segment_text._stopwords = load_stopwords(stopwords_path)
    stopwords = segment_text._stopwords

    # Keep only tokens that are not stopwords.
    return " ".join(tok for tok in seg_list if tok not in stopwords)


# 加载停用词
def load_stopwords(path):
    """Load a stopword set from a text file.

    Args:
        path: Path to a UTF-8 file with one stopword per line.

    Returns:
        A set of stopwords. Blank lines are skipped, so the empty
        string never ends up in the set (the original added "" for
        every blank line in the file).

    Raises:
        OSError: If the file cannot be opened.
    """
    with open(path, "r", encoding="utf-8") as file:
        return {line.strip() for line in file if line.strip()}


if __name__ == '__main__':
    # Demo: segment one sample sentence and print the result.
    sample = "今天天气不错，可以出门散步。"
    result = segment_text(sample)
    print("分词结果：" + result)
