import jieba
import key_words


def add_words(words):
    """Register each word in *words* with jieba's segmentation dictionary.

    Args:
        words: An iterable of word strings. ``None`` is tolerated and
            treated as empty (callers may pass ``dict.get(...)`` results).
    """
    # `or ()` guards against None — the original raised TypeError when a
    # task dict lacked the "word_key_list" key.
    for word in words or ():
        jieba.add_word(word)


# Module import side effect: seed jieba's dictionary with every task's
# keywords so later jieba.cut() calls keep them as whole tokens.
for task in key_words.TASK_LIST:
    # Default to an empty tuple so a task without "word_key_list"
    # doesn't pass None into add_words.
    add_words(task.get("word_key_list", ()))


def fenci(context):
    """Match *context* against the task whose keywords overlap it most.

    Short-text similarity: segments *context* with jieba, then scores each
    task in ``key_words.TASK_LIST`` by how many of its keywords appear
    among the segmented tokens.

    Args:
        context: The input text to segment and classify.

    Returns:
        A ``(task_dict, overlap_count)`` tuple for the best-scoring task.
        Ties are broken by list order (first task wins), matching the
        original stable-sort behavior.

    Raises:
        ValueError: If ``key_words.TASK_LIST`` is empty.
    """
    base_set = set(jieba.cut(context))

    scores = [
        # `or ()` tolerates tasks missing "word_key_list" instead of
        # raising TypeError on set(None).
        (task, len(set(task.get("word_key_list") or ()) & base_set))
        for task in key_words.TASK_LIST
    ]
    # max() is O(n) versus the original sorted(...)[0]'s O(n log n) and,
    # like the stable reverse sort, returns the FIRST maximal element.
    return max(scores, key=lambda pair: pair[1])
