# NOTE(review): this entire file is commented-out (disabled) legacy code and
# executes nothing. The dead script below read rows from data/data.csv,
# loaded an IT-domain vocabulary from data/THUOCL_it.txt, extracted the top-5
# keywords of each row's column 5 with jieba.analyse.extract_tags, and printed
# keywords that exactly matched the vocabulary. Consider deleting it (it is
# recoverable from version control) or restoring it as live code.
# import csv
# import re
#
# import jieba
# import jionlp
#
# import jieba.analyse as anls
#
# rcsv = open('data/data.csv', 'r', encoding="utf-8")
# reader = csv.reader(rcsv)
# data = list(reader)
# # Read THUOCL_it.txt (one vocabulary word per line, optionally tab-separated
# # from a second field; words are stored lowercased)
# it_words = set()
# with open('data/THUOCL_it.txt', 'r', encoding='utf-8') as f:
#     for line in f:
#         split_result = line.strip().split('\t')
#         if len(split_result) == 2:
#             word, _ = split_result
#             it_words.add(word.lower())
#         elif len(split_result) == 1:
#             word = split_result[0]
#             it_words.add(word.lower())
# print(it_words)
#
#
# # Define the fuzzy-match function
# # NOTE(review): fuzzy_match is defined but never called in this script
# def fuzzy_match(word, it_words):
#     pattern = re.compile('.*{}.*'.format(re.escape(word)), re.IGNORECASE)
#     for it_word in it_words:
#         if pattern.match(it_word):
#             return it_word
#     return None
#
#
# # Define the exact-match function
# def exact_match(t_word, words):
#     tt_word = t_word.lower()
#     if tt_word in words:
#         return t_word  # return the original word, not the lowercased one
#     return None
#
#
# result = []
# for i in range(1, len(data)):
#     description = data[i][5]
#     # print(jionlp.extract_chinese(description))
#     # seg_list3 = jieba.cut(description)
#     # print('默认精确模式：', '/'.join(seg_list3))
#     tags = jieba.analyse.extract_tags(description, topK=5)
#     print(u"关键词:")
#     print(" ".join(tags))
#     # Match extracted keywords against the IT vocabulary
#     for word in tags:
#         it_word = exact_match(word, it_words)
#         if it_word:
#             print(it_word)
#     print('----------------------------------------------------------------------------------------------------')

    # for x, w in anls.extract_tags(description, topK=5, withWeight=True):
    #     print(str(i) + ':%s' % x)
