import collections
import os
import re

# line = '(百日咳)<-[:acompany_with {}]-(小儿咳嗽)'
#
# matchObj = re.match(r'[(](.*)[)].+[(](.*)[)]', line)
#
# print(matchObj.group(1))
# print(matchObj.group(2))
#
# typ = {'a': ['disease', 'symptom'], 'b': ['symptom']}
# print(typ.values())

# answers = [{'m.name': '感冒', 'r.name': '症状', 'n.name': '咽喉干燥及灼热感'}, {'m.name': '感冒', 'r.name': '症状', 'n.name': '鼻塞'},
#            {'m.name': '感冒', 'r.name': '症状', 'n.name': '流鼻涕'}, {'m.name': '感冒', 'r.name': '症状', 'n.name': '浑身忽冷忽热'},
#            {'m.name': '感冒', 'r.name': '症状', 'n.name': '发烧'}, {'m.name': '感冒', 'r.name': '症状', 'n.name': '咽痛'},
#            {'m.name': '感冒', 'r.name': '症状', 'n.name': '情绪性感冒'}, {'m.name': '感冒', 'r.name': '症状', 'n.name': '头痛'},
#            {'m.name': '感冒', 'r.name': '症状', 'n.name': '发热伴寒战'}, {'m.name': '干酪性鼻炎', 'r.name': '症状', 'n.name': '鼻干燥'},]
# from operator import itemgetter
# from itertools import groupby
#
# for date, items in groupby(answers, key=itemgetter('m.name')):
#     print(date, list(items))


# from bert_serving.client import BertClient
# import numpy as np
#
#
# def cosine_similarity(u, v):
#     """
#     Cosine similarity reflects the degree of similariy between u and v
#     Arguments:
#         u -- a word vector of shape (n,)
#         v -- a word vector of shape (n,)
#     """
#     distance = 0.0
#
#     # Compute the dot product between u and v
#     dot = np.dot(u, v.T)
#     # Compute the L2 norm of u
#     norm_u = np.sqrt(np.sum(u ** 2))
#     # Compute the L2 norm of v
#     norm_v = np.sqrt(np.sum(v ** 2))
#     # Compute the cosine similarity defined by formula (1)
#     cosine_similarity = dot / (norm_u * norm_v)
#     return cosine_similarity
#
#
# def edit_distance(str1, str2):
#     dp = [[0] * len(str1)] * len(str2)  # dp[i][j]表示表示A串从第0个字符开始到第i个字符和B串从第0个
#     # 字符开始到第j个字符，这两个字串的编辑距离。字符串的下标从1开始。
#     len1 = len(str1);
#     len2 = len(str2);
#     dp = np.array(dp)
#     # print(dp)
#     # print(len1, len2)
#     # 初始化
#     for i in range(len1):
#         dp[0][i] = i;
#     for j in range(len2):
#         dp[j][0] = j;
#     # print(dp)
#     for i in range(1, len2):
#         for j in range(1, len1):
#             # print(i, j)
#             if (str1[j] == str2[i]):
#                 flag = 0
#             else:
#                 flag = 1
#             #
#             # print(dp)
#             # print(flag)
#             dp[i][j] = min(dp[i - 1][j] + 1, min(dp[i][j - 1] + 1, dp[i - 1][j - 1] + flag));
#             # dp[i-1][j]+1表示删掉字符串str2最后一个字符str2[i]
#             # dp[i][j-1]+1表示给字符串添加str1最后一个字符
#             # dp[i-1][j-1]+flag表示改变,相同则不需操作次数,不同则需要,用flag记录
#
#     # return dp, dp[len2 - 1][len1 - 1]
#     if dp[len2 - 1][len1 - 1] == 0:
#         return 0
#     else:
#         return 1/dp[len2 - 1][len1 - 1]
#
#
# def simCal(word, entities,entities_vec):
#     """
#     计算词语和字典中的词的相似度
#     相同字符的个数/min(|A|,|B|)   +  余弦相似度
#     :param word: str
#     :param entities:List
#     :return:
#     """
#     a = len(word)
#     scores = []
#
#     for index,entity in enumerate(entities):
#         sim_num = 0
#         b = len(entity)
#         c = len(set(entity + word))
#         temp = []
#         for w in word:
#             if w in entity:
#                 sim_num += 1
#         # if sim_num != 0:
#         score1 = sim_num / c  # overlap score
#         temp.append(score1)
#         print('score1',score1)
#         try:
#             score2 = cosine_similarity(bc.encode([word]), entities_vec[index])  # 余弦相似度分数
#             print('score2',score2)
#             temp.append(score2)
#         except:
#             pass
#         score3 = 1 - edit_distance(word, entity) / (a + b)  # 编辑距离分数
#         if score3:
#             temp.append(score3)
#             print('score3',score3)
#         score = sum(temp) / len(temp)
#         # if score >= 0.7:
#         #     scores.append((entity, score))
#         scores.append((entity, score))
#
#     scores.sort(key=lambda k: k[1], reverse=True)
#
#     return scores
#
# import faiss
# from bert_serving.client import BertClient
#
# bc = BertClient()  # 这里的IP地址指向服务器
# print('执行')
# targets = ['蓝鼓膜与胆固醇肉芽肿', '耳鼓膜穿孔', '脑积水','脑膜炎','牙齿感觉过敏症','脑瘫']
# temp = bc.encode(targets)
# print('编码')
# dimension = 768
# index_ip = faiss.IndexFlatIP(dimension)
# index_l2 = faiss.IndexFlatL2(dimension)
# index_ip.add(temp)
# print('特征数量：',index_ip.ntotal) # 打印特征数量
# topK = 6
# while 1:
#     temp = input('用户：')
#     answer = bc.encode([temp])
#     D, I = index_ip.search(answer,topK)
#     D2,I2 = index_l2.search(answer,topK)
#     print(D,I)
#
#     for index in I[0]:
#         print(targets[index])
#     print(D2, I2)
#     for index in I2[0]:
#         print(targets[index])
#     print('_____________________')
# print(temp)
# dictionary = {}
# for i in range(len(targets)):
#     dictionary[targets[i]] = temp[i]
# while 1:
#     question = input("用户：")
#     answer = bc.encode([question])
#
#     print('shape：', answer.shape)
#     # for index, item in enumerate(temp):
#     #     cos = cosine_similarity(item, answer)
#     #     dp = edit_distance(question,targets[index])
#     #     print(targets[index], cos[0],dp)
#     print(simCal(question,targets,temp))
#     # print(simCal(question,targets,temp)[0][0])

# import jieba
# while 1:
#     question = input('用户：')
#     seg_list = jieba.cut(question, cut_all=False, HMM=True)
#     print("Default Mode: " + "/ ".join(seg_list))  # 默认模式


# region_file = open('dict/region.txt', 'w+')

# # cur_dir = '/'.join(os.path.abspath(__file__).split('/')[:-1])
# #
# # disease_path = os.path.join(cur_dir, 'dict/disease.txt')
# # department_path = os.path.join(cur_dir, 'dict/department.txt')
# # check_path = os.path.join(cur_dir, 'dict/check.txt')
# # drug_path = os.path.join(cur_dir, 'dict/drug.txt')
# # food_path = os.path.join(cur_dir, 'dict/food.txt')
# # producer_path = os.path.join(cur_dir, 'dict/producer.txt')
# # symptom_path = os.path.join(cur_dir, 'dict/symptom.txt')
# # deny_path = os.path.join(cur_dir, 'dict/deny.txt')
# # disease_wds = [i.strip() for i in open(disease_path, encoding='UTF-8') if i.strip()]
# # department_wds = [i.strip() for i in open(department_path, encoding='UTF-8') if i.strip()]
# # check_wds = [i.strip() for i in open(check_path, encoding='UTF-8') if i.strip()]
# # drug_wds = [i.strip() for i in open(drug_path, encoding='UTF-8') if i.strip()]
# # food_wds = [i.strip() for i in open(food_path, encoding='UTF-8') if i.strip()]
# # producer_wds = [i.strip() for i in open(producer_path, encoding='UTF-8') if i.strip()]
# # symptom_wds = [i.strip() for i in open(symptom_path, encoding='UTF-8') if i.strip()]
# # region_words = set(
# #             department_wds + disease_wds + check_wds + drug_wds + food_wds + producer_wds + symptom_wds)
# # deny_words = [i.strip() for i in open(deny_path, encoding='UTF-8') if i.strip()]
# #
# # region_file.write('\n'.join(list(region_words)))
# # region_file.close()

# dic = {'ha':[1],'pi':[2]}
#
# if 'ha' in dic.keys():
#     if 2 in dic['ha']:
#         print('不用添加')
#     elif 2 not in dic['ha']:
#         print('执行添加操作')
#         dic['ha'].append(2)
#
# print(dic)


'''
    Domain-specific word segmentation tool: pkuseg
'''
# import pkuseg
#
# seg = pkuseg.pkuseg(model_name='medicine',user_dict='dict/region.txt',postag=True)  # 程序会自动下载所对应的细领域模型
# while 1:
#     question = input('用户：')
#     text = seg.cut(question)              # 进行分词
#     print(text)
# q = collections.deque(
#     [('急性', 'b'), ('肠胃炎', 'n'), ('可以', 'v'), ('吃', 'v'), ('茄子', 'n'), ('煲', 'v'), ('豆腐', 'n'), ('吗', 'y'),
#      ('[SEP]', 'sep')])

# q = collections.deque([('吗', 'y'), ('[SEP]', 'sep'), ('可以', 'v')])
#
# while q[1][0] != '[SEP]':
#
#
# if q[1][0] == '[SEP]':
#     q.extend([q.popleft(), q.popleft()])
import ahocorasick

# Scratch check: instantiate an empty Aho-Corasick automaton and
# confirm the concrete type exposed by the pyahocorasick extension.
actree = ahocorasick.Automaton()
print(type(actree))