#!/usr/bin/env python
# encoding: utf-8
'''
@author: Justin Ruan
@license: 
@contact: ruanjun@whut.edu.cn
@time: 2019-12-28
@desc:
'''

import os, csv, re
from pypinyin import lazy_pinyin, pinyin, Style
from tqdm import tqdm
# from pyhanlp import *
from bidict import bidict
import networkx as nx
import matplotlib.pyplot as plt

from src.util import get_project_root

PROJECT_ROOT = get_project_root()


def sort_pinyin(hanzi_list):
    """Sort Chinese words by their pinyin romanization.

    Each word is keyed by the concatenation of the first pinyin candidate of
    each character, in tone-number style (``Style.TONE2``), and the words are
    ordered lexicographically by that key.

    :param hanzi_list: iterable of Chinese word strings
    :return: a new list with the words sorted by pinyin
    """
    def _pinyin_key(word):
        # e.g. "身体" -> "shen1ti3"; pinyin() returns one candidate list per
        # character, we take the first candidate of each.
        return ''.join(candidates[0] for candidates in pinyin(word, style=Style.TONE2))

    # Sorting with a key function avoids the pinyin->word dict of the previous
    # implementation, which silently dropped words whose pinyin collided
    # (homophones overwrote each other and the survivor was emitted twice).
    return sorted(hanzi_list, key=_pinyin_key)


class Vocabulary(object):
    """Loads and maintains the entity vocabularies used for NER.

    Each vocabulary is a plain-text file under
    ``<project>/data/vocabulary/<CODE>.txt`` containing one or more
    comma-separated words per line.
    """

    def __init__(self):
        self.vocabulary_path = PROJECT_ROOT + "//data//vocabulary"
        # Entity types the NER model can infer directly.
        self.entity_type = {"治疗": "TREATMENT",
                            "身体部位": "BODY",
                            "症状和体征": "SIGNS",
                            "疾病和诊断": "DISEASE",
                            "检查和检验": "CHECK",
                            }
        # Entity types inferred by hand-written rules instead of the model.
        self.extra_type = {
            "症状和体征A": "SIGNS_AFF",
            "分割子句": "SECTION",
            "结果": "RESULT"
        }

        self.SET_TREATMENT = set()
        self.SET_BODY = set()
        self.SET_SIGNS = set()
        self.SET_SIGNS_AFF = set()
        self.SET_DISEASE = set()
        self.SET_CHECK = set()
        self.SET_SECTION = set()

        # (display name, target set) pairs; load_vocabulary() fills the sets.
        self.SET_ENTITIES = [("治疗", self.SET_TREATMENT),
                             ("身体部位", self.SET_BODY),
                             ("症状和体征", self.SET_SIGNS),
                             ("疾病和诊断", self.SET_DISEASE),
                             ("检查和检验", self.SET_CHECK)]

        self.EXTRA_ENTITIES = [("症状和体征A", self.SET_SIGNS_AFF),
                               ("分割子句", self.SET_SECTION)]

    def _read_word_set(self, vocabulary_filename):
        """Read a vocabulary file into a set of non-empty words.

        Lines may hold several comma-separated words; blanks are skipped.

        :param vocabulary_filename: path of the UTF-8 vocabulary file
        :return: set of word strings (original case, deduplicated)
        """
        voc_set = set()
        with open(vocabulary_filename, 'r', encoding='utf-8') as load_f:
            for line in load_f:
                for w in line.strip().split(','):
                    if len(w) > 0:
                        voc_set.add(w)
        return voc_set

    def prepare_vocabulary(self, vocabulary_name):
        """Normalize a vocabulary file in place.

        Reads ``<vocabulary_path>/<vocabulary_name>.txt``, deduplicates the
        words, sorts them by pinyin and rewrites the file one word per line.

        :param vocabulary_name: base name of the vocabulary file (no suffix)
        """
        vocabulary_filename = "{}//{}.txt".format(self.vocabulary_path, vocabulary_name)
        voc_set = self._read_word_set(vocabulary_filename)

        sorted_voc = sort_pinyin(voc_set)
        print(sorted_voc, "\n", len(sorted_voc))

        with open(vocabulary_filename, 'w', encoding='utf-8') as write_f:
            for word in sorted_voc:
                write_f.write(word)
                write_f.write("\n")

    def load_vocabulary(self):
        """Populate every entity set from its vocabulary file.

        Words are upper-cased on load so later matching is case-insensitive.
        The rule-based affirmative signs (SIGNS_AFF) are merged into
        SET_SIGNS afterwards.
        """
        for entities, entity_types in [(self.SET_ENTITIES, self.entity_type),
                                       (self.EXTRA_ENTITIES, self.extra_type)]:
            for name, voc_set in entities:
                entity_code = entity_types[name]
                vocabulary_filename = "{}//{}.txt".format(self.vocabulary_path, entity_code)
                voc_set.update(w.upper() for w in self._read_word_set(vocabulary_filename))

        # Merge in place so the reference stored in SET_ENTITIES stays valid;
        # the previous implementation rebound SET_SIGNS to a new set and then
        # had to rebuild the whole SET_ENTITIES list.
        self.SET_SIGNS.update(self.SET_SIGNS_AFF)
        return


class BodyStructure(object):
    """Tree of human body parts loaded from ``人体结构.csv``.

    ``body_nodes`` maps node id -> part name (a bidict, so names can be
    looked up in reverse), and ``body_relations`` maps child id -> parent id.
    """

    def __init__(self, offset=0):
        """
        :param offset: value added to every id read from the CSV, so the ids
                       can be shifted into a larger graph's id space.
        """
        self.vocabulary_path = PROJECT_ROOT + "//data//vocabulary"
        self.body_nodes = bidict()  # id -> name (invertible)
        self.body_relations = {}    # child id -> parent id
        self.offset = offset

    def load_body_structure(self):
        """Read the body-structure CSV; columns are (id, name, parent_id).

        A negative parent id marks a root node: no relation is recorded.
        Raises AssertionError when a part name appears twice, since duplicate
        names would break the inverse (name -> id) lookup.
        """
        filename = "{}//人体结构.csv".format(self.vocabulary_path)
        with open(filename, 'r', encoding="UTF-8") as f:
            reader = csv.reader(f)
            for i, row in enumerate(reader):
                if i == 0:
                    continue  # skip the header row
                node_id = int(row[0]) + self.offset
                name = row[1]
                father_id = int(row[2]) + self.offset
                assert name not in self.body_nodes.inverse, "重复"
                self.body_nodes[node_id] = name
                if father_id >= 0:
                    self.body_relations[node_id] = father_id

    def get_body_id(self, name):
        """Return the node id for a body-part name, or -1 if unknown.

        :param name: part name; surrounding whitespace is ignored
        """
        # Single dict lookup with a default instead of a membership test
        # followed by a second lookup.
        return self.body_nodes.inverse.get(name.strip(), -1)

    def draw(self):
        """Render the body-structure tree with networkx and show it."""
        G = nx.Graph()
        # Artificial root node. NOTE(review): uses raw id 0 even when an
        # offset is set -- confirm this matches the CSV's root convention.
        G.add_node(0, type='B', name="人体")

        for node_id, entity in self.body_nodes.items():
            G.add_node(node_id, type='B', name=entity)

        for node_id, father_id in self.body_relations.items():
            G.add_edge(node_id, father_id, value="B")

        plt.clf()

        plt.rcParams['font.family'] = ['sans-serif']
        plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK labels correctly
        plt.figure(num="人体结构", figsize=(20, 12), dpi=100, )

        # Possible layouts: spring_layout, random_layout, circular_layout, shell_layout.
        nx.draw(G, pos=nx.spring_layout(G), node_color='b', edge_color='r',
                labels=self.body_nodes,
                with_labels=True, font_size=12, node_size=20)
        plt.show()
        # filename = "{}//results//人体结构.jpg".format(PROJECT_ROOT)
        # plt.savefig(filename)
