#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__author__ = 'Justin'
__mtime__ = '2020-12-16'

"""


import csv
import os
import re

import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from bidict import bidict
# from pyhanlp import HanLP

import util
from vocabulary import Vocabulary, BodyStructure
from syntax.biaffine_parser import Biaffine_Parser

# Absolute path of the repository root; every data/result path below is built from it.
PROJECT_ROOT = util.get_project_root()


class Tag_Assembler(object):
    """Assemble NER-tagged pathology-diagnosis records into knowledge-graph
    instances (networkx graphs) and into training samples for the biaffine
    dependency parser.

    Typical flow: loading_single_instance() -> initialize_dictionary() ->
    create_single_instance_graph() / create_dp_train_samples() -> show().
    """

    def __init__(self):
        self.root_path = PROJECT_ROOT + "//data//"
        self.voc = Vocabulary()
        # Body-part ontology; created lazily in initialize_dictionary().
        self.body_structure = None

        # Chinese category name -> symbolic entity type.
        self.entity_type = {"治疗": "TREATMENT",
                            "身体部位": "BODY",
                            "症状和体征": "SIGNS",
                            "症状和体征A": "SIGNS_AFF",
                            "疾病和诊断": "DISEASE",
                            "检查和检验": "CHECK",
                            "分割子句": "SECTION",
                            "结果": "RESULT"}

        # Chinese category name -> single-letter code; "$" marks a section split.
        self.entity_code = {
            "治疗": "T",
            "身体部位": "B",
            "症状和体征": "S",
            "症状和体征A": "F",
            "疾病和诊断": "D",
            "检查和检验": "C",
            "结果": "R",
            "分割子句": "$",
        }

        # Single-letter entity code -> relation label stored on graph edges.
        self.relation_type = {
            "T": "treated",
            "B": "relate_to",
            "S": "has_symptom",
            "F": "has",
            "D": "located_at",
            "C": "checked",
            "R": "has_result",
            "A": "record",
        }
        # Per-category entity vocabularies, (re)built by initialize_dictionary().
        self.SET_TREATMENT = set()
        self.SET_BODY = set()
        self.SET_SIGNS = set()
        self.SET_SIGNS_AFF = set()
        self.SET_DISEASE = set()
        self.SET_CHECK = set()

        # First body entity of the current sub-sentence / of the whole record,
        # maintained as side effects by find_body_tag().
        self.body_root = None
        self.first_body_root = None
        # Parent/child dependency relations of the sentence being processed.
        self.p_relations, self.c_relations = None, None
        self.syntax = Biaffine_Parser()

    def loading_single_instance(self, relative_path, filename):
        '''
        Load one pathology-diagnosis record and normalize it.
        The annotation file *.txt contains one entity per line with four
        tab-separated columns: phrase, start offset, end offset, BI tag.
        :param relative_path: sub-directory under self.root_path (PROJECT_ROOT + "//data//")
        :param filename: file name of the record
        :return: (original_txt, records, original_txt_block) where records maps
                 block id -> list of (entity, tag, begin, end) and
                 original_txt_block maps block id -> (text, begin, end)
        '''
        record_path = os.path.join(self.root_path + relative_path, filename)
        original_path = os.path.join(self.root_path + relative_path, filename.replace(".txt", ".txtoriginal.txt"))
        print("Input: ", record_path, "\n", original_path)

        with open(original_path, 'r', encoding='utf-8') as original_file:
            # Normalize punctuation: half-width parentheses -> full-width,
            # ASCII comma -> Chinese comma. NOTE: only the first line is read.
            original_txt = original_file.readline().replace('(', '（').replace(')', '）').replace(',', '，')
        assert len(original_txt) > 0, "读到错误的original_txt"

        records = {}
        original_txt_block = {}
        block = []
        section_pos = [0]
        entity_count ={"B":0, "X":0}
        for line in open(record_path, 'r', encoding='UTF-8'):
            # Each line carries four columns: phrase, begin, end, BI tag.
            res = line.strip().split('\t')
            if len(res) != 4:
                continue
            entity = res[0]
            begin = int(res[1])
            end = int(res[2])
            txt_label = res[3]
            # Split the whole record into blocks at "分割子句" (section) tags.
            if txt_label in self.entity_code.keys():
                label = self.entity_code[txt_label]
                if label == "B":
                    entity_count["B"] += 1
                    block.append((entity, label, begin, end + 1))
                elif label in ["T","F","S","D"]:
                    entity_count["X"] += 1
                    block.append((entity, label, begin, end + 1))
                elif label == "$":
                    # On a section delimiter, package the accumulated lines into
                    # a block — but only if it has at least one body entity and
                    # one other (T/F/S/D) entity.
                    if entity_count["B"] > 0 and entity_count["X"] > 0:
                        id = len(section_pos)
                        records[id] = block
                        block = []
                        entity_count ={"B":0, "X":0}
                        # Also keep the raw text of this block for later
                        # syntactic analysis:
                        # (text, start offset in full text, end offset).
                        original_txt_block[id] = (original_txt[section_pos[-1]:begin], section_pos[-1], begin)

                        section_pos.append(end + 1)
                else:
                    # Store (entity, tag, begin, end) using absolute 0-based
                    # offsets into the full record text; end is exclusive.
                    block.append((entity, label, begin, end + 1))
            else:
                assert False, "读到异常的类型{}！".format(txt_label)
        # Handle the trailing block after the last delimiter.
        # NOTE(review): unlike the in-loop case above, the text here starts at
        # section_pos[-1] + 1, skipping one extra character — confirm intended.
        if len(block) > 0:
            id = len(section_pos)
            records[id] = block
            original_txt_block[id] = (original_txt[section_pos[-1] + 1:], section_pos[-1] + 1, len(original_txt))

        return original_txt, records, original_txt_block

    def initialize_dictionary(self, record_list):
        '''
        Initialize the knowledge-graph vocabulary: collect every entity and
        assign it a globally unique integer code, partitioned by category.
        :param record_list: iterable of record dicts, each mapping
                            block id -> list of (entity, tag, begin, end)
        :return: bidict mapping entity text <-> integer code
        '''
        self.SET_TREATMENT.clear()
        self.SET_BODY.clear()
        self.SET_SIGNS.clear()
        self.SET_SIGNS_AFF.clear()
        self.SET_DISEASE.clear()
        self.SET_CHECK.clear()
        all_set = set()

        for records in record_list:
            for item in records.values():
                for entity, label, begin, end in item:
                    if label == "T":  # treatment
                        self.SET_TREATMENT.add(entity)
                    elif label == "B":  # body part
                        self.SET_BODY.add(entity)
                    elif label == "S":  # signs and symptoms
                        self.SET_SIGNS.add(entity)
                    elif label == "F":  # signs and symptoms, second variant
                        self.SET_SIGNS_AFF.add(entity)
                    elif label == "D":  # disease and diagnosis
                        self.SET_DISEASE.add(entity)
                    elif label == "C":  # checks and tests
                        self.SET_CHECK.add(entity)
                    elif label == "R":
                        continue
                    else:
                        # Unreachable for known labels: the assert always fires
                        # here, acting as a guard against unexpected tag values.
                        assert label == "R", "读到异常的类型{}！".format(label)

                    all_set.add(entity)

        # Every entity must belong to exactly one category set.
        check_tag = len(all_set) - len(self.SET_TREATMENT) - len(self.SET_BODY) - len(self.SET_SIGNS) \
                    - len(self.SET_DISEASE) - len(self.SET_CHECK) - len(self.SET_SIGNS_AFF)
        assert check_tag == 0, "存在Entity出现在不同的类别的集合中！"

        dict_entity_code = bidict()

        # Ids below 100 are reserved for special purposes such as abstract
        # nodes (e.g. the diagnosis-record root).
        offset = 0
        # Abstract root node for the IHC checks gets id 1.
        id = 1
        global_id = offset + id
        item = "IHC"
        dict_entity_code[item] = global_id

        # Diagnosis-data entities: one disjoint id range per category.
        offset = 100
        id = 1
        for item in self.SET_TREATMENT:
            global_id = id + offset
            dict_entity_code[item] = global_id
            id += 1

        offset = 10000
        id = 1
        for item in self.SET_SIGNS:
            global_id = id + offset
            dict_entity_code[item] = global_id
            id += 1

        offset = 20000
        id = 1
        for item in self.SET_DISEASE:
            global_id = id + offset
            dict_entity_code[item] = global_id
            id += 1

        offset = 30000
        id = 1
        for item in self.SET_CHECK:
            global_id = id + offset
            dict_entity_code[item] = global_id
            id += 1

        offset = 40000
        id = 1
        for item in self.SET_SIGNS_AFF:
            global_id = id + offset
            dict_entity_code[item] = global_id
            id += 1

        # Body parts take their ids from the body-structure ontology (50000+).
        offset = 50000
        self.body_structure = BodyStructure(offset=offset)
        self.body_structure.load_body_structure()
        for item in self.SET_BODY:
            dict_entity_code[item] = self.body_structure.get_body_id(item)

        return dict_entity_code

    def segment_sentence(self, text, start):
        '''
        Split text into sub-sentences at punctuation marks.
        :param text: input text
        :param start: character offset of this text within the whole record
        :return: list of (sub-sentence, begin, end); offsets are absolute
                 0-based positions in the whole record
        '''
        # Alternative, broader delimiter sets kept for reference:
        # pattern = r',|\.|/|;|\'|`|\[|\]|<|>|\?|:|"|\{|\}|\~|!|@|#|\$|%|\^|&|\(|\)|-|=|\_|\+|，|。|、|；|‘|’|【|】|·|！| |…|（|）'
        # pattern = r',|\(|\)|（|）'
        pattern = r'，'
        result_list = re.split(pattern, text)
        sub_sent = []
        for item in result_list:
            len_item = len(item)
            if len_item == 0:
                start += 1  # skip the delimiter itself
            else:
                sub_sent.append((item, start, start + len_item))
                start = start + len_item + 1  # also skip the trailing delimiter

        return sub_sent

    def filtering_tags(self, sub_tags, record_list, part_ranges):
        '''
        Extract the tags that fall inside the coordinate ranges in part_ranges,
        grouping consecutive same-label tags under one index.
        :param sub_tags: dict of already-collected tags,
                         index -> list of (entity, label, begin, end)
        :param record_list: dict of blocks with (entity, label, start, end)
                            tuples for the whole record
        :param part_ranges: list of (begin, end) ranges to extract tags from
        :return: updated sub_tags, a ragged two-level structure
        Side effect: "C"/"R" tags are appended to self.Check_tag_set, which
        must have been initialized by the caller beforehand.
        '''
        index = len(sub_tags.keys())
        if index > 0:
            max_index = max(sub_tags.keys())
            last_label = sub_tags[max_index][0][1]
            index = max_index
        else:
            last_label = 'None'
            index = -1

        for records in record_list.values():
            for entity, label, e_begin, e_end in records:
                if label in ['C','R']:
                    self.Check_tag_set.append((entity, label, e_begin, e_end))
                else:
                    for part_begin, part_end in part_ranges:
                        # part_begin,        part_end
                        #      e_begin, e_end           # containment test
                        if (e_begin >= part_begin) and (e_end <= part_end):
                            # Start a new group whenever the label changes;
                            # otherwise extend the current group.
                            if label != last_label:
                                sub_tags[index + 1] = [(entity, label, e_begin, e_end)]
                                last_label = label
                                index += 1
                            else:
                                sub_tags[index].append((entity, label, e_begin, e_end))
                        # NOTE(review): this break is unconditional, so only the
                        # first range in part_ranges is ever tested. All current
                        # callers pass a single-element list, so behavior is
                        # unaffected — confirm whether the break was meant to
                        # sit inside the if above.
                        break
        return sub_tags

    def find_body_tag(self, sub_tags):
        '''
        Collect the body entities of a sub-sentence and remember the first one.
        :param sub_tags: dict index -> list of (entity, label, begin, end)
        :return: list of body entity strings found
        Side effects: resets self.body_root to the first body entity found
        here, and sets self.first_body_root once per record.
        '''
        self.body_root = None
        result = []
        for index in sorted(sub_tags):
            for entity, label, begin, end in sub_tags[index]:
                if label == 'B':
                    result.append(entity)
                    if self.body_root is None:
                        self.body_root = (entity, label, begin, end)
                        if self.first_body_root is None:
                            self.first_body_root = self.body_root
        return result

    def create_single_instance_graph(self, record_list, dict_entity_code, original_txt_block):
        '''
        Build the sub-graph (knowledge graph) for one whole diagnosis record.
        :param record_list: dict of blocks, each a list of (entity, tag, begin, end)
        :param dict_entity_code: bidict entity <-> code
        :param original_txt_block: dict block id -> (text, begin, end)
        :return: networkx Graph for the record
        '''
        G = nx.Graph()

        # Hierarchy: record -> sentence (delimited by section markers)
        #                   -> sub-sentence (delimited by commas).
        for records in record_list.values():  # process each block
            for entity, label, begin, end in records:  # one annotation line (four columns)
                if label in ["R"]:  # handled separately
                    continue
                # Create all nodes up front.
                node_id = dict_entity_code[entity]
                G.add_node(node_id, type=label, name=entity, pos=(begin, end))

        ###################################################################################
        sub_tags = {}
        original_txt_sub_block = {}
        self.Check_tag_set = []
        body_nodes = []

        for index, (sentence, begin, end) in original_txt_block.items():
            # Dependency analysis.
            # NOTE(review): self.syntax_analysis is not defined in this class as
            # shown here — create_dp_train_samples calls methods on self.syntax
            # instead. Confirm this helper exists elsewhere (e.g. a subclass).
            self.p_relations, self.c_relations = self.syntax_analysis(sentence, begin)

            # Split the block's sentence at punctuation marks.
            original_txt_sub_block[index] = self.segment_sentence(sentence, begin)

            # Process every comma-delimited sub-sentence of this block.
            for sub_sentence, s_begin, s_end in original_txt_sub_block[index]:
                # Extract the entities/labels that fall inside this sub-sentence.
                sub_tags = self.filtering_tags(sub_tags, record_list, [(s_begin, s_end)])

                if len(sub_tags) > 0:
                    body_nodes.extend(self.find_body_tag(sub_tags))
                    # Build the graph edges for this sub-sentence; anything not
                    # handled is carried over in sub_tags.
                    sub_tags = self.creat_edges(G, sub_tags, dict_entity_code)

        # Add edges between related body parts.
        if len(body_nodes) > 1:
            self.process_body_edges(G, body_nodes, dict_entity_code)

        self.process_check_nodes(G, dict_entity_code)
        return G

    def create_dp_train_samples(self, record_list, dict_entity_code, original_txt_block):
        '''
        Build one graph per block and turn each into a dependency-parser
        training sample aligned with the parser's own word segmentation.
        :param record_list: dict block id -> list of (entity, tag, begin, end)
        :param dict_entity_code: bidict entity <-> code
        :param original_txt_block: dict block id -> (text, begin, end)
        :return: dict block id -> (G, sentence, seg_result, score_matrix,
                 graph_input, graph_label); blocks without edges are skipped
        '''
        sub_tags = {}
        original_txt_sub_block = {}
        self.Check_tag_set = []

        result = {}
        for index, (sentence, begin, end) in original_txt_block.items():
            # One graph per block (sub-sentence level).
            G = nx.Graph()
            # Hierarchy: record -> sentence (delimited by section markers)
            #                   -> sub-sentence (delimited by commas).
            for entity, label, e_begin, e_end in record_list[index]:  # one annotation line
                if label in ["R", "C"]:  # handled separately
                        continue
                node_id = dict_entity_code[entity]
                G.add_node(node_id, type=label, name=entity, pos=(e_begin, e_end))

            # Word segmentation and POS tagging.
            seg_result = self.syntax.segment.cut(sentence)
            # Dependency parsing.
            relations, score_matrix = self.syntax.parse_dependency(seg_result, sentence, begin)
            self.p_relations, self.c_relations = self.syntax.relation_analysis(relations)

            # Split the block's sentence at punctuation marks.
            original_txt_sub_block[index] = self.segment_sentence(sentence, begin)

            body_nodes = []
            # Process every comma-delimited sub-sentence of this block.
            for sub_sentence, s_begin, s_end in original_txt_sub_block[index]:
                # Extract the entities/labels that fall inside this sub-sentence.
                sub_tags = self.filtering_tags(sub_tags, record_list, [(s_begin, s_end)])

                if len(sub_tags) > 0:
                    body_nodes.extend(self.find_body_tag(sub_tags))
                    if len(sub_tags) > 1:
                        # Build the graph edges for this sub-sentence.
                        sub_tags = self.creat_edges(G, sub_tags, dict_entity_code)

            # Add edges between related body parts.
            if len(body_nodes) > 1:
                self.process_body_edges(G, body_nodes, dict_entity_code)

            # Align the parser's segmentation with the NER entity spans.
            entity_dic = self.alignment_entity(sentence, begin, seg_result, record_list[index])
            graph_input, graph_label = self.generate_graph_label(G, entity_dic, score_matrix, dict_entity_code)

            if len(G.edges) > 0:
                result[index] = (G, sentence, seg_result, score_matrix, graph_input, graph_label)

        return result

    def alignment_entity(self, sentence, begin, seg_result, entities):
        '''
        Align the parser's word segmentation with the NER entity spans.
        :param sentence: block text
        :param begin: absolute offset of sentence within the whole record
        :param seg_result: iterable of (word, POS) pairs covering the sentence
        :param entities: list of (entity, label, begin, end) NER spans
        :return: dict mapping 1-based word index -> (entity, label) for words
                 fully contained inside a B/D/S/T entity span
        '''
        seg_entities = []
        start = begin
        end = 0
        for word, _ in seg_result:
            end = start + len(word)
            seg_entities.append((word, start, end))
            start = end
        assert len(sentence)== (end -begin), "Error: 分词与句子长度不匹配"

        entity_dic = {}
        for entity, label, e_begin, e_end in entities:
            if label in ["B", "D", "S", "T"]:
                for i, (word, start, end) in enumerate(seg_entities):
                    # e_begin, ..., e_end
                    #     start, end          # word span contained in entity span
                    if e_begin <= start and end <= e_end:
                        assert word in entity, "%s 与 %s不匹配" % (word, entity)
                        entity_dic[i + 1] = (entity, label)

        return entity_dic

    def generate_graph_label(self, net, entity_dic, score_matrix, dict_entity_code):
        '''
        Build the model input/label matrices from the word-entity alignment and
        the block graph's edges.
        :param net: networkx graph of this block
        :param entity_dic: dict 1-based word index -> (entity, label)
        :param score_matrix: parser score tensor; its last two dims (b, c) must
                             be square and size the output matrices
        :param dict_entity_code: bidict entity <-> code
        :return: (graph_input, graph_label) as (b, b) numpy arrays —
                 graph_input column i holds the entity-type code of word i;
                 graph_label[i, j] is 1 when the two words' entities share an
                 edge in net
        '''
        a, b, c = score_matrix.shape
        assert b == c, "不是方阵"
        graph_label = np.zeros((b, c))
        graph_input = np.zeros((b, c))

        # Numeric codes for the four entity types used as model input.
        entity_code = {
            "T": 1, "B": 2, "S": 3, "D": 4,
        }

        relations = net.edges
        for i in entity_dic.keys():
            entity_a, label_a = entity_dic[i]
            graph_input[:, i] = entity_code[label_a]
            id_a = dict_entity_code[entity_a]
            for j in entity_dic.keys():
                if i == j:
                    continue
                entity_b, _ = entity_dic[j]
                id_b = dict_entity_code[entity_b]
                if (id_a, id_b) in relations or (id_b, id_a) in relations:
                    graph_label[i, j] = 1

        return graph_input, graph_label

    def process_body_edges(self, G, body_nodes, dict_entity_code):
        '''
        Connect pairs of body-part nodes that are directly related (parent or
        child of each other) in the body-structure ontology.
        :param G: graph being built
        :param body_nodes: list of body entity strings
        :param dict_entity_code: bidict entity <-> code
        '''
        nodes_id = []
        for node in body_nodes:
            nodes_id.append(dict_entity_code[node])
        count = len(nodes_id)
        for i in range(count):
            a_node = nodes_id[i]
            for j in range(i + 1, count):
                b_node = nodes_id[j]
                if ((self.body_structure.body_relations[a_node] == b_node) or
                        (self.body_structure.body_relations[b_node] == a_node)):
                    G.add_edge(a_node, b_node, value=self.relation_type["B"])

    def grouping_tags(self, sub_tags):
        '''
        Regroup the collected tags by label.
        NOTE: only B/D/S/T labels are expected here; any other label would
        raise KeyError (C/R tags are filtered out earlier).
        :param sub_tags: dict index -> list of (entity, label, begin, end)
        :return: dict label -> list of (entity, label, begin, end)
        '''
        group_tags = {"B": [], "D": [], "S": [], "T": [],}
        for index in sorted(sub_tags):
            for entity, label, begin, end in sub_tags[index]:
                group_tags[label].append((entity, label, begin, end))
        return group_tags

    def creat_edges(self, G, tags, dict_entity_code):
        '''
        Create the graph edges for one sub-sentence.
        (Method name keeps its original spelling for caller compatibility.)
        :param G: sub-graph being built
        :param tags: dict of entities and labels for this sub-sentence
        :param dict_entity_code: entity code table, a bidict
        :return: the tags that cannot be handled yet (carried over to the
                 next sub-sentence)
        '''
        # Tags that cannot be processed in this sub-sentence.
        reserved_tags = {}
        group_tags = self.grouping_tags(tags)
        for label, item in group_tags.items():
            # Look for serial/parallel relations within each label group.
            if len(item) > 1:
                # Chain serially-related entities; keep the chain endpoints.
                start_tags, end_tags = self.adjust_tags_by_syntax(G, item, label, dict_entity_code)
                if label == 'B':
                    group_tags[label] = end_tags
                else:
                    group_tags[label] = start_tags

        len_B = len(group_tags["B"])
        len_D, len_S, len_T = len(group_tags["D"]), len(group_tags["S"]), \
                                     len(group_tags["T"])

        # Case 1: body tags plus any other tag — body tags become the hub.
        if len_B > 0 and len_D + len_S + len_T > 0:
            for body_node in group_tags["B"]:
                body_id = dict_entity_code[body_node[0]]
                for label, other_nodes in group_tags.items():
                    if label != "B":
                        for other in other_nodes:
                            current_id = dict_entity_code[other[0]]
                            G.add_edge(body_id, current_id, value=self.relation_type[label])

        # Case 2: disease tags present (no body here) — diseases become the
        # hub and are linked to the current/global body root.
        elif len_D > 0:
            if self.body_root is not None:
                body_id = dict_entity_code[self.body_root[0]]
            elif self.first_body_root is not None:
                body_id = dict_entity_code[self.first_body_root[0]]
            else:
                body_id = None

            for disease_node in group_tags["D"]:
                disease_id = dict_entity_code[disease_node[0]]
                if body_id is not None:
                    G.add_edge(body_id, disease_id, value=self.relation_type["D"])

                for label, other_nodes in group_tags.items():
                    if label not in ["D", "B"]:
                        for other in other_nodes:
                            current_id = dict_entity_code[other[0]]
                            G.add_edge(disease_id, current_id, value=self.relation_type[label])

        # Case 3: only sign tags — connect them to the global body root.
        # NOTE(review): unlike case 2 there is no None fallback here, so this
        # raises if first_body_root is still None — confirm that cannot happen.
        elif len_S > 0:
            if self.body_root is not None:
                body_id = dict_entity_code[self.body_root[0]]
            else:
                body_id = dict_entity_code[self.first_body_root[0]]
            for sign_node in group_tags["S"]:
                sign_id = dict_entity_code[sign_node[0]]
                G.add_edge(body_id, sign_id, value=self.relation_type["S"])

        # Otherwise carry the unprocessed tags over to the next sub-sentence.
        else:
            reserved_tags = tags

        return reserved_tags

    def process_check_nodes(self, G, dict_entity_code):
        '''
        Attach the abstract "IHC" node (id 1) to the record's first body root
        and connect every collected check ("C") tag to it.
        NOTE(review): raises if self.first_body_root is still None (record with
        no body entity at all) — confirm that cannot happen upstream.
        '''

        body_id = dict_entity_code[self.first_body_root[0]]

        # Handle the Check tags via an abstract hub node.
        G.add_node(1, type='A', name="IHC")
        G.add_edge(body_id, 1, value=self.relation_type["C"])

        root_id = 1
        # Connect each check item to the abstract IHC node.
        for entity, label, e_begin, e_end in self.Check_tag_set:
            if label == 'C':
                check_node_id = dict_entity_code[entity]
                # The edge attribute must not be named "type".
                G.add_edge(root_id, check_node_id, value=self.relation_type["C"])

    def adjust_tags_by_syntax(self, G, taglist, label, dict_entity_code):
        '''
        Detect relations between entities of the same label; chain the ones
        that are serially related and return the head/tail entities of each
        chain.
        :param G: graph being built
        :param taglist: tags of one label to process
        :param label: the label shared by all of taglist
        :param dict_entity_code: bidict entity <-> code
        :return: (head entities, tail entities) after chaining
        '''
        # With more than one tag of the same label, serial/parallel relations
        # may exist among them.
        if len(taglist) > 1:
            new_start_tags = []
            new_end_tags = []
            # Detect serial relations among taglist items; the result contains
            # indices into taglist.
            relations = self.detect_relation_between_tags(taglist, self.c_relations)
            if len(relations) > 0:  # several serial groups; the groups themselves are parallel
                # Chain each serial group by text position and keep endpoints.
                for items in relations:
                    len_item = len(items)
                    serial_tags = []
                    # Map indices back to entity tuples.
                    for i in items:
                        serial_tags.append(taglist[i])
                    # Sort by position in the text.
                    serial_tags.sort(key=lambda x: x[2])
                    # Chain head to tail.
                    start_node = serial_tags[0]
                    start_id = dict_entity_code[start_node[0]]
                    for i in range(1, len_item):
                        next_node_id = dict_entity_code[serial_tags[i][0]]
                        # The edge attribute must not be named "type".
                        G.add_edge(start_id, next_node_id, value=self.relation_type[label])
                        start_id = next_node_id
                    # Keep the head and tail entities of this chain.
                    new_start_tags.append(serial_tags[0])
                    new_end_tags.append(serial_tags[-1])

                return new_start_tags, new_end_tags
            else:  # everything is parallel; heads and tails are identical
                return taglist, taglist
        # Single element only.
        else:
            return taglist, taglist

    def detect_relation_between_tags(self, tags, relations):
        '''
        Group tags using the dependency relations: two tags whose spans overlap
        the two endpoint spans of some relation go into the same group.
        :param tags: tags to check, (entity, label, begin, end) tuples
        :param relations: dependency relations; values are
                          (p_start, p_end, p, n_start, n_end, n) tuples
        :return: list of sets of tag indices, one set per group
        '''
        len_tags = len(tags)
        results = {}
        # Pairwise: check whether a relation links the two tags, i.e. their
        # spans overlap the relation's endpoint spans.
        for i in range(0, len_tags - 1):
            begin_i = tags[i][2]
            end_i = tags[i][3]
            for j in range(i + 1, len_tags):
                begin_j = tags[j][2]
                end_j = tags[j][3]

                for p_start, p_end, p, n_start, n_end, n in relations.values():
                    if p_start > n_start:
                        p_start, p_end, n_start, n_end = n_start, n_end, p_start, p_end
                    # begin_i, end_i, begin_j, end_j
                    # p_start, p_end, n_start, n_end ; end offsets are exclusive
                    inter_p = (p_start < end_i) and (begin_i < p_end )
                    inter_n = (n_start < end_j) and (begin_j < n_end)

                    if inter_p and inter_n:
                        results[(i, j)] = True
                        break

        # Used below to recover isolated (ungrouped) elements.
        all_set = set(np.arange(len_tags))

        merged_result = []
        # Merge connected pairs into groups.
        for first, second in results.keys():
            s = False
            for item in merged_result:
                if first in item or second in item:
                    item.update({first, second})
                    s = True
                    break
            if not s:
                merged_result.append({first, second})

            all_set.discard(first)
            all_set.discard(second)
        # Add each isolated element as its own singleton group.
        for i in all_set:
            merged_result.append({i})

        return merged_result

    def show(self, result, dict_entity_code, record_id):
        '''
        Render every block graph of a record as stacked subplots and save the
        figure under results//pic2021//.
        :param result: output of create_dp_train_samples()
        :param dict_entity_code: bidict entity <-> code (inverse used for labels)
        :param record_id: record number used in the output file name
        '''
        count_pic = len(result)
        plt.clf()
        plt.rcParams['font.family'] = ['sans-serif']
        plt.rcParams['font.sans-serif'] = ['SimHei']  # needed to render Chinese labels
        plt.figure(num="实例子图", figsize=(6, 4*count_pic), dpi=100, clear=True)

        for index, (G, sentence, seg_result, score_matrix, graph_input, graph_label) in result.items():
            plt.subplot(count_pic,1,index,)
            import re  # NOTE: shadows the module-level import; redundant but harmless
            title = re.sub(r"(.{60})", "\\1\n", sentence.replace('$', ' '))  # insert manual line breaks
            plt.title(title,
                      y=-0.1, fontdict={'fontsize': 12, })  # 'horizontalalignment': 'left'

            labels = {}
            for i in G.nodes:
                labels[i] = dict_entity_code.inverse[i]
            # Layout alternatives: spring_layout, random_layout, circular_layout,
            # shell_layout, spectral_layout.
            nx.draw(G, pos=nx.spring_layout(G), node_color='b', edge_color='r',
                    # labels=dict_entity_code.inverse,
                    labels=labels,
                    with_labels=True, font_size=12, node_size=20)

        filename = "{}//results//pic2021//病理诊断-{:05d}.jpg".format(PROJECT_ROOT, record_id)
        plt.savefig(filename)

        return