import json
import logging
import os

from tqdm import tqdm

from cfg.cfg_parser import get_config
from cfg.loginit import init_root_logger
from compact.dependency_preservation import EnDependencyRemain, GS_compact
from darpa_src_parse.enuminfo import Annotations

# Root logger for the whole experiment script; the handlers (file/console)
# are attached later by init_root_logger() in the __main__ block.
logger = logging.getLogger()

# Default to INFO so progress messages are visible even before the
# config-driven handler setup runs.
logger.setLevel(logging.INFO)


class GraphCompactExperiment:
    """Edge-compaction experiment over a provenance graph.

    Streams edges from a JSON-lines file and feeds each one through four
    compaction strategies in parallel — enhanced dependency preservation
    with cycle-hop limits 1, 2 and 3, plus a global-semantic compactor —
    then reports how many edges each strategy removed.
    """

    def __init__(self, v_file, e_file):
        """
        Args:
            v_file: path to the JSON-lines vertex dump.
            e_file: path to the JSON-lines edge dump.
        """
        self.vertex_info_file = v_file
        self.edge_file = e_file

        # Total number of edges fed through compact_edges().
        self.edge_num = 0

        # All vertex records, keyed by vertex id (UUID string).
        self.all_vertices = {}

        # Edge compactors under comparison.
        # NOTE(review): the positional arguments 36 and 1000000 mirror the
        # original call sites; their meaning comes from EnDependencyRemain's
        # signature (not visible here) — confirm before changing.
        self.e_compact_1 = EnDependencyRemain(36, 1, 'e_compact_1', 1000000)
        self.e_compact_2 = EnDependencyRemain(36, 2, 'e_compact_2', 1000000)
        self.e_compact_3 = EnDependencyRemain(36, 3, 'e_compact_3', 1000000)
        self.s_compact = GS_compact('gs_compact', 1000000)

    def show_compact_status(self):
        """Build, log and return a human-readable summary of every
        compactor's results (counts, compaction ratios, cycle stats).

        Returns:
            str: the summary text that was also written to the logger.
        """
        # Guard the ratio computations: before any edge is processed,
        # self.edge_num is 0 and the original code raised ZeroDivisionError.
        total = self.edge_num or 1

        info = f"共有边{self.edge_num}\n"

        # One line per hop-limited compactor (hop counts 1, 2, 3).
        for hop, compact in enumerate(
                (self.e_compact_1, self.e_compact_2, self.e_compact_3),
                start=1):
            info += (f"{hop}跳循环压缩了{compact.compact_num},"
                     f"压缩率为{compact.compact_num / total},"
                     f"其中循环情况(版本更新){compact.cycle}\n")

        info += f"全局语义算法压缩{self.s_compact.compact_num},压缩率为{self.s_compact.compact_num / total}\n"

        # Per-edge-type breakdown for every compactor.
        for compact in [self.e_compact_1, self.e_compact_2, self.e_compact_3, self.s_compact]:
            info += f"{compact.dbname}压缩，对压缩数据的统计为{compact.compact_status}\n"

        logger.info(info)

        return info

    def get_vertex_info_for_store(self, vertex_id):
        """Look up a vertex's label and storable properties by id.

        Args:
            vertex_id: vertex id (UUID string) to resolve.

        Returns:
            tuple: (label, props) — the vertex's 'type' field (may be
            None if the record has no 'type' key) and its full attribute
            dict with the UUID added under Annotations.VERTEX_UUID_KEY.
            NOTE: props aliases the cached record in self.all_vertices,
            so the UUID insertion mutates the cache.

        Raises:
            KeyError: if vertex_id is not present in self.all_vertices.
        """
        vertex_info = self.all_vertices.get(vertex_id)
        if vertex_info is None:
            # The original used `assert`, which is stripped under
            # `python -O`; raise an explicit error instead.
            raise KeyError(f"unknown vertex id: {vertex_id}")

        # Renamed from `type` to avoid shadowing the builtin.
        vertex_type = vertex_info.get('type')

        # For now every attribute is treated as storable.
        props = vertex_info

        # Tag every vertex with its own UUID as a property.
        props[Annotations.VERTEX_UUID_KEY] = vertex_id

        return vertex_type, props

    def compact_edges(self, edge):
        """Feed one edge through every compaction strategy and record
        per-edge-type statistics.

        Args:
            edge: edge record dict, e.g.
                  {"src": "A8AAFB0E-...", "op": "lseek",
                   "dst": "0C51AAE3-...", "time": "1523546697816407532",
                   "datetime": "2018-04-12 15:24:57.816"}

        Returns:
            None
        """
        src_id = edge.get('src')
        dst_id = edge.get('dst')
        edge_type = edge.get('op')

        self.edge_num += 1

        # Compare the effect of cycle-hop limits 1, 2 and 3; the three
        # compactors run independently on the same edge stream.
        for compact in (self.e_compact_1, self.e_compact_2, self.e_compact_3):
            status = compact.enhence_dependency_remain_compact(src_id, dst_id)
            compact.update_compact_status(edge_type, status)

        # Global-semantic compaction.
        s = self.s_compact.global_semantic_compact(src_id, dst_id)
        self.s_compact.update_compact_status(edge_type, s)

        # Complete compaction (currently disabled):
        # self.c_compact.complete_semantic_compact(src_id, dst_id)

    def start_compact_experiment(self):
        """Stream the edge file line by line and compact every edge.

        Logs a progress message every `alert_num` edges and a final
        summary for the 3-hop and global-semantic compactors.
        """
        # Vertex loading is currently disabled — the compaction
        # experiment only needs the edge stream.
        # with open(self.vertex_info_file, 'r') as f_vertex:
        #     for line in tqdm(f_vertex, desc='Processing file'):
        #         vertex = json.loads(line.strip())
        #         self.all_vertices.update(vertex)

        # Emit a progress log every this many edges.
        alert_num = 1000000
        # Edges processed so far (local counter for progress reporting;
        # self.edge_num tracks the authoritative total).
        edge_num = 0

        # `with open(...)` guarantees the file handle is closed even on
        # error; the original wrapped the handle in tqdm's context
        # manager, which does not close the underlying file.
        with open(self.edge_file, 'r') as f_edges:
            for line in tqdm(f_edges, unit='lines'):
                edge = json.loads(line)

                # Run all compaction strategies on this edge.
                self.compact_edges(edge)

                edge_num += 1
                if edge_num % alert_num == 0:
                    logger.info(f"现在已经处理了{edge_num}条边!")

        logger.info(
            f"共处理了{edge_num}条边，压缩了{self.e_compact_3.compact_num}其中循环情况{self.e_compact_3.cycle}\n")
        logger.info(f" 全局语义算法压缩{self.s_compact.compact_num}")



if __name__ == '__main__':
    # Configure logging from the project config file.
    cfg = get_config()
    log_cfg = cfg['compactlog']
    init_root_logger(log_cfg['level'], log_cfg['file'], log_cfg['console'])
    # The graph is built from the log window
    # "2018-04-12 13:59:50.256" .. "2018-04-12 14:39:07.516".

    # Input dataset (theia); swap these paths to run other datasets.
    v_file = r'D:\darpa_experiment\theia\vertex.json'
    e_file = r'D:\darpa_experiment\theia\edge.json'
    experiment = GraphCompactExperiment(v_file, e_file)

    experiment.start_compact_experiment()
    info = experiment.show_compact_status()

    logger.info("正在将结果写入文件")
    res_path = os.path.join(r"C:\Users\23274\Desktop\darpa-e3-detection\output\theia", 'experiment_result.txt')
    # `with` closes the result file even if write() raises (the original
    # leaked the handle on error); explicit UTF-8 so the Chinese summary
    # is written identically regardless of the Windows locale codepage.
    with open(res_path, 'w', encoding='utf-8') as res:
        res.write(info)