from collections import defaultdict
import json
import csv


class Node:
    """An AP (access point) node in a directed acyclic graph.

    Edges are stored as plain lists of other ``Node`` objects in
    ``parents`` (direct parents) and ``children`` (direct children).
    """

    __slots__ = ['id', 'name', 'lanIpAddr', 'siteId', 'parents', 'children']

    def __init__(self, id, name, lanIpAddr, siteId, parent=None, child=None):
        self.id = id
        self.name = name
        self.lanIpAddr = lanIpAddr
        self.siteId = siteId

        # Always start with fresh, per-instance edge lists.  The optional
        # parent/child arguments merely seed the first edge when supplied.
        self.parents = []
        self.children = []
        self.add_parent(parent)
        self.add_child(child)

    def add_parent(self, parent=None):
        """Append a direct parent node.

        A ``None`` argument is a no-op.  (Previously it *reset* the
        parent list, so an accidental bare ``add_parent()`` call would
        silently wipe every existing edge — fixed.)
        """
        if parent is not None:
            self.parents.append(parent)

    def add_child(self, child=None):
        """Append a direct child node; a ``None`` argument is a no-op."""
        if child is not None:
            self.children.append(child)

    def __repr__(self):
        return '<Node {}>'.format(self.id)

    def to_dict(self):
        """Return the node's identifying fields as a plain dict
        (edges excluded), suitable for JSON serialization."""
        return {field: getattr(self, field)
                for field in ['id', 'name', 'lanIpAddr', 'siteId']}


class Dag:
    """Operations over a directed acyclic graph of AP nodes.

    ``nodes`` is a mapping of node id -> Node.  Traversal results are
    nested dicts keyed by Node: ``{node: [{child: [...]}, ...]}``.
    """

    def __init__(self, nodes):
        self.nodes = nodes

    def _iter_graph(self, head, direction):
        """Walk from *head* along *direction* ('children' or 'parents').

        Returns a nested dict keyed by Node (``head`` included):
        ``{head: [{next: [...]}, ...]}``; a leaf maps to an empty list.
        """
        tree = defaultdict(list)

        def walk(root, acc):
            acc[root]  # touch the key so leaves still appear with []
            for nxt in getattr(root, direction):
                acc[root].append(walk(nxt, defaultdict(list)))
            return dict(acc)

        walk(head, tree)
        return dict(tree)

    def _iter_children(self, node):
        """Traverse the descendants of *node*."""
        return self._iter_graph(node, 'children')

    def _iter_parents(self, node):
        """Traverse the ancestors of *node*."""
        # BUG FIX: was self._iter_graph(self.node, ...) which raised
        # AttributeError — 'node' is the argument, not an attribute.
        return self._iter_graph(node, 'parents')

    def _deepest_nodes(self, node, direction='parents'):
        """Return the deepest nodes reachable from *node* along *direction*.

        With the default direction these are the root nodes.  Always
        returns a list of Node objects.
        """
        graph = self._iter_graph(node, direction)
        deepest = []

        def descend(dct):
            for key, subtrees in dct.items():
                if not subtrees:
                    deepest.append(key)
                else:
                    for subtree in subtrees:
                        descend(subtree)

        descend(graph)
        return deepest

    def search_graph(self, nodeid):
        """Return every connected graph containing the node with id *nodeid*,
        each rendered as a nested dict rooted at one of its root nodes."""
        roots = self._deepest_nodes(self.nodes[nodeid])
        return [self._iter_children(root) for root in roots]

    def search_to_json(self, nodeid, *args, **kwargs):
        """Return the connected graphs containing *nodeid* as a JSON string.

        Extra positional/keyword arguments are forwarded to ``json.dumps``.
        Each node is rendered as ``{"nodeRes": {...}, "childNodes": [...]}``.
        """
        graphs = self.search_graph(nodeid)

        def pack_dict(dct):
            """Convert one ``{Node: [subtrees]}`` level into JSON-safe form."""
            if not dct:
                return dict()

            node, subtrees = next(iter(dct.items()))

            dct['nodeRes'] = node.to_dict()
            dct['childNodes'] = [pack_dict(sub) for sub in subtrees]
            del dct[node]

            return dct

        return json.dumps([pack_dict(d) for d in graphs], *args, **kwargs)


def generate_dag(info_file, adj_file, info_encoding='utf8',
                 info_delimiter=',', info_types=None, major_index=0,
                 adj_encoding='utf8'):
    """
    Build the DAG model from the input files.

    Node-info CSV file (info_file):
    id,name,lanIpAddr,siteId
    x,xxxx,xxxx.xxxx.xxxx.xxxx,xxxxxx
    x,xxxx,xxxx.xxxx.xxxx.xxxx,xxxxxx

    Adjacency file (adj_file), one whitespace-separated pair per line:
    x x
    where the right-hand id is the parent of the left-hand id.

    Parameters
        - info_file:      node-info CSV file path
        - adj_file:       adjacency file path
        - info_encoding:  node-info file encoding
        - info_delimiter: node-info CSV delimiter
        - info_types:     per-column converters (default [int, str, str, int])
        - major_index:    index of the primary-key column
        - adj_encoding:   adjacency file encoding

    Returns a Dag over every node that appears in the adjacency file
    (nodes present only in the info file are not instantiated).
    """

    if info_types is None:
        info_types = [int, str, str, int]

    infos = dict()

    with open(info_file, encoding=info_encoding) as f:
        reader = csv.reader(f, delimiter=info_delimiter)
        header = next(reader)
        for row in reader:
            data = {k: t(v) for k, v, t in zip(header, row, info_types)}
            infos[info_types[major_index](row[major_index])] = data

    nodes = dict()

    # BUG FIX: the adjacency file was opened without a context manager,
    # leaking the handle; it also crashed on blank lines.
    with open(adj_file, encoding=adj_encoding) as f:
        for pair in f.read().splitlines():
            if not pair.strip():
                continue  # tolerate blank / whitespace-only lines

            c, p = map(int, pair.split())

            for k in (c, p):
                if k not in nodes:
                    nodes[k] = Node(**infos[k])

            nodes[c].add_parent(nodes[p])
            nodes[p].add_child(nodes[c])

    return Dag(nodes)


if __name__ == '__main__':
    # Build the DAG from the input files, then dump every connected
    # graph containing node 5 as pretty-printed JSON.
    dag = generate_dag(info_file='aps-info.csv', adj_file='aplist.txt')
    json_content = dag.search_to_json(5, indent=4)
    # BUG FIX: the output file was opened without a context manager,
    # leaving the handle unclosed (and the write possibly unflushed).
    with open('ap.json', 'w', encoding='utf8') as f:
        f.write(json_content)
