import os
import pandas
import random
import string
from py2neo import Graph, Node, NodeMatcher, Subgraph, Schema

from src.database.config import DataSet
from src.time_utils.transformer import TimeTransformer


def is_number(s):
    """Return True if *s* can be parsed as a float.

    Note: strings such as 'nan' or 'inf' are parseable floats and
    therefore return True.  Inputs that float() rejects outright
    (e.g. None, lists) return False instead of raising.
    """
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-string/non-numeric input such as None,
        # which the original version let propagate.
        return False


# requirements: pip install monotonic
def push_to_neo4j(path="", offset=0, dataset=None, machine=None, source=None):
    """Import a tab-separated event file into neo4j.

    Each row may reference a 'start' node, an 'end' node and a 'time'
    stamp; depending on which of the three are present the row becomes
    a binary edge, a '1'-typed unary self-loop edge, plain node
    attributes, or an isolated placeholder node.  Edges are buffered
    into a CSV file and bulk-loaded at the end (see create_edge_all).

    :param path: path of the TSV file; must contain 'start', 'end',
        'time' and 'relation_type' columns plus the configured columns.
    :param offset: numeric offset added to each timestamp to build the
        unified 'uni_time' attribute.
    :param dataset: dataset name (node label / unique_name prefix part).
    :param machine: machine name (node label / unique_name prefix part).
    :param source: source name (node label / unique_name prefix part).
    """
    # 'time' may be empty now, so it can no longer serve as the index column.
    df = pandas.read_csv(path, sep='\t')
    d: DataSet = DataSet(dataset).load_from_config()
    columns: dict = d.machines[machine].sources[source].columns
    column_names = columns.keys()

    graph = Graph("http://localhost:7474", auth=("neo4j", "123456"))
    matcher = NodeMatcher(graph)
    unique_name_start = dataset + "-" + machine + "-" + source + "-"
    # Map of unique_name -> Node for everything already in the database.
    node_map = node_list2map(list(matcher.match(dataset, machine, source)))

    _ensure_unique_name_constraint(graph, dataset)

    length = df.shape[0]
    tx = graph.begin()

    create_nodes = []
    push_nodes_set = set()
    isolate_names = [''] * length
    csv_lines = []

    # Continue numbering isolated placeholder nodes after those already stored.
    hastime = gen_isolatednode_idx(node_map, "isolated_node_hastime") + 1
    timeless = gen_isolatednode_idx(node_map, "isolated_node_timeless") + 1

    start_col = df['start'].values
    end_col = df['end'].values
    time_col = df['time'].values

    def ensure_node(raw_name):
        # Create and register the node for raw_name unless already known.
        unique_name = unique_name_start + str(raw_name)
        if unique_name not in node_map:
            node = create_new_node(str(raw_name), dataset, machine, source)
            create_nodes.append(node)
            node_map[unique_name] = node

    # ---- pass 1: make sure every node referenced by a row exists ----
    for i in range(length):
        has_start = not pandas.isna(start_col[i])
        has_end = not pandas.isna(end_col[i])

        if has_start:
            ensure_node(start_col[i])
        if has_end:
            ensure_node(end_col[i])
        if not has_start and not has_end:
            # Row with neither endpoint: create an isolated placeholder node.
            if not pandas.isna(time_col[i]):
                base_name = "isolated_node_hastime" + str(hastime)
                hastime += 1
            else:
                base_name = "isolated_node_timeless" + str(timeless)
                timeless += 1
            node = create_new_node(base_name, dataset, machine, source)
            create_nodes.append(node)
            # BUGFIX: key isolated nodes by their full unique_name; the
            # original used the bare name, which could never match the
            # prefixed unique_name recorded for the final push lookup.
            unique_name = unique_name_start + base_name
            isolate_names[i] = unique_name
            node_map[unique_name] = node

        if i % 100 == 0:
            print("creating ", i, '/', length, "nodes")

    print('create', len(create_nodes))
    if len(create_nodes) > 0:
        tx.create(Subgraph(create_nodes))
    graph.commit(tx)
    print('create finish')

    # ---- pass 2: turn each row into edge rows / node attributes ----
    for i in range(length):
        has_start = not pandas.isna(start_col[i])
        has_end = not pandas.isna(end_col[i])
        has_time = not pandas.isna(time_col[i])

        if has_start and has_end:
            # Binary relation start -> end (with or without a timestamp).
            name_a = unique_name_start + str(start_col[i])
            name_b = unique_name_start + str(end_col[i])
            node_a = node_map[name_a]
            node_b = node_map[name_b]

            relation = {}
            if has_time:
                relation['time'], relation['uni_time'] = _time_attrs(time_col[i], offset)
            else:
                # No timestamp: uni_time is NULL as well.
                relation['time'] = "NULL"
                relation['uni_time'] = "NULL"

            for col_name in column_names:
                value = _coerce_value(columns[col_name], df[col_name].values[i])
                annotation = getattr(columns[col_name], 'annotation', 'edge')
                if annotation == "edge":
                    relation[col_name] = value
                elif annotation == "start":
                    node_a[col_name] = value
                else:
                    node_b[col_name] = value

            push_new_node(push_nodes_set, node_a)
            push_new_node(push_nodes_set, node_b)
            create_edge(tx, dataset, name_a, name_b,
                        str(df['relation_type'].values[i]), dict2str(relation), csv_lines)

        elif has_time and (has_start or has_end):
            # Exactly one endpoint plus a timestamp: unary relation,
            # stored as a '1'-typed self-loop on that endpoint.
            raw = start_col[i] if has_start else end_col[i]
            name_a = unique_name_start + str(raw)
            node_a = node_map[name_a]

            relation = {}
            relation['time'], relation['uni_time'] = _time_attrs(time_col[i], offset)

            for col_name in column_names:
                value = _coerce_value(columns[col_name], df[col_name].values[i])
                if getattr(columns[col_name], 'annotation', 'edge') == "edge":
                    relation[col_name] = value
                else:
                    node_a[col_name] = value

            push_new_node(push_nodes_set, node_a)
            create_edge(tx, dataset, name_a, name_a, '1', dict2str(relation), csv_lines)

        else:
            # Attribute-only rows: all columns are written onto one node,
            # and no edge row is emitted.
            if has_start:
                name_a = unique_name_start + str(start_col[i])
            elif has_end:
                name_a = unique_name_start + str(end_col[i])
            else:
                # Isolated row: fetch the placeholder created in pass 1.
                # BUGFIX: the original reused a stale node_a left over
                # from a previous iteration instead of looking it up.
                name_a = isolate_names[i]
            node_a = node_map[name_a]

            if has_time:
                node_a['time'], node_a['uni_time'] = _time_attrs(time_col[i], offset)
            else:
                node_a['time'] = "NULL"
                node_a['uni_time'] = "NULL"

            for col_name in column_names:
                node_a[col_name] = _coerce_value(columns[col_name], df[col_name].values[i])

            push_new_node(push_nodes_set, node_a)

        if i % 100 == 0:
            print("processing ", i, '/', length, "nodes")

    # ---- pass 3: push modified nodes, then bulk-create edges ----
    push_nodes = [node_map[name] for name in push_nodes_set]
    print('push', len(push_nodes))
    if len(push_nodes) > 0:
        tx = graph.begin()
        tx.push(Subgraph(push_nodes))
        graph.commit(tx)
    print("push finish")

    print('creating edge')
    # Random salt keeps concurrent imports from clobbering each other's CSV.
    salt = ''.join(random.sample(string.ascii_letters + string.digits, 8))
    create_edge_all(graph, dataset, unique_name_start + salt, csv_lines)
    print('create edge finish')


def _ensure_unique_name_constraint(graph, dataset):
    """Guarantee a uniqueness constraint on (:dataset).unique_name,
    replacing a plain index if one was created by an earlier version."""
    schema = Schema(graph)
    if 'unique_name' in schema.get_uniqueness_constraints(dataset):
        return
    if ('unique_name',) in schema.get_indexes(dataset):
        schema.drop_index(dataset, 'unique_name')
    schema.create_uniqueness_constraint(dataset, "unique_name")


def _coerce_value(column, raw):
    """Convert one raw cell to the type declared in the column config.

    NaN cells are kept as the string 'nan' regardless of the declared
    type, matching the original import behaviour.
    """
    if column.type == 'string' or str(raw) == "nan":
        return str(raw)
    if column.type == 'number':
        return float(raw)
    if column.type == 'int':
        return int(raw)
    if column.type == 'bool':
        return bool(raw)
    # Unknown declared type: the original left a possibly-stale local
    # here; fall back to the string form so behaviour is deterministic.
    return str(raw)


def _time_attrs(raw_time, offset):
    """Return the ('time', 'uni_time') attribute pair for a timestamp cell.

    'time' keeps the original textual value; 'uni_time' is the numeric
    timestamp (converted via TimeTransformer when not already numeric)
    shifted by *offset*.
    """
    ts = str(raw_time)
    numeric = ts if is_number(ts) else TimeTransformer.time2ts(ts)
    return ts, str(float(numeric) + offset)


def push_new_node(push_nodes_set, node):
    """Record *node*'s unique_name so it gets pushed in the final batch.

    set.add is already idempotent, so the original membership check
    before adding was redundant and has been removed.
    """
    push_nodes_set.add(node['unique_name'])


def create_new_node(node_name, dataset: str, machine: str, source: str):
    """Build a fresh py2neo Node labelled with dataset/machine/source.

    The node's 'unique_name' property is the dash-joined combination of
    the three labels and the node name, matching the keys used by the
    importer's node map.
    """
    node = Node(name=node_name)
    for label in (dataset, machine, source):
        node.add_label(label)
    node['unique_name'] = "-".join([dataset, machine, source, str(node_name)])
    return node


def check_node_exists(node_list, unique_name):
    """Return the first node whose unique_name matches, or None if absent."""
    matches = list(node_list.where("_.unique_name = '" + unique_name + "'"))
    return matches[0] if matches else None


def gen_isolatednode_idx(node_map, name):
    """Count node_map keys containing *name* as a substring.

    Used to resume the numbering of isolated placeholder nodes after
    those already present in the database.
    """
    return sum(1 for key in node_map if name in key)


def node_list2map(node_list) -> dict:
    """Index the given nodes by their 'unique_name' property."""
    return {node['unique_name']: node for node in node_list}


def create_edge(tx, dataset, unique_name_a, unique_name_b, relation_type, params, csv_lines):
    """Queue one edge row for the later bulk import (see create_edge_all).

    Nothing is written to the database here; `tx` and `dataset` are kept
    in the signature for interface compatibility but are not used.
    """
    row = (str(unique_name_a),
           str(unique_name_b),
           str(relation_type),
           str(params))
    csv_lines.append(row)


def create_edge_all(graph, dataset, csv_name, csv_lines):
    """Bulk-create all buffered edges via LOAD CSV + apoc.

    Writes the queued edge rows into neo4j's import directory as a TSV
    file, runs a periodic-commit LOAD CSV that creates one relationship
    per row via apoc, then deletes the temp file.

    :param graph: connected py2neo Graph.
    :param dataset: node label used to match edge endpoints.
    :param csv_name: unique basename (without extension) of the temp CSV.
    :param csv_lines: list of (start, end, relation_type, params) tuples.
    """
    # FIXME: the neo4j import directory should not be hard-coded.
    home_dir = os.path.expanduser('~')
    csv_path = os.path.join(home_dir, 'neo4j', 'import', csv_name + '.csv')

    # Write and fully close the file BEFORE asking neo4j to read it
    # (the original ran LOAD CSV inside the `with` block and also
    # called the redundant f.close() manually).
    with open(csv_path, 'w') as f:
        f.write('start\tend\trelation_type\tparams\n')
        for line in csv_lines:
            f.write('\t'.join(line) + '\n')

    try:
        command = ("""
        USING PERIODIC COMMIT 1000
        LOAD CSV WITH HEADERS FROM "%s" as line
        FIELDTERMINATOR '\\t'
        MATCH (a:`%s` {unique_name: line.start}), (b:`%s` {unique_name: line.end})
        CALL apoc.create.relationship(a, line.relation_type, apoc.convert.fromJsonMap(line.params), b) YIELD rel
        RETURN count(*)
        """ % (str('file:///' + csv_name + '.csv'), str(dataset), str(dataset)))
        print(command)
        graph.run(command)
    finally:
        # Remove the temp file even if the import query fails, so the
        # import directory does not accumulate orphaned CSVs.
        os.remove(csv_path)


def dict2str(params):
    """Serialize a dict into a Cypher-map-like string literal.

    Ints and floats are rendered bare; everything else is stringified
    and rendered via repr(), which escapes backslashes (think Windows
    paths) and sidesteps quoting issues.
    """
    rendered = []
    for key, val in params.items():
        if type(val) in (int, float):
            text = str(val)
        else:
            text = repr(str(val))
        rendered.append(str(key) + ": " + text)
    return "{" + ", ".join(rendered) + "}"


def get_rtype(relationship):
    """Extract the relation type from a relationship's string form.

    Parses text shaped like "(a)-[:TYPE {...}]->(b)": takes everything
    after the first ':' up to the first space.
    """
    after_colon = str(relationship).split(':')[1]
    return after_colon.split(' ')[0]
