# -*- coding: utf-8 -*-

"""
Deploy to neo4j using Cypher batch updates.

To update a neo4j database, we should do the following in strict order:

1. load the fix files (entity_fix.json and link_fix.json)
2. delete all relations listed in link_fix (these entries contain only `@id`; since a change of `@type` forces a change of `@id`)
3. delete all nodes from entity_fix (contains only `@id`)
4. merge all nodes from entity_fix
5. merge all relations from link_fix

This module depends on the APOC library installed on the target neo4j database
"""

from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division

from typing import Optional
from pprint import pprint

from six import text_type

from pyspark import RDD

from neo4j.v1 import GraphDatabase

from kgpipeline.job import KgJob, JobConfigError
from kgpipeline.jsonutil import list_item_to_string, entity_json_encode, entity_json_decode
from kgpipeline.cnschema import SYSTEM_PROPERTIES

import logging

logger = logging.getLogger(__name__)


# Properties that must never be written to neo4j: the system properties plus
# the pipeline bookkeeping key "@diff".
# BUG FIX: ``set.add`` returns None, so the original
# ``SYSTEM_PROPERTIES.copy().add("@diff")`` bound IGNORED_PROPERTIES to None;
# build the set with a union instead.
IGNORED_PROPERTIES = SYSTEM_PROPERTIES | {"@diff"}


def neo4j_values(item):
    """
    Coerce an entity dict's property values into neo4j-storable types, in place.

    System properties (keys in ``SYSTEM_PROPERTIES``) are left untouched.
    For every other key: ``None`` and text values pass through unchanged,
    list elements are stringified via ``list_item_to_string``, nested dicts
    are serialized with ``entity_json_encode``, and anything else is
    converted to text.

    :param item: entity dict; mutated in place
    :return: the same dict, for chaining
    """
    for key in item.keys():
        if key in SYSTEM_PROPERTIES:
            continue
        value = item.get(key)
        if value is None or isinstance(value, text_type):
            # already storable as-is
            continue
        if isinstance(value, list):
            item[key] = [list_item_to_string(element) for element in value]
        elif isinstance(value, dict):
            item[key] = entity_json_encode(value)
        else:
            item[key] = text_type(value)
    return item


def partition_properly(rdd, unit=100000):  # type: (RDD, Optional[int]) -> Optional[RDD]
    """
    Prepare an rdd for batch operations by bounding partition size.

    We assume each node can process roughly ``unit`` (default 100,000) items
    in appropriate time, so the rdd is repartitioned to ``count // unit + 1``
    partitions.

    :param rdd: input rdd
    :param unit: preferred number of items per partition
    :return: the repartitioned rdd, or None when the rdd is empty
    """
    # counting costs an extra pass, but lets us size partitions exactly
    total = rdd.count()
    logger.info("partition properly count: {}".format(total))
    if total <= 0:
        return None
    return rdd.repartition(total // unit + 1)


def neo4j_delete_nodes(tx, items):
    """
    Batch-delete Thing nodes (and any attached relationships) by kgid.

    :param tx: neo4j transaction object
    :param items: list of dicts, each carrying the ``kgid`` of a node to drop
    :return: the result returned by ``tx.run``
    """
    statement = ("UNWIND $batch as row\n"
                 "MATCH (n:Thing {kgid: row.kgid})\n"
                 "DETACH DELETE n")
    return tx.run(statement, {"batch": items})


def neo4j_delete_relationships(tx, items):
    """
    Batch-delete relationships by kgid, matched between their endpoint nodes.

    :param tx: neo4j transaction object
    :param items: list of dicts with ``kgid``, ``in`` and ``out`` keys
    :return: the result returned by ``tx.run``
    """
    # BUG FIX: the MATCH and DELETE clauses were fused together with no
    # separator ("...out})DELETE r"); add the missing newline so the
    # statement is well-formed and consistent with the other batch queries.
    return tx.run("UNWIND $batch as row\n"
                  "MATCH (n:Thing {kgid: row.in})-[r {kgid: row.kgid}]->(m:Thing {kgid: row.out})\n"
                  "DELETE r", {"batch": items})


def neo4j_merge_nodes(tx, items):
    """
    Batch-upsert Thing nodes and (re)apply their labels.

    Each row is split into its ``@type`` (used as the label list via
    ``apoc.create.setLabels``) and its remaining properties; ``@type`` and the
    pipeline bookkeeping key ``@diff`` are stripped before the property write.

    :param tx: neo4j transaction object
    :param items: list of decoded entity dicts
    :return: the result returned by ``tx.run``
    """
    statement = ("UNWIND $batch as row\n"
                 "WITH row.`@type` as kgtype, apoc.map.removeKeys(row, ['@type', '@diff']) as prop\n"
                 "MERGE (n:Thing {kgid: prop.kgid})\n"
                 "ON CREATE SET n = prop\n"
                 "ON MATCH SET n += prop\n"
                 "WITH n, kgtype, count(*) as total\n"
                 "WHERE kgtype is not null\n"
                 "WITH n, kgtype, total\n"
                 "CALL apoc.create.setLabels(n, kgtype) yield node\n"
                 "RETURN total, count(*) as changed")
    return tx.run(statement, {"batch": items})


def neo4j_merge_relationships(tx, items):
    """
    Batch-upsert relationships between existing Thing nodes.

    The relationship type comes from the first element of ``@type``; the
    ``in``/``out`` endpoint ids and the bookkeeping key ``@diff`` are stripped
    from the stored properties.

    :param tx: neo4j transaction object
    :param items: list of decoded link dicts
    :return: the result returned by ``tx.run``
    """
    statement = ("UNWIND $batch as row\n"
                 "MATCH (n:Thing {kgid: row.in}),(m:Thing {kgid: row.out})\n"
                 "CALL apoc.merge.relationship(n, row.`@type`[0], {kgid: row.kgid},"
                 " apoc.map.removeKeys(row, ['in', 'out', '@diff']), m) yield rel\n"
                 "RETURN count(*)")
    return tx.run(statement, {"batch": items})


def get_neo4j_batch_operator(uri, username, password, batch_size, operator):
    """
    Build a ``mapPartitionsWithIndex`` function that writes one Spark
    partition to neo4j in bounded batches.

    :param uri: bolt uri of the target neo4j instance
    :param username: neo4j user name
    :param password: neo4j password
    :param batch_size: maximum number of items per write transaction
    :param operator: transaction function called as ``operator(tx, batch)``
    :return: a function ``(index, items) -> iterator of (index, result)``,
        where result is the string "success!" or the raised exception, so the
        driver can inspect per-partition failures (see
        ``print_and_check_result``)
    """
    def __batch__(index, items):
        try:
            driver = GraphDatabase.driver(uri, auth=(username, password))
            try:
                batch_list = []
                for item in items:
                    batch_list.append(item)
                    # BUG FIX: was ``> batch_size``, which flushed only at
                    # batch_size + 1 items, exceeding the configured limit
                    if len(batch_list) >= batch_size:
                        with driver.session() as session:
                            session.write_transaction(operator, batch_list)
                        batch_list = []
                # flush the final, possibly partial batch
                if batch_list:
                    with driver.session() as session:
                        session.write_transaction(operator, batch_list)
            finally:
                # BUG FIX: close the driver even when a transaction raises;
                # previously a failure leaked the connection pool
                driver.close()
            yield index, "success!"
        except Exception as e:
            # deliberately swallowed: exceptions are collected on the Spark
            # driver and re-raised by print_and_check_result
            yield index, e

    return __batch__


def print_and_check_result(res):
    """
    Inspect collected per-partition results, logging successes and failing
    fast on the first error.

    :param res: iterable of ``(partition_index, result)`` pairs as produced
        by the operator built in ``get_neo4j_batch_operator``
    :raises Exception: re-raises the first exception encountered
    """
    for index, outcome in res:
        if isinstance(outcome, Exception):
            # a partition failed remotely; surface it on the driver
            raise outcome
        logger.info("partition {0}:".format(index))
        pprint(outcome)


class KgNeo4jIncJob(KgJob):
    """Pipeline job that incrementally deploys entity/link diffs to neo4j.

    Takes two inputs -- a nodes diff (entity_fix) and a relationships diff
    (link_fix) -- and applies them in the strict order described in the
    module docstring: delete relationships, delete nodes, merge nodes,
    merge relationships.
    """

    def process(self, inputs):
        """Run the incremental deployment.

        :param inputs: exactly two input groups; ``inputs[0][0]`` is the
            entity diff and ``inputs[1][0]`` the link diff (presumably Spark
            DataFrames with a JSON string ``value`` column and an ``@diff``
            marker per record -- TODO confirm against the upstream job)
        :return: an empty list; this job produces no downstream outputs
        :raises JobConfigError: when required config values or inputs are
            missing
        """
        # bolt connection info
        uri = self.config.get("uri")
        if not uri:
            raise JobConfigError("Input parameter `uri` is missing!")

        username = self.config.get("username")
        if not username:
            raise JobConfigError("Input parameter `username` is missing!")

        password = self.config.get("password")
        if not password:
            raise JobConfigError("Input parameter `password` is missing!")

        # tuning knobs: items per Spark partition and items per write tx
        partition_unit = self.config.get("partition_unit", 100000)
        batch_size = self.config.get("batch_size", 1000)

        logger.info("start neo4j incremental deployment job...")

        if len(inputs) != 2:
            raise JobConfigError("Needs 2 inputs: neo4j nodes diff and relationships diff!")

        entity_fix = inputs[0][0]
        link_fix = inputs[1][0]

        entity_count = 0
        link_count = 0

        # 1. load the fix data; decode the JSON payload and coerce property
        # values into neo4j-storable types
        if entity_fix:
            entity_count = entity_fix.count()
            logger.info("entity count: {}".format(entity_count))
            entity_fix = entity_fix.rdd.map(lambda x: neo4j_values(entity_json_decode(x["value"])))

        if link_fix:
            link_count = link_fix.count()
            logger.info("link count: {}".format(link_count))
            link_fix = link_fix.rdd.map(lambda x: neo4j_values(entity_json_decode(x["value"])))

        # 2. delete all relations from link_fix (contains only `@id`, as @type changes, @id must change)
        if link_count > 0:
            link_to_delete = partition_properly(link_fix.filter(lambda x: x.get("@diff") == "delete"), partition_unit)
            # partition_properly returns None when the filtered rdd is empty
            if link_to_delete:
                logger.info("relationships to delete partitions: {}".format(link_to_delete.getNumPartitions()))
                op = get_neo4j_batch_operator(uri, username, password, batch_size, neo4j_delete_relationships)
                # collect() forces execution; each element is (partition, result)
                res = link_to_delete.mapPartitionsWithIndex(op).collect()
                print_and_check_result(res)

        # 3. delete all nodes from entity_fix (contains only `@id`)
        if entity_count > 0:
            entity_to_delete = partition_properly(entity_fix.filter(lambda x: x.get("@diff") == "delete"),
                                                  partition_unit)
            if entity_to_delete:
                logger.info("nodes to delete partitions: {}".format(entity_to_delete.getNumPartitions()))
                op = get_neo4j_batch_operator(uri, username, password, batch_size, neo4j_delete_nodes)
                res = entity_to_delete.mapPartitionsWithIndex(op).collect()
                print_and_check_result(res)

        # 4. merge all nodes from entity_fix
        if entity_count > 0:
            entity_to_merge = partition_properly(entity_fix.filter(lambda x: x.get("@diff") != "delete"),
                                                 partition_unit)
            if entity_to_merge:
                logger.info("nodes to merge partitions: {}".format(entity_to_merge.getNumPartitions()))
                op = get_neo4j_batch_operator(uri, username, password, batch_size, neo4j_merge_nodes)
                res = entity_to_merge.mapPartitionsWithIndex(op).collect()
                print_and_check_result(res)

        # 5. merge all relations from link_fix
        if link_count > 0:
            link_to_merge = partition_properly(link_fix.filter(lambda x: x.get("@diff") != "delete"), partition_unit)
            if link_to_merge:
                logger.info("relationships to merge partitions: {}".format(link_to_merge.getNumPartitions()))
                op = get_neo4j_batch_operator(uri, username, password, batch_size, neo4j_merge_relationships)
                res = link_to_merge.mapPartitionsWithIndex(op).collect()
                print_and_check_result(res)

        logger.info("Neo4j incremental deployment job ended.")

        return []


# Expose this job in the pipeline job registry under the name "neo4jinc".
KgJob.register("neo4jinc", KgNeo4jIncJob)
