# -*- coding: utf-8 -*-

"""
Load data to Transwarp StellarDB incrementally.

Generally we follow the exact same process as the full loading job when loading updated and added data to the database.
But before that, we need to first remove deleted nodes and edges from the database and change the schema accordingly.

Steps:
    1. delete edges
       load link_delete.csv and batch delete
    2. delete nodes
       load entity_delete.csv and batch delete
    3. cypher: change schema if necessary from schema_diff.json
    4. follow the same procedure as in `twjob.py`

Config Parameters:
    1. config:
       server_ip, HiveServer2 IP address
       server_port, HiveServer2 Port (default 10000)
    2. graph_name: graph name to use
    3. graph_db: hive db used for graph import data
    4. loader_path: loader.json path in StellarDB HDFS
    5. entity_path: entity_diff.orc path in StellarDB HDFS
    6. link_path: link_diff.orc path in StellarDB HDFS

Input:
    1. schema_diff.json file
    2. entity_delete.csv file
    3. link_delete.csv file
    4. hive.hql file

Note:
    1. Currently deleting edges is not properly supported by StellarDB
    2. delete nodes by match (a) where uid(a) in [...] is slow and StellarDB will optimize this case on their part
    3. Schema change is limited: no edge label delete, no node label delete, no dynamic index creation
    4. Kerberos authentication is required in production HiveServer2 deployment, and current job does not handle that
"""

from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division

from kgpipeline.job import KgJob, JobInputMisMatchError, JobConfigError
from kgpipeline.sparkutil import text_dataframe_to_lines, text_dataframe_to_json
from kgpipeline.util import by_batch
from kgpipeline.tw import apply_schema_change
from kgpipeline.jobs.twjob import get_tables

from impala.dbapi import connect

import logging

logger = logging.getLogger(__name__)


def get_uploader(server_ip, server_port, graph, match_element, batch_size, username=None, password=None):
    """Build a per-partition delete function for ``rdd.mapPartitionsWithIndex``.

    Args:
        server_ip: HiveServer2 host of the StellarDB cypher endpoint.
        server_port: HiveServer2 port.
        graph: StellarDB graph name ("use graph <name>" is issued first).
        match_element: cypher match pattern — "(a)" for nodes, "[a]" for edges.
        batch_size: number of uids deleted per cypher statement.
        username: optional HiveServer2 user for PLAIN auth; anonymous when None.
        password: optional password, used together with ``username``.

    Returns:
        A function ``(partition_index, uid_iterator) -> iterator`` that yields a
        single summary string on success, or the raised exception object on
        failure — exceptions cannot be re-raised cleanly from Spark workers, so
        the driver is expected to inspect the collected results.
    """
    def __uploader__(index, items):
        # TODO: handle Kerberos authentication
        if username:
            conn = connect(host=server_ip, port=server_port, auth_mechanism="PLAIN",
                           user=username, password=password)
        else:
            conn = connect(host=server_ip, port=server_port, auth_mechanism="PLAIN")
        cursor = conn.cursor()
        try:
            cursor.execute("use graph {0}".format(graph))
            total_count = 0
            for batch in by_batch(items, batch_size):
                total_count += len(batch)
                # NOTE(review): uids are interpolated directly into the cypher
                # text; a uid containing a single quote would break the
                # statement. Assumes uids are quote-free — confirm upstream.
                cursor.execute("match {0} where uid(a) in ['{1}'] delete a".format(
                    match_element, "','".join(batch)))
            yield "partition {0} deleted total {1} items".format(index, total_count)
        except Exception as e:
            # Surface the failure as data so the driver can detect it after collect().
            yield e
        finally:
            cursor.close()
            conn.close()

    return __uploader__


class KgTwIncJob(KgJob):
    """Incremental StellarDB loading job.

    Deletes removed edges/nodes, applies schema changes from schema_diff.json,
    then loads updated entity/link data from Hive tables (see module docstring).
    """

    def _require(self, key, message):
        """Return config value for *key*, raising JobConfigError(*message*) when absent."""
        value = self.config.get(key)
        if value is None:
            raise JobConfigError(message)
        return value

    @staticmethod
    def _delete_elements(df, server_ip, server_port, graph_name, match_element, batch_size=10000):
        """Batch-delete the nodes or edges whose uids are in column 0 of *df*.

        Bug fix: the original code called ``mapPartitionsWithIndex`` without a
        Spark action, so the lazy transformation never executed and nothing was
        ever deleted. ``collect()`` forces execution; per-partition failures
        come back as exception objects and are re-raised on the driver.
        """
        if df is None or df.rdd.isEmpty():
            return
        uploader = get_uploader(server_ip, server_port, graph_name, match_element, batch_size)
        results = df.rdd.map(lambda row: row[0]).mapPartitionsWithIndex(uploader).collect()
        for r in results:
            if isinstance(r, Exception):
                raise r
            logger.info(r)

    @staticmethod
    def _apply_schema(conn, graph_name, schema_diff):
        """Run the cypher schema-change commands derived from schema_diff.json."""
        cursor = conn.cursor()
        try:
            cursor.execute("set query.lang = cypher")
            cursor.execute("use graph {0}".format(graph_name))
            for command in apply_schema_change(schema_diff):
                cursor.execute(command)
        finally:
            cursor.close()

    @staticmethod
    def _load_from_hive(conn, hive_input, graph_name, graph_db, loader_path, entity_path, link_path):
        """Create the Hive staging tables and bulk-load them into the graph."""
        hive_lines = text_dataframe_to_lines(hive_input)
        if not hive_lines:
            return
        script = "\n".join(hive_lines) \
            .replace("$ENTITY_PATH$", entity_path) \
            .replace("$LINK_PATH$", link_path)

        for ct in script.split(";\n"):
            # Bug fix: a trailing ";" in the HQL file produced an empty
            # statement that previously hit the "Invalid" branch and raised.
            if not ct.strip():
                continue
            if "{0}_entity".format(graph_name) in ct.lower():
                table_name = "{0}_entity".format(graph_name)
                cypher_load = "load node into graph {0} from {1}.{0}_entity" \
                              " with file schema '{2}'".format(graph_name, graph_db, loader_path)
            elif "{0}_link".format(graph_name) in ct.lower():
                table_name = "{0}_link".format(graph_name)
                cypher_load = "load relationship into graph {0} from {1}.{0}_link" \
                              " with file schema '{2}'".format(graph_name, graph_db, loader_path)
            else:
                raise Exception("Invalid Hive Create Table statement:\n{0}".format(ct))

            cursor = conn.cursor()
            try:
                cursor.execute("use {0}".format(graph_db))
                # Drop a stale staging table before re-creating it.
                if table_name in get_tables(cursor):
                    cursor.execute("drop table {0}".format(table_name))
                cursor.execute(ct)
                cursor.execute("set query.lang = cypher")
                cursor.execute("use graph {0}".format(graph_name))
                cursor.execute(cypher_load)
                # Log the load report returned by StellarDB.
                for row in cursor.fetchall():
                    for i, d in enumerate(cursor.description):
                        logger.info("{0} {1}".format(d[0], row[i]))
            finally:
                cursor.close()

    def process(self, inputs):
        """Run the incremental load.

        Args:
            inputs: exactly four entries — [0] schema_diff.json dataframe,
                [1] entity_delete dataframe, [2] link_delete dataframe,
                [3] hive.hql dataframe. Each entry's first element is used.

        Returns:
            An empty list (this job produces no downstream dataframes).

        Raises:
            JobInputMisMatchError: when *inputs* does not have 4 entries.
            JobConfigError: when a required config parameter is missing.
        """
        if len(inputs) != 4:
            raise JobInputMisMatchError("needs exactly 4 input files for StellarDB Data Loading Job")

        server_ip = self._require("server_ip", "StellarDB Cypher Server IP address must be specified!")
        server_port = self.config.get("server_port", 10000)
        username = self.config.get("username")
        password = self.config.get("password")
        graph_name = self._require("graph_name", "StellarDB graph name must be specified!")
        graph_db = self._require("graph_db", "StellarDB Hive Database must be specified!")
        loader_path = self._require("loader_path", "loader.json path on StellarDB HDFS must be specified!")
        entity_path = self._require("entity_path", "entity orc path on StellarDB HDFS must be specified!")
        link_path = self._require("link_path", "link orc path on StellarDB HDFS must be specified!")

        schema_diff = text_dataframe_to_json(inputs[0][0])
        entity_delete = inputs[1][0]
        link_delete = inputs[2][0]

        # Delete edges before nodes (edges reference node uids).
        self._delete_elements(link_delete, server_ip, server_port, graph_name, "[a]")
        self._delete_elements(entity_delete, server_ip, server_port, graph_name, "(a)")

        # TODO: handle Kerberos authentication
        if username:
            conn = connect(host=server_ip, port=server_port, auth_mechanism="PLAIN",
                           user=username, password=password)
        else:
            conn = connect(host=server_ip, port=server_port, auth_mechanism="PLAIN")

        try:
            # Schema must be adjusted before loading data that uses new labels.
            self._apply_schema(conn, graph_name, schema_diff)
            self._load_from_hive(conn, inputs[3][0], graph_name, graph_db,
                                 loader_path, entity_path, link_path)
        finally:
            conn.close()

        return []


# Register this job under the "twinc" job type so the pipeline can instantiate it.
KgJob.register("twinc", KgTwIncJob)
