# -*- coding: utf-8 -*-

"""
Load all data to Transwarp StellarDB.

Steps:
    1. prepare hive data:
       use {graph_db};
       drop table {graph_name}_entity;
       drop table {graph_name}_link;
       create table from hive.hql with location {entity_path} and {link_path} substituted
    2. enter cypher mode
       drop graph {graph_name}
       create graph {graph_name} with file schema 'schema_path'
    3. load node into graph {graph_name} from {graph_db}.{graph_name}_entity with file schema '{loader_path}'
    4. load relationship into graph {graph_name} from {graph_db}.{graph_name}_link with file schema '{loader_path}'
    5. enter HiveQL mode
       drop table {graph_name}_entity;
       drop table {graph_name}_link;

Config Parameters:
    1. config:
       server_ip, HiveServer2 IP address
       server_port, HiveServer2 Port (default 10000)
    2. graph_name: graph name to use
    3. graph_db: hive db used for graph import data
    4. loader_path: loader.json path in StellarDB HDFS
    5. schema_path: schema.json path in StellarDB HDFS
    6. entity_path: entity.orc path in StellarDB HDFS
    7. link_path: link.orc path in StellarDB HDFS

Input:
    1. hive.hql file

Note:
    1. Full data loading will erase existing data
    2. No input except hive.hql is explicitly required as others are already copied to target HDFS
    3. Kerberos authentication is required in production HiveServer2 deployment, and current job does not handle that
"""

from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division

from kgpipeline.job import KgJob, JobInputMisMatchError, JobConfigError
from kgpipeline.sparkutil import text_dataframe_to_lines
import logging

from impala.dbapi import connect

logger = logging.getLogger(__name__)


def get_single_column_result_as_set(cursor, stmt, exclude=None):
    """Execute *stmt* and collect the first column of every result row into a set.

    Args:
        cursor: DB-API cursor (here an impyla cursor) used to run the statement.
        stmt: statement whose result's first column is collected, e.g. "show tables".
        exclude: optional iterable of values to omit from the result.
            (The original implementation silently ignored any non-set value;
            now any iterable — list, tuple, set — is honored.)

    Returns:
        Set of distinct first-column values, minus anything in *exclude*.
    """
    # Normalize exclude to a set once; falsy (None/empty) means "exclude nothing".
    excluded = set(exclude) if exclude else set()
    cursor.execute(stmt)
    return {row[0] for row in cursor.fetchall() if row[0] not in excluded}


def get_graphs(cursor, exclude=None):
    """Return the set of graph names reported by ``show graphs``.

    Any value present in *exclude* is omitted from the result.
    """
    stmt = "show graphs"
    return get_single_column_result_as_set(cursor, stmt, exclude)


def get_tables(cursor, exclude=None):
    """Return the set of table names reported by ``show tables``.

    Any value present in *exclude* is omitted from the result.
    """
    stmt = "show tables"
    return get_single_column_result_as_set(cursor, stmt, exclude)


class KgTwJob(KgJob):
    """Full-load job: (re)create a StellarDB graph and bulk-load entity/link data.

    The job connects to HiveServer2, rebuilds the staging Hive tables from the
    provided hive.hql, drops and recreates the target graph, then issues the
    cypher ``load node`` / ``load relationship`` statements.
    """

    def _get_required(self, key, message):
        """Return ``self.config[key]``, raising JobConfigError(*message*) when absent."""
        value = self.config.get(key)
        if value is None:
            raise JobConfigError(message)
        return value

    @staticmethod
    def _log_results(cursor):
        """Log every (column-name, value) pair of the cursor's current result set."""
        for row in cursor.fetchall():
            for i, col in enumerate(cursor.description):
                logger.info("{0} {1}".format(col[0], row[i]))

    def process(self, inputs):
        """Run the full load. *inputs* must contain exactly one hive.hql dataframe.

        Returns an empty list (this job produces no downstream outputs).

        Raises:
            JobInputMisMatchError: when the number of inputs is not 1.
            JobConfigError: when a required config parameter is missing.
        """
        if len(inputs) != 1:
            raise JobInputMisMatchError("needs exactly 1 input files for StellarDB Data Loading Job")

        server_ip = self._get_required("server_ip", "StellarDB Cypher Server IP address must be specified!")
        server_port = self.config.get("server_port", 10000)

        # Optional credentials; when absent we still use PLAIN auth without a user.
        username = self.config.get("username")
        password = self.config.get("password")

        graph_name = self._get_required("graph_name", "StellarDB graph name must be specified!")
        graph_db = self._get_required("graph_db", "StellarDB Hive Database must be specified!")
        loader_path = self._get_required("loader_path", "loader.json path on StellarDB HDFS must be specified!")
        schema_path = self._get_required("schema_path", "schema.json path on StellarDB HDFS must be specified!")
        entity_path = self._get_required("entity_path", "entity orc path on StellarDB HDFS must be specified!")
        link_path = self._get_required("link_path", "link orc path on StellarDB HDFS must be specified!")

        hive_lines = text_dataframe_to_lines(inputs[0][0])
        # hive_enter = "set query.lang = hql"
        hive_use = "use {0}".format(graph_db)
        hive_drop_entity = "drop table {0}_entity".format(graph_name)
        hive_drop_link = "drop table {0}_link".format(graph_name)
        # Substitute the HDFS locations into the hive.hql template, then split
        # it into individual statements. Filter out blank fragments: a trailing
        # ";\n" in the file would otherwise yield an empty statement whose
        # execution fails.
        hive_script = "\n".join(hive_lines).replace("$ENTITY_PATH$", entity_path).replace("$LINK_PATH$", link_path)
        hive_create_tables = [stmt for stmt in hive_script.split(";\n") if stmt.strip()]
        cypher_enter = "set query.lang = cypher"
        cypher_drop = "drop graph {0}".format(graph_name)
        cypher_create = "create graph {0} with file schema '{1}'".format(graph_name, schema_path)
        # cypher_use = "use graph {0}".format(graph_name)
        cypher_load_node = "load node into graph {0} from {1}.{0}_entity with file schema '{2}'".format(
            graph_name, graph_db, loader_path
        )
        cypher_load_rel = "load relationship into graph {0} from {1}.{0}_link with file schema '{2}'".format(
            graph_name, graph_db, loader_path
        )

        # TODO: handle Kerberos authentication
        if username:
            conn = connect(host=server_ip, port=server_port, auth_mechanism="PLAIN", user=username, password=password)
        else:
            conn = connect(host=server_ip, port=server_port, auth_mechanism="PLAIN")
        cursor = conn.cursor()

        try:
            # Rebuild the staging tables: drop existing ones, then re-create
            # from the substituted hive.hql statements.
            cursor.execute(hive_use)
            tables = get_tables(cursor)
            if "{0}_entity".format(graph_name) in tables:
                cursor.execute(hive_drop_entity)
            if "{0}_link".format(graph_name) in tables:
                cursor.execute(hive_drop_link)
            for ct in hive_create_tables:
                cursor.execute(ct)

            # Switch to cypher mode and recreate the graph from scratch
            # (full load erases existing data).
            cursor.execute(cypher_enter)
            graphs = get_graphs(cursor, {graph_db, "default", "system"})
            if graph_name in graphs:
                cursor.execute(cypher_drop)

            cursor.execute(cypher_create)

            cursor.execute(cypher_load_node)
            self._log_results(cursor)

            cursor.execute(cypher_load_rel)
            self._log_results(cursor)
        finally:
            cursor.close()
            conn.close()

        return []


KgJob.register("tw", KgTwJob)
