# -*- coding: utf-8 -*-

"""
Load all data into TigerGraph (TG).

Steps:
    1. drop graph {name}
    2. create graph {name} ()
    3. create schema_change job define_{name}
    4. run schema_change job define_{name} # we need to first create the schema before creating loading job
    5. create loading job load_{name} for graph {name} {...}
    6. upload data to REST++ server in parallel with spark partitions

Config Parameters:
    1. config: server, TigerGraph Server IP (both GSQL server and REST++ server with different ports)
    2. config optional: username, default tigergraph
    3. config optional: password, default tigergraph

Input Parameters (HDFS):
    1. input: schema.json
    2. input: entity.json
    3. input: link.json

Note:
    1. Full data loading will erase existing data, jobs, and queries
    2. The first 5 steps are combined into one GSQL file and executed in a single call to the GSQL server
"""

from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division

from kgpipeline.job import KgJob, JobInputMisMatchError, JobConfigError
from kgpipeline.sparkutil import text_dataframe_to_json
import kgpipeline.tg as tg
import logging

from gsql_client import Client, RESTPP

logger = logging.getLogger(__name__)


def log_tg(cmd):
    """Log a GSQL command that is about to be sent to the server.

    :param cmd: the GSQL command text (may be multi-line).
    """
    # Lazy %-formatting defers string construction until the INFO level is
    # actually enabled (and avoids manual concatenation).
    logger.info('tg run: <\n%s\n>', cmd)


def tg_use(client, graph_name):
    """Switch the GSQL client onto *graph_name*, logging the command first.

    :param client: a connected gsql_client.Client instance.
    :param graph_name: name of the graph to use.
    :return: whatever ``client.use`` returns.
    """
    cmd = 'use graph {}'.format(graph_name)
    log_tg(cmd)
    return client.use(graph_name)


def tg_catalog(client):
    """Return the server catalog, logging the equivalent ``ls`` command.

    :param client: a connected gsql_client.Client instance.
    :return: the catalog structure reported by the server.
    """
    command = 'ls'
    log_tg(command)
    return client.catalog()


def tg_command(client, cmd):
    """Execute a single GSQL command after logging it.

    :param client: a connected gsql_client.Client instance.
    :param cmd: the GSQL command to run.
    :return: the server's response from ``client.command``.
    """
    log_tg(cmd)
    result = client.command(cmd)
    return result


def tg_run_multiple(client, cmds):
    """Execute a batch of GSQL commands in one round-trip, logging them first.

    :param client: a connected gsql_client.Client instance.
    :param cmds: iterable of GSQL command strings.
    :return: the server's response from ``client.run_multiple``.
    """
    joined = '\n'.join(cmds)
    log_tg(joined)
    return client.run_multiple(cmds)


def get_token(graph_name, client, restpp_server):
    """Obtain a REST++ token for *graph_name* via the "kg_load" secret.

    :param graph_name: graph whose secret is requested.
    :param client: a logged-in gsql_client.Client instance.
    :param restpp_server: address of the REST++ server.
    :return: a token string, or ``None`` when no secret could be obtained.
    """
    secret = client.get_secret(graph_name, "kg_load")
    if not secret:
        return None
    restpp = RESTPP(restpp_server)
    # by default we request a token of 24 hours
    return restpp.request_token(secret, 3600 * 24)


def get_uploader(server_ip, graph, load_job_name, load_file, batch_size, token=""):
    """Build a partition-upload function for ``rdd.mapPartitionsWithIndex``.

    The returned closure streams the lines of one Spark partition to the
    REST++ server in batches of at most *batch_size* lines.

    :param server_ip: REST++ server address.
    :param graph: target graph name.
    :param load_job_name: loading job tag passed to REST++ ``load``.
    :param load_file: logical filename expected by the loading job.
    :param batch_size: maximum number of lines per REST++ load call.
    :param token: optional REST++ auth token; empty string disables auth.
    :return: a ``(index, items) -> iterator`` function suitable for Spark.
    """
    def __uploader__(index, items):
        c = RESTPP(server_ip)
        if token:
            c.set_token(token)
        try:
            total_count = 0
            batch = []
            for line in items:
                batch.append(line)
                # Flush at exactly batch_size items. (The previous `>` test
                # was off by one and let batches grow to batch_size + 1.)
                if len(batch) >= batch_size:
                    c.load(graph, batch, tag=load_job_name, filename=load_file)
                    total_count += len(batch)
                    batch = []
            # Flush the final partial batch, if any.
            if batch:
                c.load(graph, batch, tag=load_job_name, filename=load_file)
                total_count += len(batch)
            yield "partition {0} loaded total {1} items".format(index, total_count)
        except Exception as e:
            # Deliberately yield (not raise) the exception so Spark can
            # collect it; upload_rdd re-raises on the driver side.
            yield e

    return __uploader__


def upload_rdd(server, graph_name, rdd, key, token=""):
    """Upload one RDD of JSON lines to REST++ and surface any partition failure.

    :param server: REST++ server address.
    :param graph_name: target graph; loading job is ``load_{graph_name}``.
    :param rdd: Spark RDD of line strings to upload.
    :param key: logical file key ("entity" or "link"), used as the filename.
    :param token: optional REST++ auth token.
    :raises Exception: re-raises the first exception yielded by any partition.
    """
    logger.info("upload {0}.json...".format(key))
    job_tag = "load_{0}".format(graph_name)
    upload_fn = get_uploader(server, graph_name, job_tag, key, 100000, token)
    outcomes = rdd.mapPartitionsWithIndex(upload_fn).collect()
    logger.info("{0} upload complete:".format(key))
    for outcome in outcomes:
        if isinstance(outcome, Exception):
            raise outcome
        logger.info(outcome)


class KgTgJob(KgJob):
    """Pipeline job that (re)creates a TigerGraph graph and bulk-loads data.

    Expects exactly three inputs (see module docstring): schema.json,
    entity.json, and link.json.
    """

    def process(self, inputs):
        """Drop/recreate the graph from the schema input, then upload data.

        :param inputs: sequence of three input datasets; by position:
            schema, entity lines, link lines.
        :return: an empty list (this job produces no downstream output).
        :raises JobInputMisMatchError: if not exactly 3 inputs are given.
        :raises JobConfigError: if "server" or "graph_name" is missing.
        :raises Exception: if GSQL login fails.
        """
        if len(inputs) != 3:
            raise JobInputMisMatchError("needs exactly 3 input files for Tiger Graph Data Loading Job")

        server = self.config.get("server")
        if server is None:
            raise JobConfigError("TigerGraph server address must be specified!")

        # "server" may include a port; strip it to get the bare IP/host,
        # which is the default REST++ address.
        if ":" in server:
            server_ip = server.split(":")[0]
        else:
            server_ip = server

        restpp_server = self.config.get("restpp_server", server_ip)

        graph_name = self.config.get("graph_name")
        if graph_name is None:
            raise JobConfigError("TigerGraph graph name must be specified!")

        # Optional server version/commit hints passed to the GSQL client.
        version = self.config.get("version", "")
        commit = self.config.get("commit", "")

        username = self.config.get("username", "tigergraph")
        password = self.config.get("password", "tigergraph")

        # When True, a REST++ token is requested after the graph is created.
        restpp_auth = self.config.get("restpp_auth", False)

        # inputs[0] holds schema.json; convert the text dataframe to JSON.
        tg_schema = text_dataframe_to_json(inputs[0][0])

        # One GSQL script covering steps 1-5 of the module docstring.
        gsql = tg.generate_full_loading_gsql(tg_schema, graph_name)

        # logger.info("GSQL to execute:\n" + "\n".join(gsql))

        client = Client(server, username, password, version=version, commit=commit)

        token = ""

        if not client.login():
            raise Exception("Login failed! Check that the server is running properly!")

        try:
            # we need to first check if the graph exists
            # and the jobs related to this graph
            # we need to first delete all jobs
            # then we drop the graph
            cata = tg_catalog(client)
            if graph_name in cata["graphs"]:
                logger.info("Graph {0} already exists!"
                            "We are going to delete all queries and jobs before deleting the graph.".format(graph_name))

                # Queries and jobs must be dropped before the graph itself.
                tg_use(client, graph_name)
                tg_command(client, "drop query *")
                tg_command(client, "drop job *")

                logger.info("We are going to drop graph {0}!".format(graph_name))

                tg_command(client, "drop graph {0}".format(graph_name))

            tg_run_multiple(client, gsql)  # creates a new graph

            if restpp_auth:
                # May return None if no secret could be obtained; the
                # uploader treats a falsy token as "no auth".
                token = get_token(graph_name, client, restpp_server)
        finally:
            # Always close the GSQL session, even if graph setup failed.
            client.quit()

        # now we create all necessary schema and jobs
        # load data by rest++ endpoints
        entity_text = inputs[1][0].rdd.map(lambda x: x["value"])
        link_text = inputs[2][0].rdd.map(lambda x: x["value"])

        upload_rdd(restpp_server, graph_name, entity_text, "entity", token)
        upload_rdd(restpp_server, graph_name, link_text, "link", token)

        return []


# Register this job class under the "tg" key so the pipeline can look it up.
KgJob.register("tg", KgTgJob)
