# -*- coding: utf-8 -*-

"""
Load data to TG incrementally

1. we will only proceed if the schema change is valid or nothing is changed
2. we will first run the delete_{graph_name} job to delete links and then delete entities
3. if there is any schema change:
   > drop old load_{graph_name} and old delete_{graph_name};
   > run define_{graph_name} job; and drop the job;
   > create new load_{graph_name} and new delete_{graph_name};
4. for entity and link add/update, we post them to new load_{graph_name} job, just like the full publish (by restpp);
   note that since we are loading with json file, missing string tokens are NOT updated (as the official doc says)
"""

from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division

import logging

from kgpipeline.job import KgJob, JobInputMisMatchError, JobConfigError
from kgpipeline.jsonutil import entity_json_decode, entity_json_encode
from kgpipeline.jobs.tgjob import get_uploader, get_token
from kgpipeline.tg import generate_loading_job, generate_delete_job, generate_update_schema_change_job, generate_typedef
from kgpipeline.sparkutil import text_dataframe_to_json

from gsql_client import Client

logger = logging.getLogger(__name__)


def delete_rdd(server, graph_name, rdd, key, token=""):
    """Post all records marked ``"_diff" == "delete"`` to the ``delete_{graph_name}`` RESTPP job.

    :param server: RESTPP server address (host, optionally with port)
    :param graph_name: TigerGraph graph name; also used to derive the job name
    :param key: logical input name ("entity" or "link"), used for logging and upload routing
    :param rdd: RDD of decoded dicts, each carrying a ``"_diff"`` marker
    :param token: optional RESTPP auth token (empty string disables auth)
    :raises Exception: re-raises the first per-partition failure returned by the uploader
    """
    # lazy %-style args so the message is only formatted if the level is enabled
    logger.info("deleting %s_diff.json...", key)
    deleter = get_uploader(server, graph_name, "delete_{0}".format(graph_name), key, 100000, token)
    # pass entity_json_encode directly instead of wrapping it in a lambda
    deleted = rdd.filter(lambda x: x["_diff"] == "delete").map(entity_json_encode)
    results = deleted.mapPartitionsWithIndex(deleter).collect()
    logger.info("deleting %s complete:", key)
    for r in results:
        # the uploader reports per-partition outcomes; surface the first failure
        if isinstance(r, Exception):
            raise r
        logger.info(r)


def update_rdd(server, graph_name, rdd, key, token=""):
    """Post all records NOT marked for deletion to the ``load_{graph_name}`` RESTPP job.

    :param server: RESTPP server address (host, optionally with port)
    :param graph_name: TigerGraph graph name; also used to derive the job name
    :param key: logical input name ("entity" or "link"), used for logging and upload routing
    :param rdd: RDD of decoded dicts, each carrying a ``"_diff"`` marker
    :param token: optional RESTPP auth token (empty string disables auth)
    :raises Exception: re-raises the first per-partition failure returned by the uploader
    """
    # lazy %-style args so the message is only formatted if the level is enabled
    logger.info("updating %s_diff.json...", key)
    updater = get_uploader(server, graph_name, "load_{0}".format(graph_name), key, 100000, token)
    # pass entity_json_encode directly instead of wrapping it in a lambda
    updated = rdd.filter(lambda x: x["_diff"] != "delete").map(entity_json_encode)
    results = updated.mapPartitionsWithIndex(updater).collect()
    logger.info("updating %s complete:", key)
    for r in results:
        # the uploader reports per-partition outcomes; surface the first failure
        if isinstance(r, Exception):
            raise r
        logger.info(r)


class KgTgIncJob(KgJob):
    """Incrementally publish entity/link diffs to TigerGraph.

    Expects exactly 4 inputs, in order:
    schema.json, schema_diff.json, entity_diff.json, link_diff.json.

    Processing order (see the module docstring for the rationale):
    deletions first, then any schema change (rebuilding the loading
    jobs), then additions/updates via the load_{graph_name} job.
    """

    def process(self, inputs):
        """Run the incremental publish.

        :param inputs: 4-element sequence of (dataframe, ...) inputs as
            described on the class
        :returns: empty list (this job produces no downstream outputs)
        :raises JobInputMisMatchError: if the input count is not 4
        :raises JobConfigError: if server/graph_name config is missing
        :raises Exception: if gsql login fails
        """
        if len(inputs) != 4:
            raise JobInputMisMatchError("needs exactly 4 input files for Tiger Graph Data Loading Job")

        server = self.config.get("server")
        if server is None:
            raise JobConfigError("TigerGraph server address must be specified!")

        # "server" may carry a ":port" suffix; RESTPP defaults to the bare host
        server_ip = server.split(":")[0] if ":" in server else server
        restpp_server = self.config.get("restpp_server", server_ip)

        graph_name = self.config.get("graph_name")
        if graph_name is None:
            raise JobConfigError("TigerGraph graph name must be specified!")

        version = self.config.get("version", "")
        commit = self.config.get("commit", "")
        username = self.config.get("username", "tigergraph")
        password = self.config.get("password", "tigergraph")
        restpp_auth = self.config.get("restpp_auth", False)

        client = Client(server, username, password, version=version, commit=commit)
        if not client.login():
            raise Exception("Login failed! Check that the server is running properly!")

        # BUGFIX: always close the gsql session, even if a delete or a
        # schema-change command raises part-way through; previously an
        # exception here leaked the session.
        try:
            client.use(graph_name)

            token = ""
            if restpp_auth:
                token = get_token(graph_name, client, restpp_server)

            tg_schema = text_dataframe_to_json(inputs[0][0])
            tg_schema_diff = text_dataframe_to_json(inputs[1][0])

            entity_text = inputs[2][0].rdd.map(lambda x: entity_json_decode(x["value"]))
            link_text = inputs[3][0].rdd.map(lambda x: entity_json_decode(x["value"]))

            # first run the delete_{graph} job so stale entries are removed
            # before the schema (and thus the loading jobs) may change
            delete_rdd(restpp_server, graph_name, entity_text, "entity", token)
            delete_rdd(restpp_server, graph_name, link_text, "link", token)

            self._apply_schema_change(client, graph_name, tg_schema, tg_schema_diff)
        finally:
            client.quit()

        # run load_{graph} to add/update entries; RESTPP uploads do not
        # need the (now closed) gsql session
        update_rdd(restpp_server, graph_name, entity_text, "entity", token)
        update_rdd(restpp_server, graph_name, link_text, "link", token)

        return []

    @staticmethod
    def _apply_schema_change(client, graph_name, tg_schema, tg_schema_diff):
        """Apply schema-diff changes via *client*.

        Defines any new tuple types, then — only when the diff actually
        produces a schema-change job — drops the old loading jobs, runs
        the schema change, and recreates the load/delete jobs against
        the new schema.
        """
        tuple_define = generate_typedef(tg_schema_diff["tuples"])
        if tuple_define:
            client.run_multiple(tuple_define)

        define_job_name = "define_{0}".format(graph_name)
        define_gsql = generate_update_schema_change_job(tg_schema_diff, graph_name, define_job_name)
        if not define_gsql:
            return  # nothing changed: keep the existing loading jobs

        load_gsql = generate_loading_job(tg_schema, graph_name, "load_{0}".format(graph_name))
        delete_gsql = generate_delete_job(tg_schema, graph_name, "delete_{0}".format(graph_name))
        # old jobs reference the old schema, so drop them before altering it
        client.command("drop job load_{0}".format(graph_name))
        client.command("drop job delete_{0}".format(graph_name))
        client.run_multiple(define_gsql)
        client.command("run job {0}".format(define_job_name))
        client.command("drop job {0}".format(define_job_name))
        client.run_multiple(delete_gsql)
        client.run_multiple(load_gsql)


# Register this job class under the "tginc" key so the pipeline can
# instantiate it from job configuration.
KgJob.register("tginc", KgTgIncJob)
