# -*- coding: utf-8 -*-

"""
from cnSchema pkl and kg data to TigerGraph data:
1. generate a json based schema for TG
2. generate reformatted entity and link data file
"""

from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division

from kgpipeline.job import KgJob, JobInputMisMatchError, JobConfigError
from kgpipeline.jsonutil import entity_json_decode, entity_json_encode
from kgpipeline.sparkutil import to_text_dataframe, json_to_text_dataframe
import kgpipeline.tg as tg
import logging

logger = logging.getLogger(__name__)


def combo_type(schema, cns_item):
    """Rewrite a decoded cnSchema item in place into TG-style keys.

    ``@type`` is collapsed (via the schema's main types) into ``_t``,
    ``@id`` becomes ``_id``, and ``in``/``out`` become ``_i``/``_o`` when
    present.  Returns the mutated item for chaining in RDD maps.
    """
    raw_types = cns_item.pop("@type")
    cns_item["_t"] = "_".join(schema.get_main_types(raw_types))
    cns_item["_id"] = cns_item.pop("@id")
    for src_key, dst_key in (("in", "_i"), ("out", "_o")):
        if src_key in cns_item:
            cns_item[dst_key] = cns_item.pop(src_key)
    return cns_item


def set_in_type(x):
    """Attach a resolved in-vertex type to a link item.

    ``x`` is a ``(link_item, in_type)`` pair as produced by a
    ``leftOuterJoin``; when the joined type is truthy it is stored under
    ``_it``.  Returns the (possibly mutated) link item.
    """
    item, in_type = x
    if in_type:
        item["_it"] = in_type
    return item


def set_out_type(x):
    """Attach a resolved out-vertex type to a link item.

    ``x`` is a ``(link_item, out_type)`` pair as produced by a
    ``leftOuterJoin``; when the joined type is truthy it is stored under
    ``_ot``.  Returns the (possibly mutated) link item.
    """
    item, out_type = x
    if out_type:
        item["_ot"] = out_type
    return item


class KgTgDataJob(KgJob):
    """Convert decoded entity/link data into a TigerGraph schema plus data files.

    Expects exactly two inputs — (entity dataframe, link dataframe) — each with
    a ``value`` column holding a JSON-encoded item.  Returns three text
    dataframes: the generated TG schema (JSON), the converted entity data, and
    the converted link data.
    """

    def process(self, inputs):
        """Run the TG conversion.

        :param inputs: list of two ``(dataframe, meta)`` pairs: entities, links.
        :raises JobInputMisMatchError: if not exactly 2 inputs are given.
        :raises JobConfigError: if the ``schema`` config parameter is missing.
        :return: ``[schema_df, entity_df, link_df]`` text dataframes.
        """
        if len(inputs) != 2:
            raise JobInputMisMatchError("needs exactly 2 input files for Tiger Graph Data Conversion Job")

        # NOTE(review): schema appears to be a Spark broadcast variable — it is
        # dereferenced via ``schema.value`` below; confirm against the caller.
        schema = self.config.get("schema")
        if not schema:
            raise JobConfigError("requires schema parameter!")

        excluded_keys = set(self.config.get("excluded", []))

        if excluded_keys:
            invalid_keys = excluded_keys & tg.TG_KEYS
            if invalid_keys:
                verb = "are" if len(invalid_keys) > 1 else "is"
                logger.error("Keys: {0} {1} needed by TG, they are not allowed to be excluded!".format(
                    ", ".join(invalid_keys), verb))
                # BUG FIX: previously the invalid keys were only logged but kept
                # in excluded_keys, so exclude_data() below would still delete
                # TG-required keys (e.g. the ones read as x["_id"], x["_t"],
                # x["_i"], x["_o"] further down) from every item.  Drop them
                # from the exclusion set instead of honoring them.
                excluded_keys -= invalid_keys

        excluded_keys.update(tg.EXCLUDED_KEYS)

        def extract_properties(x):
            # Emit ((item_type, property_name), is_list_flag) pairs for every
            # non-excluded, non-TG property; summed per key below to tell the
            # schema generator how often a property carries list values.
            res = []
            t = x["_t"]
            for k, v in x.items():
                if k not in excluded_keys and k not in tg.TG_KEYS:
                    res.append(((t, k), 1 if isinstance(v, list) else 0))
            return res

        def exclude_data(x):
            # Strip configured keys from an item in place.
            for k in excluded_keys:
                x.pop(k, None)
            return x

        entity, _ = inputs[0]
        link, _ = inputs[1]

        # Decode JSON rows, fold @type/@id/in/out into TG-style keys
        # (_t/_id/_i/_o), then strip excluded properties.
        entity = entity.rdd.map(lambda x: entity_json_decode(x["value"])).map(
            lambda x: combo_type(schema.value, x)).map(exclude_data)

        link = link.rdd.map(lambda x: entity_json_decode(x["value"])).map(
            lambda x: combo_type(schema.value, x)).map(exclude_data)

        link_partitions = link.getNumPartitions()

        logger.info("Link partitions: {0}".format(link_partitions))

        entity_types = entity.map(lambda x: (x["_id"], x["_t"]))
        link_in_out = link.map(lambda x: (x["_i"], (x["_id"], x["_t"], x["_o"])))

        # Resolve the entity type on both ends of every link via two successive
        # left outer joins against the entity-type table:
        # [link kgid => (link_type, in_entity_type, out_entity_type)]
        link_in_out_types = link_in_out.leftOuterJoin(entity_types).map(
            lambda x: (x[1][0][2], (x[1][0][0], x[1][0][1], x[1][1]))).leftOuterJoin(entity_types).map(
            lambda x: (x[1][0][0], (x[1][0][1], x[1][0][2], x[1][1]))
        ).partitionBy(link_partitions).persist()

        # (type, property, list-occurrence-count) triples for schema generation.
        entity_properties = entity.flatMap(extract_properties).reduceByKey(lambda x, y: x + y).map(
            lambda x: (x[0][0], x[0][1], x[1])).collect()
        link_properties = link.flatMap(extract_properties).reduceByKey(lambda x, y: x + y).map(
            lambda x: (x[0][0], x[0][1], x[1])).collect()

        # check if a link type has parallel edges between same pair of in out vertices (i.e. multigraph)
        multigraph_link_types = link.map(lambda x: ((x["_t"], x["_i"], x["_o"]), 1)).reduceByKey(
            lambda x, y: x + y).filter(
            lambda x: x[1] > 1).map(
            lambda x: x[0][0]).distinct().collect()

        relations = link_in_out_types.map(lambda x: x[1]).distinct().collect()

        tg_schema = tg.create_tg_schema(schema.value, entity_properties, link_properties, relations,
                                        multigraph_link_types, excluded_keys)

        filtered_keys = tg_schema["filtered_keys"]

        entity = entity.map(lambda x: tg.convert_data(filtered_keys, x))
        link = link.map(lambda x: tg.convert_data(filtered_keys, x))

        # A link type with more than one possible source (or target) vertex
        # type needs an explicit _it (or _ot) annotation on each edge item.
        links_need_in_type = set()
        links_need_out_type = set()

        for link_type, link_item in tg_schema["edges"].items():
            if len(link_item["from"]) > 1:
                links_need_in_type.add(link_type)
            if len(link_item["to"]) > 1:
                links_need_out_type.add(link_type)

        if len(links_need_in_type) > 0 or len(links_need_out_type) > 0:
            link = link.map(lambda x: (x["_id"], x)).partitionBy(link_partitions).persist()

            # Keep only the in/out entity type for the link types that need it.
            link_in_types = link_in_out_types.filter(
                lambda x: x[1][0] in links_need_in_type).mapValues(
                lambda x: x[1])

            link_out_types = link_in_out_types.filter(
                lambda x: x[1][0] in links_need_out_type).mapValues(
                lambda x: x[2])

            if len(links_need_in_type) > 0:
                link = link.leftOuterJoin(link_in_types).mapValues(set_in_type)

            if len(links_need_out_type) > 0:
                link = link.leftOuterJoin(link_out_types).mapValues(set_out_type)

            # Drop the join key, keeping only the annotated link items.
            link = link.map(lambda x: x[1])

        return [json_to_text_dataframe(tg_schema), to_text_dataframe(entity, entity_json_encode),
                to_text_dataframe(link, entity_json_encode)]


KgJob.register("tgdata", KgTgDataJob)
