# -*- coding: utf-8 -*-

"""
kg production job
"""

from __future__ import unicode_literals
from __future__ import absolute_import

from importlib import import_module

from kgpipeline.job import KgJob
from kgpipeline.lambdas import common_merge
from kgpipeline.jsonutil import entity_json_encode
from kgpipeline.sparkutil import to_text_dataframe, df_row_to_dict
from kgpipeline.util import wrap_broadcast_variable

import logging

# Module-level logger named after this module (standard logging convention).
logger = logging.getLogger(__name__)


def merge_by(rdd_list, reducer):
    """
    Merge KG entities spread across several keyed RDDs.

    All RDDs are unioned into one, records sharing a key are combined with
    ``reducer``, and the keys are stripped from the result.

    :param rdd_list: a list of (key, entity) RDDs containing KG entities
    :param reducer: the reduce function used to merge entities with equal keys
    :return: an RDD of merged entities, or None when ``rdd_list`` is empty
    """
    if not rdd_list:
        return None

    combined = rdd_list[0]
    for other in rdd_list[1:]:
        combined = combined.union(other)

    merged = combined.reduceByKey(reducer)
    # Drop the key, keep only the merged entity value.
    return merged.map(lambda pair: pair[1])


class KgProductionJob(KgJob):
    """
    Job that produces KG entities and links via user-provided mapper functions.

    Config keys:
    - kg_module: name of the module in which the user-provided functions are defined
    - schema: schema pkl path
    - parallelism: input RDD repartition number (optional)
    - reducer: name of the reducer function used when merging. optional,
      default ``lambdas.common_merge``

    Each input spec must contain one option, ``mapper``, whose value names a
    function defined in the module given by ``kg_module``.

    A mapper is of type f: KgCnSchema, Row -> [dict]; it converts an input Row
    into a list of entities and/or links. Every produced dict must carry an
    "@id" field and an "@type" field.

    The resulting entities and links are merged together by the reducer.

    The output of this job is [entity RDD, link RDD].
    """
    type_key = "production"

    def __init__(self, config, reader):
        """
        :param config: job config dict (see class docstring for keys)
        :param reader: input reader, passed through to the base job
        """
        super(KgProductionJob, self).__init__(config, reader)
        # Module holding the user-provided mapper (and optional reducer) functions.
        self._kg_module = import_module(self.config["kg_module"])
        self._schema = self.config["schema"]
        # Optional repartition count; falsy means keep the input partitioning.
        self._parallelism = self.config.get("parallelism")

    @staticmethod
    def _validate_mapper_result(mapper_name, result):
        """
        Sanity-check a mapper's output for a single sample row.

        :param mapper_name: mapper function name, used only in error messages
        :param result: the value the mapper returned for one input row
        :raises Exception: if ``result`` is not a list of dicts that all
            contain "@id" and "@type" fields
        """
        if not isinstance(result, list):
            raise Exception("mapper {0} should return a list!".format(mapper_name))
        if len(result) > 0:
            if any(not isinstance(tr, dict) for tr in result):
                raise Exception("mapper {0} returned non-dict list elements!".format(mapper_name))
            if any("@id" not in tr for tr in result):
                raise Exception(
                    "mapper {0} returned list elements that do not contain \"@id\" field!".format(
                        mapper_name))
            if any("@type" not in tr for tr in result):
                raise Exception(
                    "mapper {0} returned list elements that do not contain \"@type\" field!".format(
                        mapper_name))

    def process(self, inputs):
        """
        Run the production job.

        For each (dataframe, extra_config) input: resolve the configured
        mapper, smoke-test it on one sample row, then flat-map every row into
        ("@id", entity) pairs. All pair RDDs are merged with the reducer and
        the result is split into entities and links by "@type".

        :param inputs: iterable of (dataframe, extra_config) pairs
        :return: [entity dataframe, link dataframe], or [] when nothing was produced
        """
        logger.info("Start KgProductionJob: %s ...", self.name)

        rdd_list = []

        for df, extra_config in inputs:
            mapper_name = extra_config["mapper"]
            mapper = getattr(self._kg_module, mapper_name)
            mapper_wrapped = wrap_broadcast_variable(mapper, self._schema)

            target_rdd = df.rdd.map(df_row_to_dict)

            # Ensure the mapper at least runs without exception on one sample row
            # before launching the full distributed flatMap.
            test_items = target_rdd.take(1)
            if not test_items:
                logger.warning("No input!")
                continue
            self._validate_mapper_result(mapper_name, mapper_wrapped(test_items[0]))

            # Key each produced entity/link by its "@id" so reduceByKey can merge
            # records describing the same node.
            std_rdd = target_rdd.flatMap(mapper_wrapped).map(
                lambda x: (x["@id"], x))

            if self._parallelism:
                std_rdd = std_rdd.partitionBy(self._parallelism)

            rdd_list.append(std_rdd)

        logger.info("Start merging %s ...", self.name)

        reducer_name = self.config.get("reducer")

        # Fall back to the generic merge when no custom reducer is configured.
        if reducer_name:
            reducer = getattr(self._kg_module, reducer_name)
        else:
            reducer = common_merge

        rdd = merge_by(rdd_list, reducer)

        res = []

        if rdd:
            # Records whose "@type" contains "CnsLink" are links; everything
            # else is treated as an entity.
            entity = to_text_dataframe(rdd.filter(lambda x: "CnsLink" not in x["@type"]),
                                       entity_json_encode)

            res.append(entity)

            link = to_text_dataframe(rdd.filter(lambda x: "CnsLink" in x["@type"]),
                                     entity_json_encode)

            res.append(link)
            logger.info("Processing %s done.", self.name)
        else:
            logger.warning("Processing %s done but no result!", self.name)

        return res


# Register this class with the job registry under the "production" key
# (matches KgProductionJob.type_key).
KgJob.register("production", KgProductionJob)
