# -*- coding: utf-8 -*-

"""
kg calc job
"""

from __future__ import unicode_literals
from __future__ import absolute_import

from importlib import import_module

from pyspark.rdd import RDD

from pyspark.sql import DataFrame
from pyspark.broadcast import Broadcast

from kgpipeline.job import KgJob
from kgpipeline.jsonutil import entity_json_decode, entity_json_encode
from kgpipeline.sparkutil import current_session, current_context, to_text_dataframe

import logging

logger = logging.getLogger(__name__)


def get_java_class(sc, class_name):
    """
    Resolve a JVM class handle through the py4j gateway of a SparkContext.

    :param sc: SparkContext; its ``_jvm`` py4j gateway is walked
    :param class_name: fully qualified java class name, e.g. ``a.b.MyClass``
    :return: the JVM class handle (callable to construct instances)
    :raises Exception: re-raises whatever the attribute walk raised,
        after logging the failing class name
    """
    try:
        r = getattr(sc, '_jvm')
        # walk the dotted path one attribute at a time on the gateway
        for part in class_name.split('.'):
            r = getattr(r, part)
        return r
    except Exception:
        # log through the module logger (with traceback) instead of print,
        # consistent with the rest of this module
        logger.exception("fail to get java class: %s", class_name)
        raise


def remove_broadcast_value(config):
    """
    Return a shallow copy of *config* with all Broadcast values dropped.

    Broadcast objects cannot be sent through the py4j gateway
    (``AttributeError: 'Broadcast' object has no attribute '_get_object_id'``),
    so they are stripped before handing the config to a java calc function.

    :param config: dict-like configuration
    :return: new dict containing only non-Broadcast entries
    """
    # isinstance (rather than ``type(v) != Broadcast``) also filters
    # out any Broadcast subclasses
    return {k: v for k, v in config.items() if not isinstance(v, Broadcast)}


class KgCalcJob(KgJob):
    """
    calc by using calc function specified in config

    config includes:
    - kg_module: the module to load, where calc function is defined
    - calc_java_class: the java class where calc function is defined, need to provide jar to spark
    - calc_func: name of the calc function, default is ``calc``

    for python calc func, parameters are one or more RDDs with dict, and kwargs:
    `calc(pyspark.rdd.RDD, ..., **kwargs)`

    and it returns RDDs with dict:
    `pyspark.rdd.RDD, ...`

    for java calc func, parameters are one or more text DataFrames with json, and Map:
    `calc(org.apache.spark.sql.DataFrame, ..., java.util.Map<String, Object>)`

    and it returns text DataFrames with json:
    `java.util.List<org.apache.spark.sql.DataFrame>`

    NOTE: either specify `kg_module` or `calc_java_class`

    NOTE: example of calc in scala: http://gitlab.ruyi.ai/kg-code/kg-util/tree/master/Calc

    input of calc function are text RDDs and config. output is also text RDD.

    There are some predefined calc functions provided in `calc`.
    """
    def __init__(self, config, reader):
        super(KgCalcJob, self).__init__(config, reader)
        module = self.config.get("kg_module")
        # always bind _kg_module so process() can report a missing
        # configuration clearly instead of raising AttributeError
        self._kg_module = import_module(module) if module else None
        self._func = self.config.get("calc_func", "calc")
        self._java_class = self.config.get("calc_java_class")

    def process(self, inputs):
        """
        kg calc process

        :param inputs: list of (DataFrame, meta) pairs; a DataFrame may be None
        :return: list of text DataFrames produced by the calc function
        :raises ValueError: if neither `kg_module` nor `calc_java_class`
            is present in the config
        """
        logger.info("Start KgCalcJob: {} ...".format(self.name))

        if not inputs:
            logger.warning("No input sources ... abort!")
            return []

        spark = current_session()
        sc = current_context()

        if self._java_class:
            # note:
            # for java calc
            # input and output are text dataframes with json

            klass = get_java_class(sc, self._java_class)
            obj = klass()
            calc_func = getattr(obj, self._func)

            # pass the underlying java DataFrame handles; None stays None
            dfs = [df._jdf if df is not None else None for df, _ in inputs]

            # remove broadcast value, otherwise:
            # AttributeError: 'Broadcast' object has no attribute '_get_object_id'
            conf = remove_broadcast_value(self.config)
            params = dfs + [conf]

            results = calc_func(*params)
            # wrap the returned java DataFrames back into python DataFrames
            return [DataFrame(r, spark) for r in results]

        if self._kg_module is None:
            # fail fast with a clear message rather than an AttributeError
            raise ValueError(
                "either `kg_module` or `calc_java_class` must be specified "
                "in the config")

        # note:
        # for python calc
        # input and output are rdds with dict
        rdd_list = [
            df.rdd.map(lambda x: entity_json_decode(x["value"])) if df is not None else sc.emptyRDD() for
            df, _ in inputs]

        calc_func = getattr(self._kg_module, self._func)

        res = calc_func(*rdd_list, **self.config)

        return [to_text_dataframe(rdd, entity_json_encode) for rdd in res]


KgJob.register("calc", KgCalcJob)
