# -*- coding: utf-8 -*-

"""
kg json to redis json job

The Redis JSON output is a JSON stream file mapping each entity name to a list of entities, e.g.

{"某某基金": [{},{}]}
"""

from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division

from kgpipeline.job import KgJob, JobInputMisMatchError
from kgpipeline.jsonutil import entity_json_decode, entity_json_encode
from kgpipeline.sparkutil import to_text_dataframe

# Chinese numeral characters; the index of a character in this string is its digit value.
chinese_number = '零一二三四五六七八九'


def normalize_key(key):
    """Normalize an entity name into a canonical lookup key.

    Each Chinese numeral character (零一二三四五六七八九) is replaced by
    its ASCII digit, then the whole string is lowercased, so spelling
    variants of the same name collapse to one key.

    :param key: entity name (text).
    :return: normalized key string.
    """
    chars = []
    for ch in key:
        digit = chinese_number.find(ch)
        # find() returns -1 for non-numeral characters; keep those as-is.
        chars.append(str(digit) if digit >= 0 else ch)
    # Accumulate-and-join instead of repeated ``+=`` — avoids the
    # quadratic string-building pattern.
    return ''.join(chars).lower()


def is_valid_entity(x):
    """Return True if ``x`` looks like a usable KG entity.

    A valid entity is a mapping that has both the "@id" and "@type"
    keys and a non-empty "name" value.

    :param x: decoded entity dict.
    :return: bool. (The previous version returned the raw name value
        when truthy; wrapping in ``bool()`` keeps every boolean use of
        this predicate intact while no longer leaking the name.)
    """
    return bool("@id" in x and "@type" in x and x.get("name"))


def get_entity_extractor(keys):
    """Build a function that maps one entity dict to (lookup key, entity) pairs.

    The returned callable projects an entity onto ``keys`` and emits one
    pair per distinct normalized name/alternate name. Entities that fail
    ``is_valid_entity`` yield an empty list.

    :param keys: field names to copy into the projected entity.
    :return: callable suitable for ``RDD.flatMap``.
    """

    def _extract(entity):
        if not is_valid_entity(entity):
            return []

        # Keep only the requested fields of the entity.
        projected = {k: entity[k] for k in keys if k in entity}

        # Every name the entity may be looked up by, de-duplicated after
        # normalization.
        names = [entity["name"]] + list(entity.get("alternateName", []))
        lookup_keys = {normalize_key(name) for name in names}

        return [(lookup, projected) for lookup in lookup_keys]

    return _extract


class KgRedisJsonJob(KgJob):
    """Spark job that turns a KG entity JSON stream into the Redis JSON
    format: (normalized name, list of entity dicts) pairs.
    """

    def process(self, inputs):
        """Build the Redis JSON dataframe from a single entity JSON input.

        :param inputs: list of (dataframe, meta) tuples; exactly one expected.
        :return: single-element list holding the resulting text dataframe.
        :raises JobInputMisMatchError: if ``inputs`` does not contain
            exactly one item.
        """
        if len(inputs) != 1:
            raise JobInputMisMatchError("needs exactly one input json file for a Redis Json Job")

        source_df, _ = inputs[0]

        # Fields copied into each stored entity; extra ones come from config.
        fields = ["@id", "@type", "name"] + self.config.get("extra_keys", [])
        num_partitions = self.config.get("partition", 10)

        extract = get_entity_extractor(fields)

        # Decode each JSON line, fan out to (normalized name, entity)
        # pairs, then collect all entities sharing a name into one list.
        decoded = source_df.rdd.map(lambda row: entity_json_decode(row["value"]))
        keyed = decoded.flatMap(extract)
        grouped = keyed.groupByKey(num_partitions)
        named_entities = grouped.map(lambda pair: (pair[0], list(pair[1])))

        return [to_text_dataframe(named_entities, entity_json_encode)]


# Make this job available to the pipeline under the "redisjson" job type.
KgJob.register("redisjson", KgRedisJsonJob)
