# -*- coding: utf-8 -*-

"""
from cnSchema pkl and kg data to Transwarp StellarDB data:
1. generate a json based schema for Transwarp
2. generate entity.csv, various link csv, and corresponding load.json
"""

from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division

from kgpipeline.job import KgJob, JobInputMisMatchError, JobConfigError
from kgpipeline.jsonutil import entity_json_decode
from kgpipeline.sparkutil import json_to_text_dataframe, lines_to_text_dataframe, to_simple_typed_dataframe
from kgpipeline.tools.json2csv import get_json_rdd_keys
from pyspark.sql.types import TimestampType

import kgpipeline.tw as tw
import logging

logger = logging.getLogger(__name__)


def json_rdd_to_tw_df(rdd, schema):
    """
    Convert a JSON RDD (entity or link records) into a DataFrame shaped for
    Transwarp StellarDB input.

    :param rdd: RDD of decoded JSON records.
    :param schema: the KG schema used to type the resulting columns.
    :return: a Spark DataFrame with StellarDB-compatible column names/types.
    """
    raw_keys = get_json_rdd_keys(rdd)
    tw_headers = tw.json_keys_to_tw(raw_keys)

    # Flatten each JSON record into a row matching the computed headers.
    extracted = rdd.map(tw.get_data_extractor(tw_headers))

    typed_fields = tw.get_loader_typed_fields(schema, tw_headers)
    frame = to_simple_typed_dataframe(extracted, typed_fields, tw.DATAFRAME_TYPE_MAPPING)

    # Promote "local_datetime" columns to real Spark timestamp columns.
    for col_name, col_type in typed_fields:
        if col_type == "local_datetime":
            frame = frame.withColumn(col_name, frame[col_name].cast(TimestampType()))

    return frame


class KgTwDataJob(KgJob):
    """
    Job that converts KG entity/link data into Transwarp StellarDB inputs.

    Produces, in order: the graph schema JSON, the loader JSON, the
    external-table HiveQL, then the entity DataFrame and the link DataFrame
    (each replaced by ``None`` when its input is empty, so positions are
    stable for downstream consumers).
    """

    def process(self, inputs):
        """
        Build the StellarDB schema, loader config, HiveQL, and data frames.

        :param inputs: exactly two (dataframe, meta) pairs — entity first,
                       link second.
        :return: list ``[schema_df, loader_df, hiveql_df, entity_df_or_None,
                 link_df_or_None]``.
        :raises JobInputMisMatchError: when ``inputs`` is not exactly 2 items.
        :raises JobConfigError: when ``schema`` or ``graph_name`` is missing
                                from the job config.
        """
        if len(inputs) != 2:
            raise JobInputMisMatchError("needs exactly 2 input files for Transwarp StellarDB Data Conversion Job")

        schema = self.config.get("schema")
        if not schema:
            raise JobConfigError("requires schema parameter!")

        graph_name = self.config.get("graph_name")
        if not graph_name:
            raise JobConfigError("requires graph_name parameter!")

        # Default shard count for the StellarDB graph when not configured.
        shard_number = self.config.get("shard_number", 3)

        kg_schema = schema.value

        entity, _ = inputs[0]
        link, _ = inputs[1]

        tw_schema = tw.create_empty_tw_schema(graph_name, shard_number)
        tw_loader = tw.create_empty_load_schema()

        output = []
        hiveql = []

        if entity and not entity.rdd.isEmpty():
            entity = entity.rdd.map(lambda x: entity_json_decode(x["value"]))
            vertex_df = json_rdd_to_tw_df(entity, kg_schema)
            output.append(vertex_df)
            fields = tw.get_schema_typed_fields(kg_schema, vertex_df.schema.names)

            vertex_table = tw.create_vertex_table_schema(fields, [])
            tw_schema["vertex.tables"].append(vertex_table)

            loader_fields = tw.get_loader_typed_fields(kg_schema, vertex_df.schema.names)
            tw_loader["loaders"].append(tw.create_vertex_loader(fields))
            entity_ext_tbl = tw.generate_external_table_hql("{0}_entity".format(graph_name), loader_fields,
                                                            "$ENTITY_PATH$")
            hiveql.extend(entity_ext_tbl)
        else:
            # BUGFIX: keep the positional output contract stable. The link
            # branch already appends None when empty; without this else, an
            # empty entity input shifted the link DataFrame into the entity
            # slot of the result list.
            output.append(None)

        if link and not link.rdd.isEmpty():
            # Cached because the RDD is traversed more than once below
            # (distinct link types plus the DataFrame conversion).
            link = link.rdd.map(lambda x: entity_json_decode(x["value"])).cache()
            link_types = link.map(lambda x: x["@type"][0]).distinct().collect()

            link_df = json_rdd_to_tw_df(link, kg_schema)
            output.append(link_df)

            fields = tw.get_schema_typed_fields(kg_schema, link_df.schema.names)
            tw_loader["loaders"].append(tw.create_edge_loader(fields))

            loader_fields = tw.get_loader_typed_fields(kg_schema, link_df.schema.names)
            link_ext_tbl = tw.generate_external_table_hql("{0}_link".format(graph_name), loader_fields,
                                                          "$LINK_PATH$")
            hiveql.extend(link_ext_tbl)

            # One edge table per distinct "@type" found in the link data.
            for t in link_types:
                edge_table = tw.create_edge_table_schema(t, fields, ["__EXTRAID"])
                tw_schema["edge.tables"].append(edge_table)
        else:
            output.append(None)

        res = [json_to_text_dataframe(tw_schema), json_to_text_dataframe(tw_loader), lines_to_text_dataframe(hiveql)]
        res.extend(output)

        return res


KgJob.register("twdata", KgTwDataJob)
