# -*- coding: utf-8 -*-

"""
Generate diff of a graph for StellarDB. Steps:
1. config: schema; input: schema_old.json, entity.json, link.json, entity_old.json, link_old.json
2. generate new schema from entity.json and link.json
3. generate diff: entity_diff.orc, link_diff.orc, hive.hql, loader.json, entity_delete.csv, link_delete.csv
"""

from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division

from typing import List, Dict, Set, AnyStr

from kgpipeline.job import KgJob, JobInputMisMatchError, JobConfigError
from kgpipeline.calc.rdd_diff import rdd_diff_by_id, handle_diff
from kgpipeline.calc.diff import is_dict_same
from kgpipeline.jsonutil import entity_json_decode
from kgpipeline.sparkutil import json_to_text_dataframe, lines_to_text_dataframe, to_simple_typed_dataframe, \
    text_dataframe_to_rdd, text_dataframe_to_json, to_all_string_dataframe
from pyspark.sql.types import TimestampType

import kgpipeline.tw as tw
import logging

logger = logging.getLogger(__name__)


def json_rdd_to_tw_df(rdd, schema):
    """
    Convert a JSON RDD (entity or link) into a DataFrame that matches the
    input format expected by Transwarp StellarDB.
    """
    headers = tw.get_json_rdd_headers(rdd)
    extracted = rdd.map(tw.get_data_extractor(headers))

    typed_fields = tw.get_loader_typed_fields(schema, headers)
    df = to_simple_typed_dataframe(extracted, typed_fields, tw.DATAFRAME_TYPE_MAPPING)

    # Columns declared as "local_datetime" must be stored as Spark timestamps.
    datetime_columns = [name for name, kind in typed_fields if kind == "local_datetime"]
    for name in datetime_columns:
        df = df.withColumn(name, df[name].cast(TimestampType()))

    return df


# Attribute names excluded from the diff comparison: a change to these alone
# does not make an element count as "updated" (see diff_handler_tw).
TW_IGNORED = {"dateModified"}


def diff_handler_tw(dict1, dict2, ignore):  # type: (Dict, Dict, Set[AnyStr]) -> List[Dict]
    """
    Diff handler used for Transwarp releases.

    The StellarDB bulk loader cannot patch individual attributes, so any
    updated element must be re-imported in full (overwrite). Updated and
    newly added elements are therefore both emitted as complete records.
    """
    # Deleted element: emit a stub carrying only the id (and, for links,
    # the endpoints). The missing "@type" lets downstream filters separate
    # deletions from upserts.
    if dict1 is None:
        stub = {"@id": dict2["@id"]}
        for key in ("in", "out"):
            if key in dict2:
                stub[key] = dict2[key]
        return [stub]

    # Unchanged (modulo ignored attributes): nothing to emit.
    if is_dict_same(dict1, dict2, ignore):
        return []

    # Updated or newly added: emit the full current element.
    return [dict1]


class KgTwDiffJob(KgJob):
    """
    Pipeline job that generates the diff artifacts for a Transwarp StellarDB
    release: the full new graph schema, the schema diff, delete-id lists for
    entities and links, a batch-loader description, Hive DDL for the external
    staging tables, and the upserted entity/link DataFrames.

    Expects exactly 5 positional inputs:
      0. old StellarDB schema (JSON)
      1. new entities (JSON)
      2. new links (JSON)
      3. old entities (JSON)
      4. old links (JSON)
    """
    def process(self, inputs):
        # The job contract is positional: exactly the 5 inputs listed above.
        if len(inputs) != 5:
            raise JobInputMisMatchError("needs exactly 5 input files for Transwarp StellarDB Diff Job")

        schema = self.config.get("schema")
        if not schema:
            raise JobConfigError("requires schema parameter!")

        kg_schema = schema.value

        # Graph name / shard count are carried over from the previous
        # release's schema so the new schema targets the same graph.
        old_tw_schema = text_dataframe_to_json(inputs[0][0])
        graph_name = old_tw_schema["graph.name"]
        graph_shard_number = old_tw_schema["graph.shard.number"]

        entity, _ = inputs[1]
        link, _ = inputs[2]

        # start full schema generation
        tw_schema = tw.create_empty_tw_schema(graph_name, graph_shard_number)

        entity = text_dataframe_to_rdd(entity, entity_json_decode)
        if not entity.isEmpty():
            # One vertex table covering every attribute observed in the data.
            headers = tw.get_json_rdd_headers(entity)
            fields = tw.get_schema_typed_fields(kg_schema, headers)
            vertex_table = tw.create_vertex_table_schema(fields, ["kg_type"])
            tw_schema["vertex.tables"].append(vertex_table)

        link = text_dataframe_to_rdd(link, entity_json_decode)
        if not link.isEmpty():
            # One edge table per distinct link type (first "@type" value);
            # all edge tables share the same attribute field list.
            link_types = link.map(lambda x: x["@type"][0]).distinct().collect()
            headers = tw.get_json_rdd_headers(link)
            fields = tw.get_schema_typed_fields(kg_schema, headers)
            for t in link_types:
                edge_table = tw.create_edge_table_schema(t, fields, ["kg_type"])
                tw_schema["edge.tables"].append(edge_table)
        # full schema generation done

        # diff generation: pair new/old elements by "@id" and let
        # diff_handler_tw decide what each pair produces. Deletions come back
        # as stubs without "@type"; updates/additions keep their "@type".
        entity_old, _ = inputs[3]
        link_old, _ = inputs[4]

        entity_old = text_dataframe_to_rdd(entity_old, entity_json_decode)
        link_old = text_dataframe_to_rdd(link_old, entity_json_decode)

        entity_diff = rdd_diff_by_id(entity, entity_old)
        entity_diff = handle_diff(entity_diff, TW_IGNORED, diff_handler_tw)
        entity_delete = entity_diff.filter(lambda x: "@type" not in x)
        entity_upsert = entity_diff.filter(lambda x: "@type" in x)

        link_diff = rdd_diff_by_id(link, link_old)
        link_diff = handle_diff(link_diff, TW_IGNORED, diff_handler_tw)
        link_delete = link_diff.filter(lambda x: "@type" not in x)
        link_upsert = link_diff.filter(lambda x: "@type" in x)
        # diff generation done

        tw_schema_diff = tw.diff_schema(tw_schema, old_tw_schema)

        # diff loader: describes how StellarDB should bulk-load the upserted
        # vertices/edges, plus Hive DDL for the external staging tables the
        # loader reads from ($ENTITY_PATH$ / $LINK_PATH$ are substituted at
        # deploy time — presumably by the release tooling; verify).
        tw_loader = tw.create_empty_load_schema()  # loader for diff
        output = []
        hiveql = []

        # NOTE(review): the leading truthiness check is redundant — RDD
        # objects are always truthy, so isEmpty() alone decides the branch.
        if entity_upsert and not entity_upsert.isEmpty():
            vertex_df = json_rdd_to_tw_df(entity_upsert, kg_schema)
            output.append(vertex_df)
            fields = tw.get_schema_typed_fields(kg_schema, vertex_df.schema.names)
            tw_loader["loaders"].append(tw.create_vertex_loader(fields))

            loader_fields = tw.get_loader_typed_fields(kg_schema, vertex_df.schema.names)
            entity_ext_tbl = tw.generate_external_table_hql("{0}_entity".format(graph_name), loader_fields,
                                                            "$ENTITY_PATH$")
            hiveql.extend(entity_ext_tbl)
        else:
            # Keep the output list positional: None placeholder when there is
            # nothing to upsert.
            output.append(None)

        if link_upsert and not link_upsert.isEmpty():
            link_df = json_rdd_to_tw_df(link_upsert, kg_schema)
            output.append(link_df)

            fields = tw.get_schema_typed_fields(kg_schema, link_df.schema.names)
            tw_loader["loaders"].append(tw.create_edge_loader(fields))

            loader_fields = tw.get_loader_typed_fields(kg_schema, link_df.schema.names)
            link_ext_tbl = tw.generate_external_table_hql("{0}_link".format(graph_name), loader_fields,
                                                          "$LINK_PATH$")
            hiveql.extend(link_ext_tbl)
        else:
            output.append(None)
        # diff loader done

        # Outputs, in order: full schema, schema diff, entity delete ids,
        # link delete triples (id/in/out), loader JSON, Hive DDL lines, then
        # the entity and link upsert DataFrames (or None placeholders).
        # NOTE(review): the link-delete mapping indexes x["in"]/x["out"]
        # unconditionally, while diff_handler_tw only copies those keys when
        # present in the old record — assumes every link record carries
        # in/out; confirm against the link schema.
        res = [json_to_text_dataframe(tw_schema), json_to_text_dataframe(tw_schema_diff),
               to_all_string_dataframe(entity_delete.map(lambda x: (x["@id"],)), ["id"]),
               to_all_string_dataframe(link_delete.map(lambda x: (x["@id"], x["in"], x["out"])), ["id", "in", "out"]),
               json_to_text_dataframe(tw_loader), lines_to_text_dataframe(hiveql)]
        res.extend(output)

        return res


# Register this job under the "twdiff" job type so pipeline configs can
# refer to it by name.
KgJob.register("twdiff", KgTwDiffJob)