# -*- coding: utf-8 -*-

"""
kg json to application/n-quads job

See `https://www.w3.org/TR/n-quads/` for the format

This is used as an export format to RDF based systems

Since n-quads is a flat order independent format, it is well suited to be processed by spark in parallel

We need to use (merged full) schema to generate rdfs definitions

We need graph id and a global prefix as the namespace for this graph

We need several standard namespaces: xsd, rdf, rdfs, owl, etc

The recommended file extension is `.nq` so the output should be graph.nq
"""

from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division

from typing import Text

from kgpipeline.job import KgJob, JobInputMisMatchError, JobConfigError
from kgpipeline.jsonutil import entity_json_decode
from kgpipeline.sparkutil import to_text_dataframe, current_context

from kgpipeline.rdf import cns_item_to_nquads, cns_schema_to_nquads
from pyspark import Broadcast, RDD
from pyspark.sql import DataFrame


def convert_one(df, broadcast_schema, default_graph_id):  # type: (DataFrame, Broadcast, Text) -> RDD
    """Convert one JSON-column dataframe into a deduplicated RDD of n-quad lines.

    Each row's first column is decoded from JSON into an entity dict, then
    expanded into zero or more n-quad strings under ``default_graph_id``.
    ``broadcast_schema`` is a Spark Broadcast wrapping the merged schema, so
    only ``.value`` is touched inside the executors.
    """
    def row_to_nquads(row):
        # row[0] holds the serialized entity JSON; decode, then emit n-quads.
        entity = entity_json_decode(row[0])
        return cns_item_to_nquads(broadcast_schema.value, entity, default_graph_id)

    # distinct() is safe here because n-quads is a flat, order-independent format.
    return df.rdd.flatMap(row_to_nquads).distinct()


class KgNQuadsJob(KgJob):
    """Job that exports entity/link JSON dataframes as application/n-quads.

    Produces two text dataframes: the RDFS schema definitions and the merged
    graph data (entities union links), suitable for writing out as ``graph.nq``.
    """

    def process(self, inputs):
        """Validate config, then convert both inputs into n-quad dataframes.

        ``inputs`` must hold exactly two (dataframe, meta) pairs: entity.json
        first, link.json second.  Raises JobInputMisMatchError on a wrong
        input count and JobConfigError when graph_id or schema is missing.
        Returns ``[schema_dataframe, graph_dataframe]``.
        """
        if len(inputs) != 2:
            raise JobInputMisMatchError("needs exactly two input json files (entity.json and link.json) "
                                        "for N-Quads Job")

        # Config checks happen in this order so callers see graph_id errors first.
        graph_id = self.config.get("graph_id")
        if not graph_id:
            raise JobConfigError("Needs Graph ID to proceed!")

        broadcast_schema = self.config.get("schema")
        if not broadcast_schema:
            raise JobConfigError("Needs schema to proceed!")

        # NOTE(review): both this line and convert_one access ``.value``, so the
        # config entry is assumed to already be a Spark Broadcast — confirm that
        # whoever populates the config broadcasts the schema first.
        schema_rdd = current_context().parallelize(cns_schema_to_nquads(broadcast_schema.value), 1)

        (entity_df, _), (link_df, _) = inputs

        entity_rdd = convert_one(entity_df, broadcast_schema, graph_id)
        link_rdd = convert_one(link_df, broadcast_schema, graph_id)

        return [to_text_dataframe(schema_rdd),
                to_text_dataframe(entity_rdd.union(link_rdd))]


# Module-level side effect: register this job under the name "nquads" so the
# pipeline can instantiate KgNQuadsJob by name.
KgJob.register("nquads", KgNQuadsJob)
