# -*- coding: utf-8 -*-

"""
kg validation job

validate cns_item against schema
1. basic validation
   * presence of @type and correct @type type (a list of string, not a string)
2. system property
   * @id must be present if Thing in @type
3. definition check
   * class not defined in schema or imported schema
   * property not defined in schema or imported schema
4. UCP template definition
   * undefined class-property binding, unable to find a template for this property
     based on any classes defined in @type

5. CP range

6. CP template cardinality
   1. minCard each instance of 'MutualFund' should have at least one fundCode
   2. maxCard each instance of 'MutualFund' should have at most one fundCode
"""

from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division

import re

from typing import List, AnyStr
from six import string_types

from operator import add

from kgpipeline.util import wrap_broadcast_variable
from kgpipeline.job import KgJob
from kgpipeline.jsonutil import entity_json_decode, entity_json_encode
from kgpipeline.sparkutil import to_all_string_dataframe
from kgpipeline.cnschema import KgCnSchema, SYSTEM_PROPERTIES, is_data_type_compatible

import logging

logger = logging.getLogger(__name__)

ISO8601_REGEX_DATE = re.compile(r"^\d{4}-\d{2}-\d{2}$")
ISO8601_REGEX_DATETIME = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$")


def iso8601_date_parse(date_str):
    """Return a regex match if *date_str* is a YYYY-MM-DD string, else None.

    Non-string input (including None) yields None rather than raising.
    """
    if not isinstance(date_str, string_types):
        return None
    return ISO8601_REGEX_DATE.match(date_str)


def iso8601_datetime_parse(date_str):
    """Return a regex match if *date_str* is a YYYY-MM-DDTHH:MM:SS string, else None.

    Non-string input (including None) yields None rather than raising.
    """
    if not isinstance(date_str, string_types):
        return None
    return ISO8601_REGEX_DATETIME.match(date_str)


def count_template_cp_binding(schema, cns_item):
    # type: (KgCnSchema, dict) -> List[tuple]
    """Emit ((key, ...), count) pairs for coverage aggregation of one item.

    Keys produced:
      * ("all", type), 1            -- one per distinct declared @type
      * ("cp",  main, super, p), n  -- 1 if property p is present, else 0
      * ("cpt", main, super, p), 1  -- template slot total (denominator)
      * ("ucp", type0, p), 1        -- property with no matching template

    BUGFIX: the inner loop previously reassigned ``types`` to the super-type
    chain of each main type, so the "ucp" key used the wrong class name.
    The super-type list now has its own name.
    """
    res = []
    types = cns_item["@type"]

    # count every occurrence of a type as one
    for t in set(types):
        res.append((("all", t), 1))

    # count class property bindings of main types
    main_types = schema.get_main_types(types)

    properties = set(cns_item.keys())
    properties.difference_update(SYSTEM_PROPERTIES)

    covered_prop = set()

    for main_type in main_types:
        super_types = schema.get_super_types(main_type)
        prop_set = set()
        for t in super_types:
            plist = schema.get_property_list(t)
            for p in plist.keys():
                if p in properties:
                    covered_prop.add(p)
                    res.append((("cp", main_type, t, p), 1))
                    res.append((("cpt", main_type, t, p), 1))
                elif p not in prop_set:  # duplicate definition in sub type will override super type
                    res.append((("cp", main_type, t, p), 0))
                    res.append((("cpt", main_type, t, p), 1))
            prop_set.update(plist.keys())

    # count remaining properties as unbind class property, keyed by the
    # item's first declared type
    for p in properties - covered_prop:
        res.append((("ucp", types[0], p), 1))

    return res


def validate_type(schema, cns_item):  # type: (KgCnSchema, dict) -> List[dict]
    """Validate (and possibly rewrite) the @type of one item.

    * A string @type is parsed into a list (warn_rewrite_item).
    * Types without a schema definition are dropped from @type, each with
      a warn_rewrite_item bug.
    * If any defined type remains, system properties are checked (@id
      always; in/out for CnsLink) and the bug list is returned.
    * Otherwise a warn_validate "missing @type" bug is appended.

    BUGFIX: the system-property checks and the final ``return`` were nested
    inside the ``for t in types`` loop, so validation stopped after the
    first defined type: later undefined types were never reported and the
    rewritten @type could be truncated. The checks now run after the loop.
    """
    bugs = []
    types = cns_item.get("@type")

    if isinstance(types, string_types):
        # tolerate "[A, B]" / "A" string forms; drop empty fragments
        types = [item.strip() for item in types.strip("[]").split(",") if item.strip()]

        if types:
            bugs.append({
                "category": "warn_rewrite_item",
                "description": "@type got string value",
                "item": cns_item
            })

    if isinstance(types, list):
        new_types = []
        for t in types:
            if not schema.has_definition(t):
                bugs.append({
                    "category": "warn_rewrite_item",
                    "description": "class {0} not defined".format(t),
                    "item": cns_item
                })
            else:
                new_types.append(t)

        if new_types:
            # rewrite @type to keep only the defined classes
            cns_item["@type"] = new_types

            if "@id" not in cns_item:
                bugs.append({
                    "category": "warn_validate_system_property",
                    "description": "instance missing @id",
                    "item": cns_item
                })

            if "CnsLink" in new_types:
                if "in" not in cns_item:
                    bugs.append({
                        "category": "warn_validate_system_property",
                        "description": "instance of [CnsLink] missing in",
                        "item": cns_item
                    })
                if "out" not in cns_item:
                    bugs.append({
                        "category": "warn_validate_system_property",
                        "description": "instance of [CnsLink] missing out",
                        "item": cns_item
                    })
            return bugs

    # no usable @type at all (absent, wrong type, or nothing defined)
    bugs.append({
        "category": "warn_validate",
        "description": "missing @type and no expected @type",
        "item": cns_item
    })
    return bugs


def is_valid_cns_item(cns_item):  # type: (dict) -> bool
    """Cheap structural check: non-empty list @type, @id for Thing,
    and in/out endpoints for CnsLink instances."""
    types = cns_item.get("@type")
    if not isinstance(types, list) or not types:
        return False
    missing_id = "Thing" in types and "@id" not in cns_item
    missing_endpoint = "CnsLink" in types and not ("in" in cns_item and "out" in cns_item)
    return not (missing_id or missing_endpoint)


def validate_template_cp_binding(schema, cns_item):  # type: (KgCnSchema, dict) -> List[dict]
    """Validate every non-system property of *cns_item* against the
    class-property templates of its declared @type classes.

    Properties matched by a template are range/cardinality-checked via
    validate_property; properties with no template on any declared class
    get a warn_validate_template_regular bug each.
    """
    bugs = []
    types = cns_item["@type"]
    properties = set(cns_item.keys())
    properties.difference_update(SYSTEM_PROPERTIES)

    if "CnsLink" in types:
        # BUGFIX: use discard, not remove -- a malformed link may lack
        # in/out (validate_type reports that case separately) and remove
        # would raise KeyError here.
        properties.discard("in")
        properties.discard("out")

    for t in types:
        plist = schema.get_property_list(t)
        if plist:
            for p, prop_def in plist.items():
                if p in properties:
                    properties.remove(p)
                    bugs.extend(validate_property(schema, prop_def, t, p, cns_item))

    # anything left over is bound to no template of any declared class
    for p in properties:
        bugs.append({
            "category": "warn_validate_template_regular",
            "description": "unable to find a template for property=[{0}] based"
                           " on classes defined in @type=[{1}]".format(p, ", ".join(types)),
            "class": types[0],
            "property": p,
            "value": cns_item
        })
    return bugs


def validate_property(schema, prop, t, p, cns_item):  # type: (KgCnSchema, dict, AnyStr, AnyStr, dict) -> List[dict]
    """Validate one property value of *cns_item* against its template.

    Checks, in order: min/max cardinality, dict values whose range is not
    declared as a data structure, datatype compatibility, and ISO-8601
    format for Date / DateTime ranges.

    :param prop: template definition with "range" and optional "min"/"max"
    :param t: class name the template was found on (for reporting)
    :param p: property name
    :returns: list of bug dicts (empty when the value is clean)
    """
    bugs = []

    pv = cns_item.get(p)

    r = prop["range"]
    min_card = prop.get("min", 0)
    max_card = prop.get("max")

    # a single (non-list) value counts as cardinality 1
    card = len(pv) if isinstance(pv, list) else 1

    def _bug(category, description, expected, actual, value):
        # all bug reports share the same shape; build them in one place
        return {
            "category": category,
            "description": description,
            "class": t,
            "property": p,
            "expected": expected,
            "actual": actual,
            "item": cns_item,
            "item_name": cns_item.get("name"),
            "item_value": value
        }

    if card < min_card:
        bugs.append(_bug("warn_validate_template_regular", "minCardinality",
                         min_card, card, pv))

    if max_card and card > max_card:
        bugs.append(_bug("warn_validate_template_regular", "maxCardinality",
                         max_card, card, pv))

    if card == 0:
        return bugs

    if not isinstance(pv, list):
        pv = [pv]

    for v in pv:
        actual_type = type(v)
        if schema.is_cns_data_structure(r):
            continue

        # BUGFIX: the original tested isinstance(actual_type, dict), i.e. a
        # *type object* against dict, which is always False, so this bug was
        # never reported. The value itself must be tested.
        if isinstance(v, dict):  # possible data structure
            bugs.append(_bug("warn_validate_datastructure",
                             "value range not specified as datastructure",
                             r, str(actual_type), v))

        if schema.is_data_type(r):
            if not is_data_type_compatible(r, v):
                bugs.append(_bug("warn_validate_datatype",
                                 "range value datatype mismatch",
                                 r, str(actual_type), v))

            if r.lower() == "date" and not iso8601_date_parse(v):
                bugs.append(_bug("warn_validate_datatype",
                                 "range value is invalid Date string",
                                 "2018-07-01", v, v))

            if r.lower() == "datetime" and not iso8601_datetime_parse(v):
                bugs.append(_bug("warn_validate_datatype",
                                 "range value is invalid DateTime string",
                                 "2018-07-01T07:02:01", v, v))
    return bugs


def split_count(schema, row):
    """Flatten one ((main_type, super_type, property), (count, total)) pair
    into a 9-tuple report row of strings (coverage = count/total, 4 d.p.;
    super_type collapses to "/" when it equals main_type)."""
    (main_type, super_type, ref_property), (count, total) = row

    main_type_zh = schema.get_definition(main_type).get("nameZh")
    super_type_zh = schema.get_definition(super_type).get("nameZh")
    prop_def = schema.get_definition(ref_property)
    ref_property_zh = prop_def.get("nameZh")
    ref_property_range = prop_def.get("range")

    if super_type == main_type:
        super_type = "/"
        super_type_zh = "/"

    if total > 0:
        coverage = "{0:.4f}".format(count / total)
    else:
        coverage = ""

    return (main_type, super_type, ref_property, ref_property_range,
            main_type_zh, super_type_zh, ref_property_zh, str(count), coverage)


def fill_entity_info(schema, row):
    """Expand a (main_type, count) pair into a report row with the Chinese
    name and the " < "-joined super-type chain."""
    main_type, count = row
    zh_name = schema.get_definition_zh_name(main_type)
    lineage = " < ".join(schema.get_super_types(main_type))
    return main_type, zh_name, lineage, count


def generate_report(broadcast_schema, rdd):
    """Build two dataframes from the aggregated count RDD:
    (1) per-type instance counts and (2) class/property coverage, where
    coverage joins "cp" (present) counts against "cpt" (template slot)
    totals on the (main_type, super_type, property) key."""
    cp_fields = ["main_type", "super_type", "property", "property_range",
                 "main_type_zh", "super_type_zh", "property_zh", "count", "coverage"]

    # (("all", type), n) -> (type, n) -> enriched row
    type_rows = (rdd
                 .filter(lambda x: x[0][0] == "all")
                 .map(lambda x: (x[0][1], x[1]))
                 .map(lambda x: fill_entity_info(broadcast_schema.value, x)))
    types_report_df = to_all_string_dataframe(
        type_rows, ["main_type", "main_type_zh", "super_types", "count"]).orderBy("main_type")

    # strip the "cp"/"cpt" tag so both sides share the same join key
    covered = rdd.filter(lambda x: x[0][0] == "cp").map(lambda x: (x[0][1:], x[1]))
    totals = rdd.filter(lambda x: x[0][0] == "cpt").map(lambda x: (x[0][1:], x[1]))
    cp_rows = covered.join(totals).map(lambda x: split_count(broadcast_schema.value, x))

    cp_report_df = to_all_string_dataframe(cp_rows, cp_fields).orderBy(
        "main_type", "super_type", "property")

    return types_report_df, cp_report_df


def generate_relation_report(broadcast_schema, rdd):
    """Turn ((in_type, relation_type, out_type), count) pairs into a
    dataframe with Chinese names resolved for all three types."""
    columns = ["relation_type", "relation_type_zh", "in_type", "in_type_zh", "out_type", "out_type_zh", "count"]

    def to_row(pair):
        (in_type, rel_type, out_type), count = pair
        zh = broadcast_schema.value.get_type_name_zh
        return (rel_type, zh(rel_type),
                in_type, zh(in_type),
                out_type, zh(out_type),
                count)

    rows = rdd.map(to_row)
    return to_all_string_dataframe(rows, columns).orderBy("relation_type", "in_type", "out_type")


def generate_property_error_report(broadcast_schema, rdd):
    """Turn flat 10-tuples of property-validation errors into a dataframe,
    resolving Chinese names for the class (index 0) and property (index 1)."""
    columns = ["type", "type_zh", "property", "property_zh", "category", "description", "count", "expected", "actual",
               "item_name", "item_value", "sample"]

    def to_row(x):
        schema = broadcast_schema.value
        return (x[0], schema.get_type_name_zh(x[0]),
                x[1], schema.get_definition_zh_name(x[1]),
                x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9])

    rows = rdd.map(to_row)
    return to_all_string_dataframe(rows, columns).orderBy("type", "property")


def get_main_types_as_string(schema, entity):
    """Map an entity to (@id, "MainA|MainB") -- its main types joined by '|'."""
    main_types = schema.get_main_types(entity["@type"])
    return entity["@id"], "|".join(main_types)


class KgValidationJob(KgJob):
    """Spark job that validates knowledge-graph items against a CnSchema.

    Consumes two JSON text inputs (entities and links) and produces five
    report dataframes: entity type counts, relation counts, class/property
    coverage, @type error summary, and property error summary.
    """

    # key this job type is registered under (see KgJob.register at module end)
    type_key = "validation"

    def __init__(self, config, reader):
        # type: (dict, object) -> None
        super(KgValidationJob, self).__init__(config, reader)
        # NOTE(review): presumably a Spark broadcast wrapping a KgCnSchema --
        # downstream helpers read `.value` on it; confirm against config builder.
        self._schema = self.config["schema"]
        # optional parallelism hint; not referenced in process() below
        self._parallelism = self.config.get("parallelism")

    def process(self, inputs):
        """
        run a kg validation job

        input is 2 json files: entity.json and link.json
        output is error report, coverage report, and entity list
        """
        logger.info("Start KgValidationJob: {} ...".format(self.name))

        if len(inputs) != 2:
            logger.error("This job takes exactly 2 text input (json) sources... abort!")
            return []

        rdd_list = []

        # each input row carries one JSON-encoded item in its "value" column
        for df, _ in inputs:
            rdd_list.append(df.rdd.map(lambda x: entity_json_decode(x["value"])))

        entity, link = rdd_list

        # keep only structurally valid items (list @type, @id for Thing,
        # in/out for CnsLink); invalid ones still go through validate_type below
        valid_entity = entity.filter(is_valid_cns_item)
        valid_link = link.filter(is_valid_cns_item)

        mapper = wrap_broadcast_variable(count_template_cp_binding, self._schema)

        # aggregate ("all"/"cp"/"cpt"/"ucp", ...)-keyed counts across all items
        entity_count = valid_entity.flatMap(mapper).reduceByKey(add)
        link_count = valid_link.flatMap(mapper).reduceByKey(add)
        all_count = entity_count.union(link_count)

        types_report, property_count_report = generate_report(self._schema, all_count)

        entity_main_type_mapper = wrap_broadcast_variable(get_main_types_as_string, self._schema)
        # (@id, "MainA|MainB") pairs used to resolve link endpoints to type names
        entity_types = valid_entity.map(entity_main_type_mapper)

        # (in_id, (link_type, out_id)) -- keyed by the source endpoint first
        link_type_with_in_out = valid_link.map(lambda x: (x["in"], (x["@type"][0], x["out"])))

        # two leftOuterJoins resolve the in- and out-entity ids to their main
        # types, then each (in_type, link_type, out_type) triple is counted
        relation_cnt = link_type_with_in_out.leftOuterJoin(entity_types).map(
            lambda x: (x[1][0][1], (x[1][1], x[1][0][0]))).leftOuterJoin(entity_types).map(
            lambda x: (x[1][0][0], x[1][0][1], x[1][1])
        ).map(lambda x: ((x[0], x[1], x[2]), 1)).reduceByKey(add)

        relation_report = generate_relation_report(self._schema, relation_cnt)

        type_validator = wrap_broadcast_variable(validate_type, self._schema)

        # @type checks run on ALL items, not just the valid subset
        entity_type_bugs = entity.flatMap(type_validator)
        link_type_bugs = link.flatMap(type_validator)

        # group by (category, description); keep the first sample item and a count
        type_bugs = entity_type_bugs.union(link_type_bugs).map(
            lambda x: ((x["category"], x["description"]), (x["item"], 1))).reduceByKey(
            lambda x, y: (x[0], x[1] + y[1])).map(
            lambda x: (x[0][0], x[0][1], x[1][1], entity_json_encode(x[1][0]))
        )

        type_error_report = to_all_string_dataframe(type_bugs, ["category", "description", "count", "sample"])

        property_validator = wrap_broadcast_variable(validate_template_cp_binding, self._schema)

        entity_property_bugs = valid_entity.flatMap(property_validator)
        link_property_bugs = valid_link.flatMap(property_validator)

        # group by (class, property, category, description); keep one sample
        # payload (expected/actual/item details) plus an occurrence count
        property_bugs = entity_property_bugs.union(link_property_bugs).map(
            lambda x: ((x["class"], x["property"], x["category"], x["description"]), (
                {
                    "expected": x.get("expected"),
                    "actual": x.get("actual"),
                    "item": entity_json_encode(x.get("item")) if x.get("item") is not None else None,
                    "item_name": x.get("item_name"),
                    "item_value": x.get("item_value")
                }, 1
            ))
        ).reduceByKey(lambda x, y: (x[0], x[1] + y[1])).map(
            lambda x: (x[0][0], x[0][1], x[0][2], x[0][3], x[1][1],
                       x[1][0]["expected"], x[1][0]["actual"],
                       x[1][0]["item_name"], x[1][0]["item_value"], x[1][0]["item"])
        )

        property_error_report = generate_property_error_report(self._schema, property_bugs)

        logger.info("Processing {} done.".format(self.name))
        return [types_report, relation_report, property_count_report, type_error_report, property_error_report]


# register this job implementation under the "validation" type key
KgJob.register("validation", KgValidationJob)
