# -*- coding: utf-8 -*-

"""
生成schema.pkl，保存在内存中
"""

from __future__ import unicode_literals
from __future__ import absolute_import

import argparse

from typing import Iterable, Dict

from kgpipeline.tools.show_schema import show_schema
from kgpipeline.util import load_json, save_json, save_pickle
from kgpipeline.logger import setup_default_logging

import logging

logger = logging.getLogger(__name__)


class SchemaImportError(Exception):
    """Raised when the schema import graph cannot be resolved: a dependency
    is missing, there is no start schema, or there are multiple start schemas."""
    pass


def create_full_schema(schemas):  # type: (Iterable[Dict]) -> Dict
    """
    Resolve import dependencies and merge all schemas into one.

    Exactly one schema must not be imported by any other (the "start"
    schema); its metadata (identifier, name, description, version,
    datePublished) becomes the metadata of the merged result. Definitions
    ("@graph") and templates are concatenated in breadth-first import order,
    start schema first.

    :param schemas: iterable of schema dicts, each parsed from a jsonld file
    :return: merged schema dict with combined "@graph" and "template" lists
    :raises SchemaImportError: if an imported schema is not provided, or
        there is not exactly one start schema
    """

    # schemas may be a one-shot iterator; materialize so we can iterate twice
    schemas = list(schemas)
    schema_index = {schema["identifier"]: schema for schema in schemas}
    # identifier -> set of identifiers that import it
    schema_being_depended = {schema["identifier"]: set() for schema in schemas}

    for schema in schemas:
        schema_id = schema["identifier"]
        # a schema without an "import" key simply has no dependencies
        for i in schema.get("import", []):
            if i not in schema_index:
                raise SchemaImportError(
                    "{0} imported {1} but {1} jsonld is not provided".format(schema_id, i))
            else:
                schema_being_depended[i].add(schema_id)

    # the start schema is the unique one that nobody imports
    top_schema = None

    for schema_id, s in schema_being_depended.items():
        if len(s) == 0:
            if top_schema is None:
                top_schema = schema_id
            else:
                raise SchemaImportError("there are multiple start schemas provided: only one is supported!")

    if top_schema is None:
        raise SchemaImportError("there is no start schema provided!")

    # Merge all schemas: breadth-first walk of the import graph starting at
    # the start schema, visiting each schema at most once.
    definitions = []
    templates = []

    imported = set()
    import_set = {top_schema}
    while import_set:
        temp_set = set()
        for s in import_set:
            if s not in imported:
                logger.debug("loading {}".format(s))
                d = schema_index[s]["@graph"]
                t = schema_index[s]["template"]
                logger.debug("{} definitions, {} templates".format(len(d), len(t)))
                definitions.extend(d)
                templates.extend(t)
                temp_set.update(schema_index[s].get("import", []))
                imported.add(s)
            else:
                logger.debug("{} already imported".format(s))
        import_set = temp_set

    # metadata comes from the start schema; missing fields become None
    res = {}
    for prop in ["identifier", "name", "description", "version", "datePublished"]:
        res[prop] = schema_index[top_schema].get(prop)
    res["@graph"] = definitions
    res["template"] = templates

    logger.info("Imported schema {} ({}) version {}, published at {}"
                .format(res["identifier"], res["name"], res["version"], res["datePublished"]))

    logger.info(res["description"])
    logger.info("There are {} definitions imported, {} templates imported."
                .format(len(res["@graph"]), len(res["template"])))

    return res


def is_complex_type_range(type_name, types):
    """Return True if *type_name* names a class (a link target) rather than a
    data type, judged by its inheritance chain in *types*; unknown names are
    logged and treated as non-complex."""
    hierarchy = types.get(type_name)
    if hierarchy is None:
        logger.warning("Invalid range type: {}".format(type_name))
        return False
    return not ("DataType" in hierarchy or "CnsDataStructure" in hierarchy)


PROP_CLASS = "CnsProperty"  # rdf:Property
CLASS_CLASS = "CnsClass"  # rdf:Class


def main(schema, schema_file):
    """
    Build the in-memory schema view from a merged schema and save it.

    The output dict contains:
      - "types": class name -> inheritance chain (the class itself first,
        then its ancestors, root last)
      - "slinks": names of properties whose range is a class (i.e. links)
      - "plist": class name -> {attribute property -> range / alternate
        names / cardinality}
      - "index": name -> detail info for every class/property definition
      - start-schema metadata (identifier, name, description, version,
        datePublished)

    :param schema: merged schema dict (output of create_full_schema)
    :param schema_file: output path; a name ending in ``json`` is saved as
        JSON, anything else as pickle
    """
    types = {}  # class name -> inheritance chain (type hierarchy)
    slinks = {}  # simple link index: property name -> [name, "CnsLink"]
    plist = {}  # class -> attribute properties: critical info for generation
    definition_index = {}  # name -> detail info

    # Scan all class definitions
    for t in schema["@graph"]:
        if t["@type"][0] == CLASS_CLASS:
            type_name = t["name"]
            if type_name not in types:
                types[type_name] = [type_name]

            parent_class = t.get("rdfs:subClassOf")
            if parent_class:
                types[type_name].append(parent_class[0])

            definition_index[type_name] = {
                "category": "class",
                "nameZh": t.get("nameZh", "")
            }

    # Expand each [class, parent] pair into the full inheritance chain by
    # repeatedly appending the last ancestor's direct parent. The `seen`
    # guard breaks out of cyclic rdfs:subClassOf declarations, which would
    # otherwise loop forever.
    for t, h in types.items():
        seen = set(h)
        while h[-1] in types and len(types[h[-1]]) > 1:
            parent = types[h[-1]][1]
            if parent in seen:
                logger.error("Cyclic rdfs:subClassOf detected at {}; truncating hierarchy".format(t))
                break
            h.append(parent)
            seen.add(parent)

    # Scan all property definitions
    for t in schema["@graph"]:
        if t["@type"][0] == PROP_CLASS:
            prop_name = t["name"]
            if t["category"] == "link":
                slinks[prop_name] = [prop_name, "CnsLink"]
                definition_index[prop_name] = {
                    "category": "link",
                    "nameZh": t.get("nameZh", ""),
                    "range": t["range"]
                }
            elif t["category"] == "attribute":
                definition_index[prop_name] = {
                    "category": "attribute",
                    "nameZh": t.get("nameZh", ""),
                    "range": t["range"],
                    "alternateName": t.get("alternateName", [])
                }
            else:
                logger.error("Property definition {} has a category {} that is neither link nor attribute"
                             .format(prop_name, t["category"]))

    # Attach template-declared properties to their classes; templates for
    # unknown classes are silently skipped.
    for t in schema["template"]:
        type_name = t["refClass"]
        if type_name in types:
            if type_name not in plist:
                plist[type_name] = {}

            # property has to specify its range (data type) and its alternate name
            ref_property = t["refProperty"]
            prop_range = t["propertyRange"]

            is_link = is_complex_type_range(prop_range, types)

            # templates may introduce properties with no standalone definition
            if ref_property not in definition_index:
                if is_link:
                    definition_index[ref_property] = {
                        "category": "link",
                        "nameZh": t.get("propertyNameZh"),
                        "range": prop_range
                    }
                else:
                    definition_index[ref_property] = {
                        "category": "attribute",
                        "nameZh": t.get("propertyNameZh"),
                        "range": prop_range,
                        "alternateName": t.get("alternateName", [])
                    }

            if is_link:
                slinks[ref_property] = [ref_property, "CnsLink"]
            else:
                # merge alternate names from the template and the definition
                alt_names = set()
                alt_names.update(t.get("propertyAlternateName", []))
                alt_names.update(definition_index[ref_property].get("alternateName", []))
                plist[type_name][ref_property] = {
                    "range": prop_range,
                    "alt": list(alt_names),
                    "min": t.get("minCardinality", 0)
                }
                if t.get("maxCardinality"):
                    plist[type_name][ref_property]["max"] = t.get("maxCardinality")

    # links join the type hierarchy as subtypes of CnsLink
    types.update(slinks)

    output_schema = {
        "types": types,
        "slinks": list(slinks.keys()),
        "plist": plist,
        "index": definition_index
    }

    for p in ["identifier", "name", "description", "version", "datePublished"]:
        output_schema[p] = schema[p]

    # a `json` extension saves human-readable JSON; anything else pickles
    if schema_file.endswith('json'):
        save_json(output_schema, schema_file, indent=2)
    else:
        save_pickle(output_schema, schema_file)

    show_schema(output_schema)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="generate an in memory view of a schema")
    parser.add_argument("schema", nargs='+', help="schema jsonld file path and its dependencies")
    parser.add_argument("--output", "-o",
                        help="output file path, default is `{schema_identifier}.json`. "
                             "you can use ext `json` for json format and `pkl` for pickle format")
    args = parser.parse_args()

    setup_default_logging()

    full_schema = create_full_schema(map(load_json, args.schema))

    if not args.output:
        args.output = "{0}.json".format(full_schema["identifier"])

    main(full_schema, args.output)
