# -*- coding: utf-8 -*-
"""
Transwarp StellarDB specific code
"""

from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division

import logging
from collections import OrderedDict
from typing import List, Dict, Text, Tuple
from decimal import Decimal

from kgpipeline.tools.json2csv import item_to_string
from pyspark.sql.types import StringType, BooleanType, DoubleType, LongType, IntegerType
from kgpipeline.tools.json2csv import get_json_rdd_keys
from kgpipeline.calc.diff import key_diff

logger = logging.getLogger(__name__)

# KgCnSchema property "range" values -> internal type names used by the
# schema/loader generators in this module.
DATA_TYPE_MAPPING = {
    "Integer": "long",
    "Float": "double",
    "Double": "double",
    "Decimal": "double",
    "Number": "double",
    "Boolean": "boolean",
    "Date": "local_datetime",
    "DateTime": "local_datetime",
    "Text": "string",
    "URL": "string"
}

# internal type names -> pyspark.sql.types classes, used when building dataframes
DATAFRAME_TYPE_MAPPING = {
    "string": StringType,
    "boolean": BooleanType,
    "int": IntegerType,
    "long": LongType,
    "double": DoubleType,
    "local_datetime": StringType  # we first use StringType and then cast these columns to timestamp (TimestampType)
}

# internal type names -> Hive column types (used by generate_external_table_hql)
HIVE_TYPE_MAPPING = {
    "string": "string",
    "boolean": "boolean",
    "int": "int",
    "long": "bigint",
    "double": "double",
    "local_datetime": "timestamp"
}

# properties that map to special fields in StellarDB
# 1. kg_label maps to label
# 2. kg_type maps to tags
# 3. in_id maps to usid
# 4. out_id maps to udid
SYSTEM_PROPERTIES = {"kg_label", "kg_type", "in_id", "out_id"}


def json_keys_to_tw(keys):
    """
    Convert JSON keys into the header layout required by the TW import format:
    1. @id becomes kg_id
    2. @type becomes kg_type, plus an extra kg_label column

    The special fields are always emitted first, in this fixed order:
    kg_id, kg_label, kg_type, in_id, out_id; all remaining keys follow
    in sorted order.
    """
    remaining = {name for name, _ in keys}
    headers = []

    # (source json key, header columns it expands to), in forced order
    special_columns = [
        ("@id", ("kg_id",)),
        ("@type", ("kg_label", "kg_type")),
        ("in", ("in_id",)),
        ("out", ("out_id",)),
    ]
    for source, targets in special_columns:
        if source in remaining:
            headers.extend(targets)
            remaining.discard(source)

    headers.extend(sorted(remaining))
    return headers


def get_json_rdd_headers(rdd):
    """
    Return the schema header fields needed to convert a JSON RDD
    into a dataframe.
    """
    return json_keys_to_tw(get_json_rdd_keys(rdd))


def get_data_extractor(keys):
    """
    Build a row extractor that converts a JSON record (dict) into the TW
    import row layout:
    1. kg_label derived from @type: "Thing" for an entity, @type[0] for a
       link (a record whose @type list contains "CnsLink")
    2. in maps to in_id and out maps to out_id

    :param keys: ordered header names, as produced by json_keys_to_tw
    :return: function(dict) -> list of column values in *keys* order
    """

    def __extract_data(d):
        data = []
        for k in keys:
            if k == "kg_id":
                data.append(d.get("@id"))
            elif k == "kg_type":
                data.append(item_to_string(d.get("@type")))
            elif k == "kg_label":
                # Guard against records without @type: the original code did
                # `"CnsLink" in d.get("@type")` which raised TypeError on None.
                type_list = d.get("@type") or []
                if "CnsLink" in type_list:
                    data.append(type_list[0])
                else:
                    data.append("Thing")
            elif k == "in_id":
                data.append(d.get("in"))  # in id
            elif k == "out_id":
                data.append(d.get("out"))  # out id
            else:
                dt = d.get(k)
                if isinstance(dt, (list, dict)):
                    # serialize nested structures into a single string column
                    data.append(item_to_string(dt))
                elif isinstance(dt, Decimal):
                    # Decimal is not representable in the dataframe type mapping
                    data.append(float(dt))
                else:
                    data.append(dt)
        return data

    return __extract_data


def get_property_type(schema, prop):
    """
    Based on the schema "range" of *prop*, return the property type name
    used in the transwarp graph schema.

    Integer: long (we always use long instead of int)
    Float/Double/Decimal/Number: double
    Boolean: boolean
    Date/DateTime: local_datetime
    Text/URL: string

    Unknown or missing ranges fall back to "string".

    :param schema: KgCnSchema
    :param prop: property name to look up
    """
    prop_def = schema.get_definition(prop)
    if prop_def:
        dt = prop_def.get("range")
    else:
        # logger.warn is deprecated; logger.warning is the supported name
        logger.warning("property {0} not found in schema!".format(prop))
        dt = "Text"
    return DATA_TYPE_MAPPING.get(dt, "string")


def get_schema_typed_fields(schema, keys):
    """
    Turn a plain list of property names into a [(property, property_type)]
    list, skipping StellarDB system properties and mapping kg_id to the
    internal __uid field.

    :param schema: KgCnSchema
    :param keys: list of property names
    :return: List[Tuple[Text]]
    """
    typed_fields = []
    for prop in keys:
        if prop in SYSTEM_PROPERTIES:
            continue
        if prop == "kg_id":
            typed_fields.append(("__uid", "string"))
        else:
            typed_fields.append((prop, get_property_type(schema, prop)))
    return typed_fields


def get_loader_typed_fields(schema, keys):
    """
    Turn a plain list of property names into a [(property, property_type)]
    list using the schema-declared type of each property.

    :param schema: KgCnSchema
    :param keys: list of property names
    :return: List[Tuple[Text]]
    """
    typed_fields = []
    for prop in keys:
        typed_fields.append((prop, get_property_type(schema, prop)))
    return typed_fields


def create_empty_tw_schema(graph_name, shard_number=3):
    """Build a bare TW graph schema with no vertex or edge tables."""
    schema = {}
    schema["graph.name"] = graph_name
    schema["graph.shard.number"] = shard_number
    schema["vertex.tables"] = []
    schema["edge.tables"] = []
    return schema


def create_vertex_table_schema(fields, indices):  # type: (List[Tuple[Text,Text]], List[Text]) -> Dict
    """Build the vertex-table schema dict for the single "Thing" label."""
    field_schemas = []
    for field_name, field_type in fields:
        field_schemas.append({"field.name": field_name, "field.type": field_type})
    return {
        "label.value": "Thing",
        "field.schemas": field_schemas,
        "index.schemas": indices
    }


def create_edge_table_schema(label, fields, indices):  # type: (Text, List[Tuple[Text,Text]], List[Text]) -> Dict
    """Build the edge-table schema dict for *label*; endpoints are always "Thing"."""
    field_schemas = []
    for field_name, field_type in fields:
        field_schemas.append({"field.name": field_name, "field.type": field_type})
    return {
        "label.value": label,
        "src.label.value": "Thing",
        "dst.label.value": "Thing",
        "field.schemas": field_schemas,
        "index.schemas": indices
    }


def diff_fields(new_fields, old_fields):
    # type: (Dict[Text, Text], Dict[Text, Text]) -> Dict[Text, List[Tuple[Text, Text]]]
    """
    find the difference of fields
    used for:
    1. add_field graph_name (:Label {field_name, field_type})
    2. delete_field graph_name (:Label {field_name, field_type})
    3. alter_field graph_name (:Label {field_name, field_type})
    """
    removed, added, updated = key_diff(new_fields, old_fields, set())
    diff = {}
    # removed fields keep their old type; added/updated carry the new one
    for tag, names, source in (("removed", removed, old_fields),
                               ("added", added, new_fields),
                               ("updated", updated, new_fields)):
        if names:
            diff[tag] = [(name, source[name]) for name in names]
    return diff


def _table_fields_and_indices(tables):
    """Collect label -> {field name: field type} and label -> index set from a table list."""
    fields = {}
    indices = {}
    for table in tables:
        label = table["label.value"]
        fields[label] = {f["field.name"]: f["field.type"] for f in table["field.schemas"]}
        indices[label] = set(table["index.schemas"])
    return fields, indices


def diff_schema(new_schema, old_schema):
    """
    calculate change of schemas of one graph and update new_schema for removed old_schemas

    Note: edge labels that disappeared from new_schema are copied back from
    old_schema, because transwarp does not allow removing labels.

    :raises Exception: if the two schemas belong to different graphs, or
        either schema does not contain exactly one vertex label "Thing"

    TODO: index changes (dynamically create new index)
    """
    diff = {}
    if new_schema["graph.name"] != old_schema["graph.name"]:
        raise Exception("We need schemas of the same graph!!!")

    diff["graph.name"] = new_schema["graph.name"]

    new_vertices, new_vertex_indices = _table_fields_and_indices(new_schema["vertex.tables"])
    old_vertices, old_vertex_indices = _table_fields_and_indices(old_schema["vertex.tables"])

    # we assume there is only one vertex label Thing
    # NOTE: the original `vertices.keys()[0]` raises TypeError on Python 3
    # (dict views are not subscriptable); a membership test works on both.
    if len(new_vertices) != 1 or "Thing" not in new_vertices:
        raise Exception("New schema is not valid: one and only one label Thing is allowed!")
    if len(old_vertices) != 1 or "Thing" not in old_vertices:
        raise Exception("Old schema is not valid: one and only one label Thing is allowed!")

    diff["vertex"] = diff_fields(new_vertices["Thing"], old_vertices["Thing"])
    diff["vertex.indices"] = list(new_vertex_indices["Thing"] - old_vertex_indices["Thing"])

    new_edges, new_edge_indices = _table_fields_and_indices(new_schema["edge.tables"])
    old_edges, old_edge_indices = _table_fields_and_indices(old_schema["edge.tables"])

    removed, added, updated = key_diff(new_edges, old_edges, set())
    if removed:
        # logger.warn is deprecated; logger.warning is the supported name
        logger.warning("following labels are removed in schema but not allowed in transwarp so we ignore them: {0}".format(
            ", ".join(removed)))

        # carry the removed edge tables over so new_schema stays loadable
        for edge_detail in old_schema["edge.tables"]:
            if edge_detail["label.value"] in removed:
                new_schema["edge.tables"].append(edge_detail)

    diff["edge"] = {}
    if added:
        diff["edge"]["added"] = {e: new_edges[e] for e in added}
        diff["edge"]["added.indices"] = {e: list(new_edge_indices[e]) for e in added}
    if updated:
        diff["edge"]["updated"] = {e: diff_fields(new_edges[e], old_edges[e]) for e in updated}
        diff["edge"]["updated.indices"] = {e: list(new_edge_indices[e] - old_edge_indices[e]) for e in updated}

    return diff


def apply_field_diff(graph, label, fd, index_set):
    """
    Translate a field diff (as produced by diff_fields) into a list of
    DDL statements for the given graph and label; indexed fields get an
    index:DEFAULT clause except when the field is being removed.
    """
    statements = []
    if not fd:
        return statements
    command_order = (("removed", "remove_field"),
                     ("added", "add_field"),
                     ("updated", "alter_field"))
    for key, command in command_order:
        for field_name, field_type in fd.get(key, ()):
            if command != "remove_field" and field_name in index_set:
                template = "{0} {1} (:{2} {{ {3} {4} index:DEFAULT }})"
            else:
                template = "{0} {1} (:{2} {{ {3} {4} }})"
            statements.append(template.format(command, graph, label, field_name, field_type))
    return statements


def apply_schema_change(schema_diff):
    """
    Turn a schema diff (as produced by diff_schema) into the list of DDL
    statements that apply it: field changes for the vertex label Thing,
    field changes for updated edge labels, and alter_graph_schema
    statements for newly added edge labels.
    """
    graph_name = schema_diff["graph.name"]
    edge_diff = schema_diff["edge"]

    statements = []
    statements.extend(
        apply_field_diff(graph_name, "Thing", schema_diff["vertex"], schema_diff["vertex.indices"]))

    for edge, field_diff in edge_diff.get("updated", {}).items():
        statements.extend(
            apply_field_diff(graph_name, edge, field_diff, edge_diff["updated.indices"][edge]))

    for edge, edge_detail in edge_diff.get("added", {}).items():
        index_set = edge_diff["added.indices"][edge]
        field_parts = []
        for field_name, field_type in edge_detail.items():
            if field_name in index_set:
                field_parts.append("{0} {1} index:DEFAULT".format(field_name, field_type))
            else:
                field_parts.append("{0} {1}".format(field_name, field_type))
        # always add edge uid index
        field_parts.append("__EXTRAID string index:DEFAULT")
        statements.append(
            "alter_graph_schema {0} [:{1} {{ {2} }}]".format(graph_name, edge, ", ".join(field_parts)))
    return statements


def create_empty_load_schema():
    """Return a fresh load-schema document with no loaders."""
    schema = {}
    schema["loadSchemaVersion"] = "1.0"
    schema["loaders"] = []
    return schema


def _create_property_mapping(fields):
    props = OrderedDict()
    for k, _ in fields:
        if k != "__uid":
            props[k] = k.lower()  # use lower case as StellarDB only recognize this though Hive is case insensitive
    return props


def create_vertex_loader(fields):
    """
    Build the StellarDB loader descriptor for vertex rows: kg_id becomes
    the vertex uid, every vertex gets the constant label "Thing", and
    kg_type carries the ';'-separated tag list.
    """
    desc = {
        "uid": "kg_id",
        "ulabel": {"type": "STRING_CONSTANT", "value": "Thing"},
        "tags": {"type": "COLUMN_NAME", "value": "kg_type", "delimiter": ";"},
        "properties": _create_property_mapping(fields)
    }
    return {"graph.load.type": "vertex", "desc": desc}


def create_edge_loader(fields):
    """
    Build the StellarDB loader descriptor for edge rows: in_id/out_id map
    to the source/destination uids (both labelled "Thing"), kg_id is the
    edge uid, kg_label the edge label, and kg_type the ';'-separated tags.
    """
    desc = {
        "tags": {"type": "COLUMN_NAME", "value": "kg_type", "delimiter": ";"},
        "usid": "in_id",
        "uslabel": {"type": "STRING_CONSTANT", "value": "Thing"},
        "udid": "out_id",
        "udlabel": {"type": "STRING_CONSTANT", "value": "Thing"},
        "ueid": "kg_id",
        "uelabel": "kg_label",
        "properties": _create_property_mapping(fields)
    }
    return {
        "graph.load.type": "edge",
        "reverse.direction.needed": False,
        "desc": desc
    }


def generate_external_table_hql(table_name, loader_fields, path):
    """
    Build the HQL lines that create an external ORC table over *path*.

    :param table_name: name of the Hive table to create
    :param loader_fields: [(column name, internal type)] pairs; types are
        mapped through HIVE_TYPE_MAPPING, unknown types pass through as-is
    :param path: storage location of the ORC data
    :return: list of HQL lines, one column definition per line
    :raises ValueError: if loader_fields is empty (the original code
        crashed with an obscure IndexError on field_list[-1])
    """
    if not loader_fields:
        raise ValueError("loader_fields must not be empty")
    res = ["CREATE EXTERNAL TABLE {0} (".format(table_name)]
    field_list = ["{0} {1},".format(k, HIVE_TYPE_MAPPING.get(t, t)) for k, t in loader_fields]
    # the last column definition must not carry a trailing comma
    field_list[-1] = field_list[-1][:-1]
    res.extend(field_list)
    res.append(")")
    res.append("STORED AS ORC")
    res.append("LOCATION '{0}';".format(path))
    return res
