# -*- coding: utf-8 -*-

"""
TigerGraph Specific Code
"""

import logging
from collections import OrderedDict

from kgpipeline.cnschema import KgCnSchema
from kgpipeline.calc.diff import key_diff
from kgpipeline.tg.reserved import is_keyword

from typing import Set, List, Dict
from six import text_type

logger = logging.getLogger(__name__)


class IncompatibleSchemaChange(Exception):
    """Raised when the old and new schemas differ in a way that cannot be
    applied as an in-place schema change (e.g. attribute type changes,
    single edge becoming multi)."""
    pass


# CN-schema range -> TigerGraph attribute type for vertex/edge attributes.
# Anything not listed falls back to STRING (see append_property).
DATA_TYPE_MAPPING = {
    "Integer": "INT",
    "Float": "DOUBLE",
    "Double": "DOUBLE",
    "Decimal": "DOUBLE",
    "Boolean": "BOOL",
    "Date": "DATETIME",
    "DateTime": "DATETIME",
    "Text": "STRING",
    "URL": "STRING",
    "Category": "STRING COMPRESS"
}

# Same mapping for fields inside TYPEDEF TUPLE definitions; strings carry an
# explicit size here (STRING(8)) — presumably because tuple fields need a
# fixed size in GSQL; confirm against TigerGraph TYPEDEF documentation.
TUPLE_DATA_TYPE_MAPPING = {
    "Integer": "INT",
    "Float": "DOUBLE",
    "Double": "DOUBLE",
    "Decimal": "DOUBLE",
    "Boolean": "BOOL",
    "Date": "DATETIME",
    "DateTime": "DATETIME",
    "Text": "STRING(8)",
    "URL": "STRING(8)"
}

# Reserved field names in the loading input JSON:
# _i: in
# _o: out
# _t: @type
# _id: @id
# _it: in type
# _ot: out type
TG_KEYS = {"_i", "_o", "_t", "_id", "_it", "_ot"}

# separator used to flatten a scalar list into a single string value
TG_LIST_SEP = "|"

# keys carried through as the schema's excluded_keys list (see create_tg_schema)
EXCLUDED_KEYS = {"graphId"}


def change_keyword(s):
    """Return *s* suffixed with an underscore to avoid GSQL reserved keywords."""
    return "{0}_".format(s)


def filter_keyword(filter_dict, s):
    """Map property name *s* away from GSQL reserved keywords.

    :param filter_dict: cache of already-renamed properties; updated in place
    :param s: candidate property name
    :return: the original name, or the renamed one if *s* is reserved
    """
    if s in filter_dict:
        return filter_dict[s]
    if is_keyword(s):
        original = s
        s = change_keyword(original)
        filter_dict[original] = s
        # Logger.warn is deprecated; warning() is the supported spelling
        logger.warning("property {0} is a TigerGraph GSQL reserved keyword! Changed to {1}".format(original, s))
    return s


def convert_list_value(val):
    """Flatten a list into a single TG_LIST_SEP-delimited string."""
    parts = (text_type(item) for item in val)
    return TG_LIST_SEP.join(parts)


def convert_keys(filtered_keys, val):
    """Rename keys of *val* in place according to *filtered_keys*.

    :param filtered_keys: original name -> replacement name
    :param val: dict to rename; mutated in place
    :return: the (mutated) dict *val*
    """
    # iterate over a snapshot: pop()/setitem during dict-view iteration
    # raises RuntimeError on Python 3
    for k in list(val.keys()):
        if k in filtered_keys:
            val[filtered_keys[k]] = val.pop(k)
    return val


def convert_data(filtered_keys, x):
    """Normalize one record in place for TigerGraph loading.

    - empty lists become ""
    - lists of dicts (and nested dicts) get their keys renamed via convert_keys
    - lists of scalars are flattened with convert_list_value
    - top-level keys present in *filtered_keys* are renamed

    :param filtered_keys: original name -> replacement name
    :param x: record dict; mutated in place
    :return: the (mutated) record *x*
    """
    # iterate over a snapshot: del/setitem during dict-view iteration raises
    # RuntimeError on Python 3
    for k in list(x.keys()):
        v = x[k]
        if isinstance(v, list):
            if len(v) == 0:
                v = ""
            else:
                if isinstance(v[0], dict):
                    v = [convert_keys(filtered_keys, iv) for iv in v]
                else:
                    v = convert_list_value(v)
        elif isinstance(v, dict):
            v = convert_keys(filtered_keys, v)

        if k in filtered_keys:
            del x[k]
            k = filtered_keys[k]
        x[k] = v

    return x


def append_property(schema, tuples, prop_list, p, lc, filter_dict):
    """Resolve property *p* against the CN schema and append (name, gsql_type)
    to *prop_list*.

    :param schema: KgCnSchema used to look up the property definition
    :param tuples: tuple-type registry; data-structure ranges register here
    :param prop_list: output list of (name, gsql_type) pairs; appended to
    :param p: property name
    :param lc: list count; > 0 wraps the type in LIST<...>
    :param filter_dict: reserved-keyword rename cache (see filter_keyword)
    """
    prop_def = schema.get_definition(p)
    if prop_def:
        dt = prop_def.get("range")
    else:
        # Logger.warn is deprecated; warning() is the supported spelling
        logger.warning("property {0} not found in schema!".format(p))
        dt = "String"

    p = filter_keyword(filter_dict, p)

    if schema.is_cns_data_structure(dt):
        # data-structure ranges become TYPEDEF TUPLE types, filled in later
        if dt not in tuples:
            tuples[dt] = []
        prop_list.append((p, dt))
    else:
        dt = DATA_TYPE_MAPPING.get(dt, "STRING")
        if lc > 0:
            dt = "LIST<{0}>".format(dt)
        prop_list.append((p, dt))


def create_tg_schema(schema,
                     entity_properties,
                     link_properties,
                     relations,
                     multigraph_link_types,
                     excluded_keys):  # type: (KgCnSchema, List, List, List, Set, Set) -> Dict
    """Build the TigerGraph schema dict (tuples / vertices / edges) from the
    CN-schema property and relation listings."""
    multi_links = set(multigraph_link_types)

    # relation_type -> set of in types / out types / (in, out) pairs
    relation_in_index = {}
    relation_out_index = {}
    relation_type_index = {}
    for relation_type, in_type, out_type in relations:
        relation_in_index.setdefault(relation_type, set()).add(in_type)
        relation_out_index.setdefault(relation_type, set()).add(out_type)
        relation_type_index.setdefault(relation_type, set()).add((in_type, out_type))

    tuples = {}
    edges = {}
    filtered_keys = {}

    # edge (link) properties, grouped per relation type
    for link_type, prop, list_count in sorted(link_properties, key=lambda item: (item[0], item[1])):
        if link_type not in edges:
            edges[link_type] = {
                "is_multi": link_type in multi_links,
                "from": list(relation_in_index[link_type]),
                "to": list(relation_out_index[link_type]),
                "from_to_types": list(relation_type_index[link_type]),
                "properties": []
            }
        append_property(schema, tuples, edges[link_type]["properties"], prop, list_count, filtered_keys)

    # vertex (entity) properties, grouped per entity type
    vertices = {}
    for entity_type, prop, list_count in sorted(entity_properties, key=lambda item: (item[0], item[1])):
        append_property(schema, tuples, vertices.setdefault(entity_type, []), prop, list_count, filtered_keys)

    # fill in the attribute list of every referenced tuple (data structure) type
    for cds, props in tuples.items():
        plist = schema.get_property_list(cds)
        for prop_name in sorted(plist.keys()):
            gsql_type = TUPLE_DATA_TYPE_MAPPING.get(plist[prop_name].get("range"), "STRING(8)")
            props.append((filter_keyword(filtered_keys, prop_name), gsql_type))

    return {
        "tuples": tuples,
        "vertices": vertices,
        "edges": edges,
        "excluded_keys": list(excluded_keys),
        "filtered_keys": filtered_keys
    }


class AttributeList(OrderedDict):
    """Ordered (attribute name -> gsql type) mapping with diff/patch helpers."""

    def diff(self, other):  # (AttributeList, AttributeList) -> Dict
        """Compute the change set that turns *other* into *self*."""
        removed, added, updated = key_diff(self, other, set())
        result = {}
        if removed:
            result["delete"] = removed
        if added:
            result["add"] = AttributeList((name, self[name]) for name in added)
        if updated:
            result["update"] = AttributeList((name, self[name]) for name in updated)
        return result

    def apply_diff(self, diff):
        """Apply a change set produced by :meth:`diff` in place."""
        for name in diff.get("delete", ()):
            del self[name]
        self.update(diff.get("add", {}))
        self.update(diff.get("update", {}))

    def __str__(self):
        parts = ["{0} {1}".format(name, gsql_type) for name, gsql_type in self.items()]
        return ", ".join(parts)


def diff_tg_schema(schema_old, schema_new):
    """
    compare the old and new schema and create a diff list (mainly used for schema change job)

    for vertex:
        type: [(), (), ()]
        attribute add or delete

    for edge:
        link = {
                "is_multi": _t in multi,
                "from": list(relation_in_index[_t]),
                "to": list(relation_out_index[_t]),
                "from_to_types": list(relation_type_index[_t]),
                "properties": []
            }
        actually, is_multi can not be changed once created: because all data will be lost if we make this change
        from, to, and from_to_types are just for generating the loading job

    :param schema_old: old schema in json
    :param schema_new: new schema in json
    :return: new schema with corrected order and diff
    :raises IncompatibleSchemaChange: on attribute type changes, tuple changes,
        or a single edge becoming multi
    """
    diff = {
        "vertices": {},
        "tuples": {},
        "edges": {}
    }

    # vertices
    removed, added, updated = key_diff(schema_new["vertices"], schema_old["vertices"], set())
    if added:
        diff["vertices"]["add"] = {}
        for v in added:
            diff["vertices"]["add"][v] = schema_new["vertices"][v]
    if removed:
        diff["vertices"]["delete"] = removed

    if updated:
        diff["vertices"]["update"] = {}
        for v in updated:
            new_attrs = AttributeList(schema_new["vertices"][v])
            old_attrs = AttributeList(schema_old["vertices"][v])
            c = new_attrs.diff(old_attrs)
            if "update" in c:
                # bugfix: the change-set key is "update", not "updated"
                raise IncompatibleSchemaChange("type changed: {0} of type {1}!".format(c["update"], v))

            if "add" in c:
                # keep the existing attribute order and append the new ones
                old_attrs.apply_diff(c)
                schema_new["vertices"][v] = old_attrs.items()

            diff["vertices"]["update"][v] = {
                "add": c.get("add", {}).items(),
                "delete": c.get("delete", set())
            }

    # tuples
    removed, added, updated = key_diff(schema_new["tuples"], schema_old["tuples"], set())
    if removed:
        # Logger.warn is deprecated; warning() is the supported spelling
        logger.warning("removed tuples: {0} will have no effect!".format(", ".join(removed)))
    if updated:
        raise IncompatibleSchemaChange("updated tuples: {0} not allowed!".format(", ".join(updated)))
    if added:
        for t in added:
            diff["tuples"][t] = schema_new["tuples"][t]

    # edges:
    removed, added, updated = key_diff(schema_new["edges"], schema_old["edges"], set())
    if added:
        diff["edges"]["add"] = {}
        for e in added:
            diff["edges"]["add"][e] = schema_new["edges"][e]
    if removed:
        diff["edges"]["delete"] = removed
    if updated:
        diff["edges"]["update"] = {}
        for e in updated:
            if schema_new["edges"][e]["is_multi"] and not schema_old["edges"][e]["is_multi"]:
                # bugfix: .format(e) was missing, leaving a literal "{0}" in the message
                raise IncompatibleSchemaChange("edge {0} was single edged but now is multiple: "
                                               "this is not allowed and you must perform a full publish".format(e))

            new_attrs = AttributeList(schema_new["edges"][e]["properties"])
            old_attrs = AttributeList(schema_old["edges"][e]["properties"])
            c = new_attrs.diff(old_attrs)
            if "update" in c:
                # bugfix: key is "update", and the offending type is e
                # (v was a stale leftover from the vertex loop)
                raise IncompatibleSchemaChange("type changed: {0} of type {1}!".format(c["update"], e))

            if "add" in c:
                old_attrs.apply_diff(c)
                # bugfix: mirror the vertex branch — store the merged attribute
                # list, not the change-set dict's items
                schema_new["edges"][e]["properties"] = old_attrs.items()

            diff["edges"]["update"][e] = {
                "add": c.get("add", {}).items(),
                "delete": c.get("delete", set())
            }

            if set(schema_new["edges"][e]["from_to_types"]) != set(schema_old["edges"][e]["from_to_types"]):
                diff["edges"]["update"][e]["from_to_update"] = True

            diff["edges"]["update"][e]["is_multi"] = schema_old["edges"][e]["is_multi"]

    return schema_new, diff


def generate_typedef(tuple_def):
    """Render a TYPEDEF TUPLE statement for every tuple type."""
    lines = []
    for tuple_name, attr_list in tuple_def.items():
        fields = ", ".join("{0} {1}".format(attr, gsql_type) for attr, gsql_type in attr_list)
        lines.append("TYPEDEF TUPLE <{0}> {1}".format(fields, tuple_name))
    return lines


def generate_define_vertex(t, attr_list, indent="\t"):
    """Render an ADD VERTEX statement for vertex type *t*."""
    fields = ", ".join("{0} {1}".format(name, gsql_type) for name, gsql_type in attr_list)
    return ["{0}ADD VERTEX {1} (PRIMARY_ID kgid STRING, {2});".format(indent, t, fields)]


def generate_define_edge(t, edge, indent="\t"):
    """Render ADD VERTEX/EDGE statements for edge type *t* in a schema change job.

    Multi edges are modelled as an intermediate vertex plus {t}_in / {t}_out
    directed edges; single edges become one DIRECTED EDGE with inline
    attributes.

    :param t: edge (relation) type name
    :param edge: edge definition dict (keys: is_multi, from, to, properties)
    :param indent: line prefix for the generated statements
    :raises ValueError: if the from/to type list is empty or contains None
    """
    res = []
    # next(..., None) so an empty endpoint list yields None (and the clear
    # ValueError below) instead of an unhandled StopIteration
    from_type = "*" if len(edge["from"]) > 1 else next(iter(edge["from"]), None)
    to_type = "*" if len(edge["to"]) > 1 else next(iter(edge["to"]), None)

    if from_type is None:
        raise ValueError("invalid from type in edge: {}".format(edge))

    if to_type is None:
        raise ValueError("invalid to type in edge: {}".format(edge))

    if edge["is_multi"]:
        res.append(indent + "ADD VERTEX {1} (PRIMARY_ID kgid STRING, {0});".format(
            ", ".join(["{0} {1}".format(name, gsql_type) for name, gsql_type in edge["properties"]]), t))
        res.append(indent + "ADD DIRECTED EDGE {0}_in (FROM {1}, TO {0});".format(t, from_type))
        res.append(indent + "ADD DIRECTED EDGE {0}_out (FROM {0}, TO {1});".format(t, to_type))
    else:
        res.append(indent + "ADD DIRECTED EDGE {1} (FROM {2}, TO {3}, kgid STRING, {0});".format(
            ", ".join(["{0} {1}".format(name, gsql_type) for name, gsql_type in edge["properties"]]),
            t, from_type, to_type))
    return res


def generate_update_attributes(update_def):
    """Render ALTER ... DROP/ADD ATTRIBUTE statements from an update change set.

    Entries without an "is_multi" flag (vertices, multi edges) are altered as
    VERTEX; single edges (is_multi False) as EDGE.
    """
    res = []
    if not update_def:
        return res
    for type_name, changes in update_def.items():
        kind = "VERTEX" if changes.get("is_multi", True) else "EDGE"
        to_drop = changes.get("delete")
        if to_drop:
            res.append("\tALTER {0} {1} DROP ATTRIBUTE ({2});".format(kind, type_name, ", ".join(to_drop)))
        to_add = changes.get("add")
        if to_add:
            attr_defs = ", ".join("{0} {1}".format(name, gsql_type) for name, gsql_type in to_add)
            res.append("\tALTER {0} {1} ADD ATTRIBUTE ({2});".format(kind, type_name, attr_defs))
    return res


def generate_add_schema_change_job(tg_schema, graph_name, job_name):
    """Render a SCHEMA_CHANGE JOB that adds every vertex and edge in *tg_schema*."""
    lines = ["CREATE SCHEMA_CHANGE JOB {1} FOR GRAPH {0} {{".format(graph_name, job_name)]

    for vertex_type, attr_list in tg_schema["vertices"].items():
        lines += generate_define_vertex(vertex_type, attr_list)

    for edge_type, edge in tg_schema["edges"].items():
        lines += generate_define_edge(edge_type, edge)

    lines.append("}")
    return lines


def generate_update_schema_change_job(tg_schema_diff, graph_name, job_name):
    """
    create schema change job to update current graph schema

    Returns an empty list when the diff produces no statements.
    """
    header = ["CREATE SCHEMA_CHANGE JOB {1} FOR GRAPH {0} {{".format(graph_name, job_name)]
    body = []

    # order: drop edges, drop vertices, add vertices, add edges, then
    # attribute updates — possibly irrelevant, but kept stable on purpose
    edges_to_drop = tg_schema_diff["edges"].get("delete")
    if edges_to_drop:
        body.append("\tDROP EDGE {0};".format(", ".join(edges_to_drop)))

    vertices_to_drop = tg_schema_diff["vertices"].get("delete")
    if vertices_to_drop:
        body.append("\tDROP VERTEX {0};".format(", ".join(vertices_to_drop)))

    for vertex_type, attr_list in tg_schema_diff["vertices"].get("add", {}).items():
        body.extend(generate_define_vertex(vertex_type, attr_list))

    for edge_type, edge in tg_schema_diff["edges"].get("add", {}).items():
        body.extend(generate_define_edge(edge_type, edge))

    # generate_update_attributes returns [] for a falsy argument, so these
    # calls are safe unconditionally
    body.extend(generate_update_attributes(tg_schema_diff["vertices"].get("update")))
    body.extend(generate_update_attributes(tg_schema_diff["edges"].get("update")))

    if not body:
        return []
    return header + body + ["}"]


def generate_global_schema_gsql(tg_schema, graph_name):
    """Render the standalone global GSQL schema: tuples, vertices, edges and
    the final CREATE GRAPH statement listing every defined element."""
    lines = []
    graph_members = []

    for tuple_name, attr_list in tg_schema["tuples"].items():
        fields = ", ".join("{0} {1}".format(name, gsql_type) for name, gsql_type in attr_list)
        lines.append("TYPEDEF TUPLE <{0}> {1}".format(fields, tuple_name))

    for vertex_type, attr_list in tg_schema["vertices"].items():
        fields = ", ".join("{0} {1}".format(name, gsql_type) for name, gsql_type in attr_list)
        lines.append("CREATE VERTEX {1} (PRIMARY_ID kgid STRING, {0})".format(fields, vertex_type))
        graph_members.append(vertex_type)

    for edge_type, edge in tg_schema["edges"].items():
        from_type = "*" if len(edge["from"]) > 1 else next(iter(edge["from"]))
        to_type = "*" if len(edge["to"]) > 1 else next(iter(edge["to"]))
        fields = ", ".join("{0} {1}".format(name, gsql_type) for name, gsql_type in edge["properties"])
        if edge["is_multi"]:
            # multi edges are modelled as a vertex plus _in/_out edges
            lines.append("CREATE VERTEX {1} (PRIMARY_ID kgid STRING, {0})".format(fields, edge_type))
            lines.append("CREATE DIRECTED EDGE {0}_in (FROM {1}, TO {0})".format(edge_type, from_type))
            lines.append("CREATE DIRECTED EDGE {0}_out (FROM {0}, TO {1})".format(edge_type, to_type))
            graph_members.extend([edge_type, edge_type + "_in", edge_type + "_out"])
        else:
            lines.append("CREATE DIRECTED EDGE {1} (FROM {2}, TO {3}, kgid STRING, {0})".format(
                fields, edge_type, from_type, to_type))
            graph_members.append(edge_type)

    lines.append("CREATE GRAPH {0} ({1})".format(graph_name, ", ".join(graph_members)))
    return lines


def create_attr_values(tuples, attrs):
    """Build the VALUES fragment of a loading clause from an attribute list.

    :param tuples: tuple type -> attribute definitions
    :param attrs: (name, gsql_type) pairs of the vertex/edge being loaded
    :return: (values_string, deferred) where deferred lists
        (attr_name, position, tuple_type) entries for LIST<tuple> attributes
        that must be staged through temp tables
    """
    values = []
    deferred = []
    for pos, (name, gsql_type) in enumerate(attrs):
        if gsql_type in tuples:
            fields = ",".join('$"{0}":"{1}"'.format(name, inner) for inner, _ in tuples[gsql_type])
            values.append("{0}({1})".format(gsql_type, fields))
        elif gsql_type.startswith("LIST<") and gsql_type.endswith(">"):
            inner_type = gsql_type[5:-1]
            if inner_type in tuples:
                # lists of tuples cannot be loaded inline; placeholder here,
                # real load happens via temp tables
                deferred.append((name, pos, inner_type))
                values.append("_")
            else:
                values.append('SPLIT($"{0}", "|")'.format(name))
        else:
            values.append('$"{0}"'.format(name))

    return ", ".join(values), deferred


def construct_temp_table_clauses_for_vertex(tuples, list_attr, main_type, attrs):
    """Build TEMP_TABLE destination clauses and LOAD statements for the
    LIST<tuple> attributes of a vertex type.

    :param tuples: tuple type -> attribute definitions
    :param list_attr: [(attr_name, position, tuple_type)] from create_attr_values
    :param main_type: vertex type being loaded
    :param attrs: full (name, gsql_type) attribute list of the vertex
    :return: (temp_table_clauses, temp_table_loads)
    """
    temp_table_clauses = []
    temp_table_loads = []
    for n, i, it in list_attr:
        temp_table = "t_{0}_{1}".format(main_type, n)
        temp_list_attrs = ["_"] * len(attrs)
        # tuple definitions built by create_tg_schema are (name, type) pair
        # lists, not dicts, so calling .keys() on them would fail; support
        # both shapes for safety
        if hasattr(tuples[it], "keys"):
            tuple_keys = list(tuples[it].keys())
        else:
            tuple_keys = [key for key, _ in tuples[it]]
        tuple_keys_json = ", ".join(['$"{0}"'.format(k) for k in tuple_keys])
        temp_table_clauses.append(
            '\t\tTO TEMP_TABLE {0} (_id, {4}) VALUES'
            ' ($"_id", flatten_json_array($"{1}", {3})) WHERE $"_t" == "{2}"'.format(
                temp_table, n, main_type, tuple_keys_json, ", ".join(tuple_keys)))
        temp_list_attrs[i] = '{0}({1})'.format(it, tuple_keys_json)

        temp_table_loads.append('\tLOAD TEMP_TABLE {0} TO VERTEX {1} VALUES ($"_id", {2});'.format(
            temp_table, main_type, ", ".join(temp_list_attrs)))
    return temp_table_clauses, temp_table_loads


def construct_temp_table_clauses_for_edge(tuples, list_attr, main_type, attrs, from_type, to_type):
    """Build TEMP_TABLE destination clauses and LOAD statements for the
    LIST<tuple> attributes of an edge type.

    :param tuples: tuple type -> attribute definitions
    :param list_attr: [(attr_name, position, tuple_type)] from create_attr_values
    :param main_type: edge type being loaded
    :param attrs: full (name, gsql_type) attribute list of the edge
    :param from_type: concrete from-type, or falsy for a single-typed endpoint
    :param to_type: concrete to-type, or falsy for a single-typed endpoint
    :return: (temp_table_clauses, temp_table_loads)
    """
    temp_table_clauses = []
    temp_table_loads = []
    if from_type:
        from_type_clause = "_" + from_type
        from_type_load = " " + from_type
        from_type_where = ' AND $"_it" == "{0}"'.format(from_type)
    else:
        from_type_clause = ""
        from_type_load = ""
        from_type_where = ""

    if to_type:
        to_type_clause = "_" + to_type
        to_type_load = " " + to_type
        to_type_where = ' AND $"_ot" == "{0}"'.format(to_type)
    else:
        to_type_clause = ""
        to_type_load = ""
        to_type_where = ""

    for n, i, it in list_attr:
        temp_table = "t_{0}_{1}{2}{3}".format(main_type, n, from_type_clause, to_type_clause)
        temp_list_attrs = ["_"] * len(attrs)
        # tuple definitions built by create_tg_schema are (name, type) pair
        # lists, not dicts, so calling .keys() on them would fail; support
        # both shapes for safety
        if hasattr(tuples[it], "keys"):
            tuple_keys = list(tuples[it].keys())
        else:
            tuple_keys = [key for key, _ in tuples[it]]
        tuple_keys_json = ", ".join(['$"{0}"'.format(k) for k in tuple_keys])
        # bugfix: the WHERE suffix previously used {4}{5}, re-inserting the
        # joined key list and silently dropping the to-type filter; the
        # correct placeholders are {5} (from filter) and {6} (to filter)
        temp_table_clauses.append(
            '\t\tTO TEMP_TABLE {0} (_i, _o, {4}) VALUES'
            ' ($"_i", $"_o", flatten_json_array($"{1}", {3})) WHERE $"_t" == "{2}"{5}{6}'.format(
                temp_table, n, main_type, tuple_keys_json, ", ".join(tuple_keys), from_type_where, to_type_where))
        temp_list_attrs[i] = '{0}({1})'.format(it, tuple_keys_json)

        temp_table_loads.append('\tLOAD TEMP_TABLE {0} TO EDGE {1} VALUES ($"_i"{3}, $"_o"{4}, {2});'.format(
            temp_table, main_type, ", ".join(temp_list_attrs), from_type_load, to_type_load))
    return temp_table_clauses, temp_table_loads


def generate_loading_job(tg_schema, graph_name, job_name):
    """Render the GSQL loading job that loads the entity/link JSON files.

    Vertices load from the "entity" file and edges from the "link" file,
    each input row selected by its $"_t" type field. Multi edges are loaded
    as an intermediate vertex plus {t}_in/{t}_out edges. LIST<tuple>
    attributes cannot be loaded inline and are staged through TEMP_TABLE
    clauses (see construct_temp_table_clauses_for_vertex/_edge).

    :param tg_schema: json based tg schema (keys: tuples, vertices, edges)
    :param graph_name: target graph
    :param job_name: name for the created loading job
    :return: list of GSQL lines
    """
    res = ['CREATE LOADING JOB {1} FOR GRAPH {0}'.format(graph_name, job_name) + " {",
           '\tDEFINE FILENAME entity;',
           '\tDEFINE FILENAME link;']

    load_entity = '\tLOAD entity\n{0} USING JSON_FILE="true";'
    load_link = '\tLOAD link \n{0} USING JSON_FILE="true";'

    # destination clause separator
    sep = ",\n"

    # --- vertices: one TO VERTEX clause per type ---
    entity_dest = []
    entity_temp_tables = []
    for t, attr_list in tg_schema["vertices"].items():
        attrs, list_attr = create_attr_values(tg_schema["tuples"], attr_list)
        entity_dest.append('\t\tTO VERTEX {0} VALUES ($"_id", {1}) WHERE $"_t" == "{0}"'.format(
            t, attrs))
        # LIST<tuple> attributes need TEMP_TABLE staging
        if list_attr:
            clauses, loads = construct_temp_table_clauses_for_vertex(tg_schema["tuples"], list_attr, t, attr_list)
            entity_dest.extend(clauses)
            entity_temp_tables.extend(loads)

    res.append(load_entity.format(sep.join(entity_dest)))
    res.extend(entity_temp_tables)

    # --- edges ---
    link_dest = []
    link_temp_tables = []

    for t, edge in tg_schema["edges"].items():
        # "*" marks an endpoint with more than one possible vertex type; such
        # endpoints must be expanded per concrete (from, to) pair below
        from_type = "*" if len(edge["from"]) > 1 else next(iter(edge["from"]))
        to_type = "*" if len(edge["to"]) > 1 else next(iter(edge["to"]))
        attr_str, list_attr = create_attr_values(tg_schema["tuples"], edge["properties"])
        if edge["is_multi"]:
            # multi edge: load the intermediate vertex, then _in/_out edges
            link_dest.append('\t\tTO VERTEX {0} VALUES ($"_id", {1}) WHERE $"_t" == "{0}"'.format(
                t, attr_str))
            if list_attr:
                clauses, loads = construct_temp_table_clauses_for_vertex(
                    tg_schema["tuples"], list_attr, t, edge["properties"])
                link_dest.extend(clauses)
                link_temp_tables.extend(loads)
            # the extra token after $"_i"/$"_o" (e.g. $"_i" Person) appears to
            # qualify the endpoint id with its concrete vertex type for
            # wildcard endpoints — NOTE(review): confirm against the
            # TigerGraph loading-job reference
            if from_type != "*" and to_type != "*":
                link_dest.append('\t\tTO EDGE {0}_in VALUES ($"_i", $"_id") WHERE $"_t" == "{0}"'.format(t))
                link_dest.append('\t\tTO EDGE {0}_out VALUES ($"_id", $"_o") WHERE $"_t" == "{0}"'.format(t))
            elif from_type != "*" and to_type == "*":
                link_dest.append('\t\tTO EDGE {0}_in VALUES ($"_i", $"_id")'
                                 ' WHERE $"_t" == "{0}"'.format(t))
                for _, _to in edge["from_to_types"]:
                    link_dest.append('\t\tTO EDGE {0}_out VALUES ($"_id", $"_o" {1})'
                                     ' WHERE $"_t" == "{0}" AND $"_ot" == "{1}"'.format(t, _to))
            elif from_type == "*" and to_type != "*":
                link_dest.append('\t\tTO EDGE {0}_out VALUES ($"_id", $"_o")'
                                 ' WHERE $"_t" == "{0}"'.format(t))
                for _from, _ in edge["from_to_types"]:
                    link_dest.append('\t\tTO EDGE {0}_in VALUES ($"_i" {1}, $"_id")'
                                     ' WHERE $"_t" == "{0}" AND $"_it" == "{1}"'.format(t, _from))
            else:
                # both endpoints wildcard: one clause per (from, to) pair
                for _from, _to in edge["from_to_types"]:
                    link_dest.append(
                        '\t\tTO EDGE {0}_in VALUES ($"_i" {1}, $"_id")'
                        ' WHERE $"_t" == "{0}" AND $"_it" == "{1}" AND $"_ot" == "{2}"'.format(t, _from, _to))
                    link_dest.append(
                        '\t\tTO EDGE {0}_out VALUES ($"_id", $"_o" {2})'
                        ' WHERE $"_t" == "{0}" AND $"_it" == "{1}" AND $"_ot" == "{2}"'.format(t, _from, _to))
        else:
            # single edge: load attributes inline, expanding wildcard
            # endpoints per (from, to) pair; remember the pairs so the temp
            # tables below get the matching type filters
            from_to_param = []
            if from_type != "*" and to_type != "*":
                link_dest.append('\t\tTO EDGE {0} VALUES ($"_i", $"_o", $"_id", {1}) WHERE $"_t" == "{0}"'.format(
                    t, attr_str))
                from_to_param.append(("", ""))
            elif from_type != "*" and to_type == "*":
                for _, _to in edge["from_to_types"]:
                    link_dest.append('\t\tTO EDGE {0} VALUES ($"_i", $"_o" {2}, $"_id", {1})'
                                     ' WHERE $"_t" == "{0}" AND $"_ot" == "{2}"'.format(t, attr_str, _to))
                    from_to_param.append(("", _to))
            elif from_type == "*" and to_type != "*":
                for _from, _ in edge["from_to_types"]:
                    link_dest.append(
                        '\t\tTO EDGE {0} VALUES ($"_i" {2}, $"_o", $"_id", {1})'
                        ' WHERE $"_t" == "{0}" AND $"_it" == "{2}"'.format(t, attr_str, _from))
                    from_to_param.append((_from, ""))
            else:
                for _from, _to in edge["from_to_types"]:
                    link_dest.append(
                        '\t\tTO EDGE {0} VALUES ($"_i" {2}, $"_o" {3}, $"_id", {1})'
                        ' WHERE $"_t" == "{0}" AND $"_it" == "{2}" AND $"_ot" == "{3}"'.format(t, attr_str, _from, _to))
                    from_to_param.append((_from, _to))
            if list_attr:
                for from_param, to_param in from_to_param:
                    clauses, loads = construct_temp_table_clauses_for_edge(
                        tg_schema["tuples"], list_attr, t, edge["properties"], from_param, to_param)
                    link_dest.extend(clauses)
                    link_temp_tables.extend(loads)

    if link_dest:
        res.append(load_link.format(sep.join(link_dest)))
    res.extend(link_temp_tables)

    res.append("}")
    return res


def generate_delete_job(tg_schema, graph_name, job_name):
    """Render a GSQL loading job that deletes the vertices/edges listed in the
    entity/link JSON files.

    :param tg_schema: json based tg schema (keys: vertices, edges)
    :param graph_name: target graph
    :param job_name: name for the created loading job
    :return: list of GSQL lines
    """
    res = [
        'CREATE LOADING JOB {1} FOR GRAPH {0}'.format(graph_name, job_name) + " {",
        # two separate statements, matching generate_loading_job (a missing
        # comma previously fused them into one list element)
        '\tDEFINE FILENAME entity;',
        '\tDEFINE FILENAME link;'
    ]

    for t, _ in tg_schema["vertices"].items():
        res.append(
            '\tDELETE VERTEX {0} (PRIMARY_ID $"_id") FROM entity WHERE $"_t" == "{0}"'
            ' USING JSON_FILE="true";'.format(t))

    for t, edge in tg_schema["edges"].items():
        if edge["is_multi"]:
            # multi edges are modelled as a vertex plus _in/_out edges, so all
            # three elements must be deleted
            res.append(
                '\tDELETE VERTEX {0} (PRIMARY_ID $"_id") FROM link WHERE $"_t" == "{0}"'
                ' USING JSON_FILE="true";'.format(t))
            for _from, _to in edge["from_to_types"]:
                res.append(
                    '\tDELETE EDGE {0}_in (FROM $"_i" {1}, TO $"_id") FROM link'
                    ' WHERE $"_t" == "{0}" AND $"_it" == "{1}" AND $"_ot" == "{2}"'
                    ' USING JSON_FILE="true";'.format(t, _from, _to))
                res.append(
                    '\tDELETE EDGE {0}_out (FROM $"_id", TO $"_o" {2}) FROM link'
                    ' WHERE $"_t" == "{0}" AND $"_it" == "{1}" AND $"_ot" == "{2}"'
                    ' USING JSON_FILE="true";'.format(t, _from, _to))
        else:
            for _from, _to in edge["from_to_types"]:
                res.append(
                    '\tDELETE EDGE {0} (FROM $"_i" {1}, TO $"_o" {2}) FROM link'
                    ' WHERE $"_t" == "{0}" AND $"_it" == "{1}" AND $"_ot" == "{2}"'
                    ' USING JSON_FILE="true";'.format(t, _from, _to))
    res.append("}")

    # each DELETE statement already carries its USING JSON_FILE clause; the
    # former post-processing loop that tried to append another one only
    # rebound its loop variable and had no effect, so it was removed
    return res


def generate_full_loading_gsql(tg_schema, graph_name):
    """
    Build the complete GSQL script for a full publish:

    1. TYPEDEF statements for any tuple types
    2. create graph {name} (empty) and switch to it
    3. create, run, and drop the schema change job define_{name}
    4. create loading job delete_{name} for graph {name}
    5. create loading job load_{name} for graph {name}

    :param tg_schema: json based tg schema
    :param graph_name: graph name
    :return: list of lines
    """
    define_job = "define_{0}".format(graph_name)
    lines = list(generate_typedef(tg_schema["tuples"]))
    lines.append("CREATE GRAPH {0} ()".format(graph_name))
    lines.append("USE GRAPH {0}".format(graph_name))
    lines.extend(generate_add_schema_change_job(tg_schema, graph_name, define_job))
    lines.append("RUN SCHEMA_CHANGE JOB {0}".format(define_job))
    lines.append("DROP JOB {0}".format(define_job))
    lines.extend(generate_delete_job(tg_schema, graph_name, "delete_{0}".format(graph_name)))
    lines.extend(generate_loading_job(tg_schema, graph_name, "load_{0}".format(graph_name)))

    return lines
