# -*- coding: utf-8 -*-

from __future__ import unicode_literals
from __future__ import absolute_import

import os
from argparse import ArgumentParser

from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext
from pyspark.sql.types import StringType, StructField, StructType

from kgpipeline.jsonutil import item_to_string, entity_json_decode


"""
将JSON格式的图谱数据的实体和连接转换成neo4j发版需要的CSV格式
"""


def get_key_and_type_list(d):  # type: (dict) -> list
    """
    Return one (key, is_list) pair per key of a single decoded JSON record.

    The boolean flag marks whether the value is a list, which downstream
    header generation uses to decide on the ":string[]" suffix.

    :param d: decoded JSON record
    :return: list of (key, bool) tuples in the record's key order
    """
    return [(key, isinstance(value, list)) for key, value in d.items()]


def get_json_rdd_keys(rdd):
    """
    Collect the distinct (key, is_list) pairs over all JSON records in the RDD.

    Each record contributes (key, True/False) pairs via get_key_and_type_list;
    reduceByKey then OR-folds the flags, so a key is reported as list-typed if
    its value is a list in ANY record.

    :param rdd: RDD of decoded JSON dicts
    :return: list of (key, is_list) tuples
    """
    # NOTE(review): distinct(1) coalesces to a single partition — presumably
    # fine because the distinct key set is small; confirm before changing.
    return rdd.flatMap(get_key_and_type_list).distinct(1).reduceByKey(lambda x, y: x or y).collect()


# Header remapping for link (relationship) rows in neo4j CSV import format.
LINK_KEY_MAP = {
    'in': ':START_ID',
    'out': ':END_ID'
}

# Header remapping for entity (node) rows.
ENTITY_KEY_MAP = {
    '@id': "kgid:ID",
    '@type': ':LABEL'
}


def json_keys_to_neo4j(keys, is_link):
    """
    Translate JSON keys into the header row neo4j's CSV importer expects,
    e.g. ":START_ID","name",":END_ID",":TYPE".

    :param keys: list of (key, is_list) tuples; is_list marks array values
    :param is_link: True for relationship (CnsLink) data, False for entities
    :return: list of header strings, aligned with the extracted data columns
    """
    # Was ``is_link and LINK_KEY_MAP or ENTITY_KEY_MAP`` — that idiom silently
    # falls through to the second map if the first is ever falsy; a ternary
    # expresses the intent safely.
    key_map = LINK_KEY_MAP if is_link else ENTITY_KEY_MAP

    headers = []
    for k, is_list in keys:
        if k == "@type":
            if is_link:
                # Links get two @type-derived columns: the relationship type
                # (first label) and the full label list.
                headers.append(":TYPE")
                headers.append("@type:string[]")
            else:
                headers.append(":LABEL")
        else:
            h = key_map.get(k, k)
            if is_list:
                # Array-valued properties need neo4j's list-type suffix.
                h += ":string[]"
            headers.append(h)
    return headers


def get_data_extractor(keys, is_link):
    """
    Build a closure that flattens one decoded JSON record into a CSV row.

    :param keys: ordered list of keys to read from each record
    :param is_link: True when the records are CnsLink relationships
    :return: callable mapping a dict to a list of stringified column values
    """
    def _extract(record):
        row = []
        for key in keys:
            value = record.get(key)
            # Links carry an extra ":TYPE" column holding the first @type
            # label, in front of the stringified full label list.
            if is_link and key == "@type":
                row.append(value[0])
            row.append(item_to_string(value))
        return row

    return _extract


def json_rdd_to_csv_df(sql_context, rdd):
    """
    Convert a JSON RDD (entities or links) into a string-typed DataFrame
    whose columns follow the neo4j CSV import header convention.

    :param sql_context: SQLContext or HiveContext
    :param rdd: RDD of decoded JSON dicts
    :return: DataFrame wrapping the input RDD
    """
    typed_keys = get_json_rdd_keys(rdd)

    # One sampled record decides the mode: "CnsLink" among its @type labels
    # marks the whole RDD as relationship data.
    sample = rdd.take(1)[0]
    is_link = "CnsLink" in sample["@type"]

    headers = json_keys_to_neo4j(typed_keys, is_link)
    extractor = get_data_extractor([key for key, _ in typed_keys], is_link)

    fields = [StructField(header, StringType(), nullable=True) for header in headers]
    return sql_context.createDataFrame(rdd.map(extractor), schema=StructType(fields))


def parse_int(s):
    """Parse *s* as a base-10 integer (helper for argparse ``type=``)."""
    return int(s)


def main():
    """
    Entry point: convert entity/link JSON into neo4j-importable CSV.

    Reads the input path from the command line, decodes each line as JSON,
    builds a string-typed DataFrame with neo4j headers, and writes it out
    as CSV (optionally repartitioned).
    """
    parser = ArgumentParser(description="kg entity or link json to neo4j csv")
    parser.add_argument("--partition", "-p", type=parse_int,
                        help="the partition of the output, default is the same as input")
    parser.add_argument("input", help="json file that contains all entities")
    parser.add_argument("--output", "-o",
                        help="csv output name, if not specified then the name is {input}.csv")

    args = parser.parse_args()

    # Default output path: input path with its extension replaced by ".csv".
    if not args.output:
        args.output = os.path.splitext(args.input)[0] + ".csv"

    input_path = args.input
    output_path = args.output

    sc = SparkContext(conf=SparkConf())
    session = SQLContext(sc)

    json_rdd = sc.textFile(input_path).map(entity_json_decode)

    df = json_rdd_to_csv_df(session, json_rdd)

    if args.partition:
        df = df.repartition(args.partition)

    # Spark 1.x needs the external spark-csv package; 2.x+ ships a built-in
    # "csv" data source.
    if sc.version.startswith("1"):
        csv_format = "com.databricks.spark.csv"
    else:
        csv_format = "csv"

    df.write.mode("overwrite").format(
        csv_format).options(nullValue="", header="true", escape='"').save(output_path)


# Bug fix: main() was defined but never invoked, so running this file
# (e.g. via spark-submit) did nothing.
if __name__ == "__main__":
    main()