# -*- coding: utf-8 -*-

"""
Spark相关函数
"""

from __future__ import unicode_literals
from __future__ import absolute_import

import sys
import re
import json
from typing import Tuple, Callable, Any, AnyStr, Union, Dict, List, Text

from pyspark import SparkContext, SparkConf, RDD
from pyspark.sql import HiveContext, DataFrame
from pyspark.sql.utils import AnalysisException, IllegalArgumentException

import pyspark.sql.types as spark_types
from pyspark.sql.types import StructType, StructField, StringType, DataType, Row

# Regex extracting table names from a textual Spark logical plan
# (matches entries like: UnresolvedRelation `my_table`)
REL_PATTERN = re.compile(r"UnresolvedRelation `(\w+)`")

# Spark configuration items (snapshot of "SET" output); populated by init_spark()
SPARK_CONFIG = None
# Hadoop Configuration taken from the JavaSparkContext; populated by init_spark()
HADOOP_CONFIG = None
# JVM class handles resolved through the Py4J gateway in init_spark()
URI = None
Path = None
FileSystem = None
FileUtil = None
# HDFS object, used to manipulate HDFS files; populated by init_spark()
HDFS = None
# Local FileSystem handle; populated by init_spark()
LocalFS = None
Configuration = None
# Py4J handle to the driver-side JVM; populated by init_spark()
jvm = None

if sys.version >= "3":
    def df_row_to_dict(row):  # type: (Row) -> Dict
        """Recursively convert a Row to a dict (Python 3 keys are already unicode)."""
        return row.asDict(True)
else:
    def df_row_to_dict(row):  # type: (Row) -> Dict
        """For Python 2: convert a Row to a dict, decoding all byte-string keys to unicode."""
        if not hasattr(row, "__fields__"):
            raise TypeError("Cannot convert a Row class into dict")

        def ensure_unicode(s):
            # Decode Python 2 byte strings (str) to unicode; pass other values through.
            if isinstance(s, str):
                return s.decode("utf-8")
            else:
                return s

        def conv(obj):
            # Recursively normalize nested Rows, lists, and dicts.
            if isinstance(obj, Row):
                return obj.asDict(True)
            elif isinstance(obj, list):
                return [conv(o) for o in obj]
            elif isinstance(obj, dict):
                return dict((ensure_unicode(k), conv(v)) for k, v in obj.items())
            else:
                return obj

        # Field names come from Row.__fields__; values iterate in the same order.
        unicode_fields = [ensure_unicode(k) for k in row.__fields__]
        return dict(zip(unicode_fields, (conv(o) for o in row)))


def init_spark():  # type: () -> Tuple[SparkContext, HiveContext]
    """
    Create a SparkContext/HiveContext pair and initialize the module-level
    globals (SPARK_CONFIG, HADOOP_CONFIG, the JVM class handles, and the
    HDFS/local FileSystem objects) used by the other helpers in this module.

    :return: (SparkContext, HiveContext) tuple
    """
    global SPARK_CONFIG, HADOOP_CONFIG
    global URI
    global Path
    global FileSystem, FileUtil, Configuration
    global HDFS, LocalFS
    global jvm

    sc = SparkContext(conf=SparkConf())
    session = HiveContext(sc)
    # "SET" returns key/value rows; dict() turns them into a config snapshot.
    SPARK_CONFIG = dict(session.sql("SET").collect())
    # _jsc is a pyspark internal exposing the underlying JavaSparkContext.
    HADOOP_CONFIG = sc._jsc.hadoopConfiguration()

    # Py4J gateway into the driver JVM; used to reach Hadoop classes directly.
    jvm = sc._gateway.jvm

    URI = jvm.java.net.URI
    Path = jvm.org.apache.hadoop.fs.Path
    FileSystem = jvm.org.apache.hadoop.fs.FileSystem
    FileUtil = jvm.org.apache.hadoop.fs.FileUtil
    Configuration = jvm.org.apache.hadoop.conf.Configuration
    HDFS = FileSystem.get(HADOOP_CONFIG)
    LocalFS = FileSystem.getLocal(HADOOP_CONFIG)

    return sc, session


def _check_dst(src_name, dst_fs, dst_path, overwrite):
    if dst_fs.exists(dst_path):
        dst_status = dst_fs.getFileStatus(dst_path)
        if dst_status.isDir():
            if not src_name:
                raise IOError("Target " + dst_path.toString() + " is a directory")
            return _check_dst(None, dst_fs, Path(dst_path, src_name), overwrite)
        elif not overwrite:
            raise IOError("Target " + dst_path.toString() + " already exists")
    return dst_path


def copy_merge(src_fs, src_dir, dst_fs, dst_file, delete_source, conf):
    """
    Re-implementation of FileUtil.copyMerge (removed in Hadoop 3.0):
    concatenate every file directly under src_dir into the single dst_file.

    :param src_fs: source FileSystem
    :param src_dir: source directory Path
    :param dst_fs: destination FileSystem
    :param dst_file: destination file Path
    :param delete_source: True or False if the source directory would be deleted after the copy
    :param conf: Configuration
    :return: True if succeed else False
    """
    # Validate the destination up front; overwrite=False means an existing
    # target file raises IOError.
    dst = _check_dst(src_dir.toString(), dst_fs, dst_file, False)
    if not src_fs.getFileStatus(src_dir).isDir():
        return False

    out = dst_fs.create(dst)

    try:
        # Append each regular file under src_dir to the single output stream.
        # NOTE(review): sub-directories are skipped, not recursed into.
        for content in src_fs.listStatus(src_dir):
            if not content.isDir():
                in_stream = src_fs.open(content.getPath())
                try:
                    # copyBytes(..., False): do not close `out`, it must stay
                    # open across loop iterations.
                    jvm.org.apache.hadoop.io.IOUtils.copyBytes(in_stream, out, conf, False)
                finally:
                    in_stream.close()
    finally:
        out.close()

    if delete_source:
        # Recursive delete; its boolean result becomes this call's result.
        return src_fs.delete(src_dir, True)
    else:
        return True


def current_context():  # type: () -> SparkContext
    """
    Return the current SparkContext (created on first use).
    """
    return SparkContext.getOrCreate()


def current_session():  # type: () -> HiveContext
    """
    Return the current HiveContext bound to the current SparkContext.
    """
    return HiveContext.getOrCreate(current_context())


def current_version():  # type: () -> AnyStr
    """
    Return the version string of the running Spark.
    """
    return current_context().version


def is_v1():  # type: () -> bool
    """
    Whether the running Spark is a 1.x release.

    Checks the "1." prefix (not just "1") so that a hypothetical version
    such as "10.0" is not mistaken for 1.x.
    """
    return current_version().startswith("1.")


def parse_sql(sql):  # type: (AnyStr) -> Union[AnyStr,None]
    """
    Parse a SQL statement and return its (unresolved) execution plan as a
    string. Compatible with both Spark 1.x and Spark 2.x.

    :param sql: str, the SQL statement
    :return: string dump of the parsed plan, or None when parsing fails
    """
    try:
        if is_v1():
            # Spark 1.x: the Scala SQLContext (_ssql_ctx) exposes parseSql.
            parser = current_session()._ssql_ctx
            return parser.parseSql(sql).toString()
        else:
            # Spark 2.x: go through the SessionState's sqlParser.
            parser = current_session().sparkSession._jsparkSession.sessionState().sqlParser()
            return parser.parsePlan(sql).toString()
    except AnalysisException:
        return None
    except IllegalArgumentException:
        return None


def parse_tables_from_plan(plan):
    """
    Find every table referenced by a Spark execution plan.

    :param plan: textual Spark execution plan
    :return: list of table names (UnresolvedRelation entries)
    """
    return [match.group(1) for match in REL_PATTERN.finditer(plan)]


def broadcast_variable(v):
    """
    Broadcast a read-only variable to every node in the cluster.

    :param v: value to broadcast
    :return: pyspark Broadcast handle
    """
    sc = current_context()
    return sc.broadcast(v)


def empty_rdd():
    """Return an empty RDD bound to the current SparkContext."""
    sc = current_context()
    return sc.emptyRDD()


def text_dataframe_to_rdd(df, converter=None):
    """
    Extract the "value" column of a text DataFrame as an RDD, optionally
    passing each value through ``converter``.

    :param df: DataFrame with a "value" column (may be None)
    :param converter: optional f(value) -> Any applied to each value
    :return: RDD of (converted) values; an empty RDD when df is None/empty
    """
    if not df or df.rdd.isEmpty():
        return empty_rdd()
    extract = (lambda row: converter(row["value"])) if converter else (lambda row: row["value"])
    return df.rdd.map(extract)


def to_text_dataframe(rdd, to_text_mapper=None):
    # type: (RDD, Callable[[Any], AnyStr]) -> DataFrame
    """
    Build a single-column text DataFrame from an RDD; the element-to-text
    mapper is optional (defaults to None).

    :param rdd: input text RDD
    :param to_text_mapper: optional f(Any) -> str applied to each element first
    :return: DataFrame containing exactly one column named `value`
    """
    if to_text_mapper:
        wrapped = rdd.map(lambda elem: (to_text_mapper(elem),))
    else:
        wrapped = rdd.map(lambda elem: (elem,))
    value_schema = StructType([StructField("value", StringType())])
    return current_session().createDataFrame(wrapped, schema=value_schema)


def to_simple_typed_dataframe(rdd, fields, data_type_mapping=None):
    # type: (RDD, List[Tuple[Text,Text]], Dict[Text, DataType]) -> DataFrame
    """
    Build a typed DataFrame from an RDD; fields and types are given by
    ``fields``. "Simple" means the schema is flat — no nested types.

    :param rdd: input RDD whose elements are tuples
    :param fields: [(field_name, field_type)] list of name/type pairs
    :param data_type_mapping: optional mapping from a type-name string to a
                              pyspark.sql.types.DataType class; when absent,
                              every type name is resolved by the default rule
                              (title-case plus "Type", e.g. "string" -> StringType)
    :return: the typed DataFrame
    """
    if data_type_mapping:
        def _resolve(type_name):  # type: (Text) -> DataType
            return data_type_mapping.get(type_name, StringType)()
    else:
        def _resolve(type_name):  # type: (Text) -> DataType
            return getattr(spark_types, type_name.title() + "Type", StringType)()

    struct_fields = [StructField(name, _resolve(type_name)) for name, type_name in fields]
    return current_session().createDataFrame(rdd, StructType(struct_fields))


def to_all_string_dataframe(rdd, fields):
    """
    Build a DataFrame from an RDD where every column is typed as string.

    :param rdd: text RDD
    :param fields: [str] list of field names
    :return: DataFrame whose columns are all strings
    """
    schema = StructType([StructField(name, StringType()) for name in fields])
    return current_session().createDataFrame(rdd, schema)


def lines_to_text_dataframe(lines):  # type: (List[Text]) -> DataFrame
    """Join ``lines`` into one newline-separated blob and wrap it in a
    single-row, single-partition text DataFrame."""
    blob = "\n".join(lines)
    rdd = current_context().parallelize([blob], 1)
    return to_text_dataframe(rdd)


def text_dataframe_to_lines(df):  # type: (DataFrame) -> List[Text]
    """Collect the "value" column of a text DataFrame as a list of strings."""
    values = df.rdd.map(lambda row: row["value"])
    return values.collect()


def json_to_text_dataframe(json_data):  # type: (Dict) -> DataFrame
    """Serialize ``json_data`` as pretty-printed JSON (sorted keys, indent 4)
    and wrap it in a single-row, single-partition text DataFrame."""
    serialized = json.dumps(json_data, sort_keys=True, indent=4)
    rdd = current_context().parallelize([serialized], 1)
    return to_text_dataframe(rdd)


def text_dataframe_to_json(df):  # type: (DataFrame) -> Dict
    """Reassemble the "value" column of a text DataFrame (newline-joined)
    and parse it as JSON."""
    parts = df.rdd.map(lambda row: row["value"]).collect()
    return json.loads("\n".join(parts))
