# -*- coding: utf-8 -*-

"""
base of pipeline job
"""

from __future__ import unicode_literals
from __future__ import absolute_import

from typing import List, Tuple, AnyStr, Type, Dict, Optional
from six import string_types

from pyspark.sql import DataFrame

from copy import deepcopy
from importlib import import_module

from abc import ABCMeta, abstractmethod
from .dataset import DataSetSpec, OutputSpec, save_dataframe_by_spec, partition_dataframe_by_spec
from .reader import KgReader

import logging

logger = logging.getLogger(__name__)


class JobConfigError(Exception):
    """Raised when a job's configuration is missing or invalid."""


class JobInputMisMatchError(Exception):
    """Raised when a job's actual inputs do not match its configuration."""


class JobOutputMisMatchError(Exception):
    """Raised when the number of outputs produced differs from the number configured."""


class EmptyDataFrame(object):
    """Sentinel type standing in for a missing or column-less DataFrame."""


# Module-level singleton; compared with identity checks (`df is EMPTY_DATAFRAME`).
EMPTY_DATAFRAME = EmptyDataFrame()


class KgJob(object):
    """
    Base class for jobs in a pipeline. Every concrete job must override
    the `process()` method.
    """
    # NOTE(review): `__metaclass__` is Python 2 syntax only; on Python 3 this is
    # an ordinary class attribute and `@abstractmethod` is NOT enforced at
    # instantiation time -- confirm whether Py3 enforcement is needed.
    __metaclass__ = ABCMeta

    # Global registry mapping job type name -> job class.
    JOB_REGISTRY = {}  # type: Dict[AnyStr, Type]

    @staticmethod
    def register(job_type_name, job_class):  # type: (AnyStr, Type) -> None
        """
        Register a job class under a type name.

        :param job_type_name: str, job type name
        :param job_class: class, job class
        :raises JobConfigError: if the type name is already registered
        :return: None
        """
        if job_type_name in KgJob.JOB_REGISTRY:
            # Use the module's own error type instead of a bare Exception,
            # consistent with the other configuration errors in this class.
            raise JobConfigError("Job type {0} already registered!".format(job_type_name))
        KgJob.JOB_REGISTRY[job_type_name] = job_class

    @staticmethod
    def get_job(job_type_name):  # type: (AnyStr) -> Optional[Type]
        """
        Resolve a job class from its type name.

        :param job_type_name: str, job type name
        :return: the job class, or None if still unknown after importing
            the default module
        """
        job_class = KgJob.JOB_REGISTRY.get(job_type_name)
        if job_class:
            return job_class
        # Importing the internal default module "kgpipeline.jobs.<name>job" is
        # expected to call `KgJob.register(...)` as an import side effect, so
        # look the name up again afterwards.
        import_module("kgpipeline.jobs.{0}job".format(job_type_name))
        return KgJob.JOB_REGISTRY.get(job_type_name)

    def __init__(self, config, reader):  # type: (Dict, KgReader) -> None
        """
        Initialize the job from its definition.

        :param config: dict, full job definition
        :param reader: `KgReader` used to register and read data sets
        :raises JobConfigError: if no job type is specified

        Notes
        -----
        `self._config` is the raw config dict passed in;
        `self.config` is the job-specific "config" sub-dict.
        """
        self._config = config
        self._reader = reader
        self.name = config.get("name", "")
        self.type = config.get("type")

        if self.type is None:
            raise JobConfigError("No job type specified!")

        self.config = config.get("config", {})
        self.input = config.get("input", [])
        self.output = config.get("output", [])

        # Create and register every input that carries an inline spec dict;
        # plain string spec ids are resolved later in `run()`.
        for i in self.input:
            if i and isinstance(i["spec"], dict):
                spec = DataSetSpec.from_dict(i["spec"])
                # Assumes self.config always carries a "date" entry whenever an
                # inline spec is used -- a missing key raises KeyError here.
                spec.content = spec.content.replace("$DATE", self.config["date"])
                i["spec"] = spec
                self._reader.register(spec)

    def save_output(self, df_list):  # type: (List[DataFrame]) -> None
        """
        Persist and/or cache the job's output DataFrames.

        Each entry of `self.output` is zipped with the corresponding
        DataFrame; `None` output entries are skipped. Empty results (None, or
        a DataFrame whose columns were all dropped) are replaced by the
        EMPTY_DATAFRAME sentinel and are never written out.

        :param df_list: [DataFrame], outputs returned by `process()`
        :raises JobConfigError: on an unknown output action, or when a string
            data set id cannot be resolved for a save action
        """
        for output_config, df in zip(self.output, df_list):
            if output_config is None:
                continue

            if df is None:
                df = EMPTY_DATAFRAME
            # In case there is no column left in df (e.g. all cols dropped).
            elif not df.schema.fieldNames():
                df = EMPTY_DATAFRAME

            spec = OutputSpec.from_dict(output_config)

            if isinstance(spec.data_set_spec, string_types):
                # The output references an existing data set by id; resolve it
                # through the reader only when it must be saved.
                data_set_id = spec.data_set_spec
                if spec.action in ("save", "save_and_cache"):
                    spec.data_set_spec = self._reader.source(data_set_id)
                    if not spec.data_set_spec:
                        raise JobConfigError("no data set found for output {0}".format(data_set_id))
            else:
                # Inline spec: substitute the date placeholder, as for inputs.
                data_set_id = spec.data_set_spec.id
                spec.data_set_spec.content = spec.data_set_spec.content.replace("$DATE", self.config["date"])

            if df is not EMPTY_DATAFRAME:
                df = partition_dataframe_by_spec(df, spec)

            if spec.action == "save":
                self._reader.register(spec.data_set_spec)
                if df is not EMPTY_DATAFRAME:
                    save_dataframe_by_spec(df, spec)
            elif spec.action == "save_and_cache":
                self._reader.register(spec.data_set_spec, df)
                if df is not EMPTY_DATAFRAME:
                    save_dataframe_by_spec(df, spec)
            elif spec.action == "cache":
                self._reader.register(data_set_id, df)
            else:
                raise JobConfigError(
                    "Invalid output action (must be one of save, cache, save_and_cache): {0}".format(spec.action))

    @abstractmethod
    def process(self, inputs):  # type: (List[Tuple[DataFrame, dict]]) -> List[DataFrame]
        """
        Concrete jobs inherit `KgJob` and implement `process()`.

        :param inputs: [(DataFrame, dict)] list of DataFrames paired with
            their option dicts (a missing/empty input is passed as None)
        :return: [DataFrame], list of output DataFrames
        """
        pass

    def run(self):
        """
        Read the input data, execute `process()`, then save the results.

        :raises JobOutputMisMatchError: if the number of DataFrames returned
            by `process()` differs from the configured number of outputs
        """
        logger.info("Starting job: {}".format(self.name))

        inputs = []
        for input_config in self.input:
            if input_config is None or "spec" not in input_config or input_config["spec"] is None:
                # Placeholder input: the job still sees a slot for it.
                inputs.append((None, {}))
                continue

            # Copy so that popping option keys does not mutate self.input.
            input_options = deepcopy(input_config)

            spec = input_options.pop("spec")

            if isinstance(spec, string_types):
                spec_id = spec
            elif isinstance(spec, DataSetSpec):
                spec_id = spec.id
            else:
                raise JobConfigError("Input is neither string id nor valid DataSetSpec!")

            df = self._reader.input(spec)

            # Always remove "show" from the options forwarded to process().
            show = input_options.pop("show", False)

            if df is EMPTY_DATAFRAME:
                # `logger.warn` is deprecated; `warning` is the supported name.
                logger.warning("Empty input detected: {0}".format(spec_id))
                inputs.append((None, input_options))
            else:
                if show:
                    df.show()
                inputs.append((df, input_options))

        outputs = self.process(inputs)

        if outputs is None:
            outputs = []

        # Verify the actual output count matches the count declared in config.
        if len(outputs) != len(self.output):
            raise JobOutputMisMatchError(
                "Job expected {0} output, while actually specified {1}!".format(len(self.output), len(outputs)))

        if outputs:
            self.save_output(outputs)

        logger.info("Done job: {}".format(self.name))
