#! /usr/bin/env python3
from typing import Callable, Optional, Dict

from pyspark import Row, keyword_only
from pyspark.ml.param import Param, Params
from pyspark.sql.types import StructType, StructField, LongType, StringType
from pyspark.sql import DataFrame
from pyspark.sql.functions import *

from gai.v2.spark.base import SparkTransformer
from gai.v2.spark.demo_data import demo_data_for_cell_nullifier
from gai.v2.spark.feature.summary import summary_of
from gai.v2.utils import get_or_create_spark_session


def drop_nonnumeric_columns(df):
    """Drops the non-numeric columns.

    Args:
        df:
            the dataframe.
    Returns:
        the result dataframe, keeping only columns of a numeric Spark SQL type.
    """
    numeric_types = {'int', 'bigint', 'double', 'float', 'tinyint', 'smallint'}

    def _is_numeric(type_name):
        # Spark renders decimal columns as e.g. 'decimal(10,2)', so match the prefix.
        return type_name in numeric_types or type_name.startswith('decimal')

    to_drop = [name for name, type_ in df.dtypes if not _is_numeric(type_)]
    # Drop all non-numeric columns in one pass instead of chaining one
    # intermediate DataFrame per dropped column.
    return df.drop(*to_drop) if to_drop else df


def make_nullify_cells(colums, null_equivalent: dict, non_fatal_cols: list = (),
                       valid_fun: Optional[Callable[[Row], bool]] = None):
    """Build a partition-mapping function that drops invalid rows and nullifies
    dummy values in non-fatal columns.

    Args:
        colums:
            the column names to inspect (spelling kept for backward compatibility).
        null_equivalent:
            mapping of column name -> list of values that are treated as null,
            e.g. ``{'gid_': ['non-existent']}``.
        non_fatal_cols:
            columns whose null-equivalent values are replaced by ``None`` instead
            of causing the row to be dropped; must be a subset of ``colums``.
        valid_fun:
            optional row predicate; when given it replaces the default check
            on the fatal columns.
    Returns:
        a generator function mapping an iterator of ``Row`` to an iterator of
        ``dict`` (invalid rows removed, dummies in non-fatal columns nulled).
    """
    assert set(non_fatal_cols) <= set(colums)
    fatal_cols = set(colums) - set(non_fatal_cols)

    def default_is_nonnull(row):
        # A row is valid only if every fatal column is present and not a dummy.
        for col in fatal_cols:
            value = row[col]
            if value is None or value in null_equivalent.get(col, []):
                return False
        return True

    # Hoisted out of the per-row loop: the predicate never changes between rows.
    is_valid = valid_fun or default_is_nonnull

    def fun(rows):
        for row in rows:
            if is_valid(row):
                row_dict = row.asDict()
                for non_fatal_col in non_fatal_cols:
                    dummies = null_equivalent.get(non_fatal_col) or []
                    if row_dict[non_fatal_col] is not None \
                            and row_dict[non_fatal_col] in dummies:
                        row_dict[non_fatal_col] = None
                yield row_dict

    return fun


def refill_rdd(non_fatal_cols, fill_dict):
    """Build a partition-mapping function that replaces missing values in the
    non-fatal columns with their configured replacements.

    Args:
        non_fatal_cols:
            columns whose ``None`` values should be refilled.
        fill_dict:
            mapping of column name -> replacement value.
    Returns:
        a generator function mapping an iterator of ``Row`` to an iterator of
        ``dict`` with the non-fatal columns filled in.
    """

    def fun(rows):
        for row in rows:
            as_dict = row.asDict()
            missing = [name for name in non_fatal_cols if as_dict.get(name) is None]
            for name in missing:
                as_dict[name] = fill_dict.get(name)
            yield as_dict

    return fun


class PreProcessor(SparkTransformer):
    """Transform a raw ``DataFrame`` into a complete ``DataFrame``, given a pre-set
    strategy, an optional dictionary that marks dummy values, an optional dictionary
    with the replacements for non-fatal misses, and an optional custom function that
    verifies rows in the ``DataFrame``.

    Args:
        strategy:
            the strategy of processing missing attributes
                - ``DEL_ALL_MISSING``: delete rows that are missing any attribute
                - ``FILL_WITH_GIVEN``: fill all missing attributes with the value given in ``fill_dict``
                - ``DEL_FATAL_FILL_OTHER_WITH_GIVEN``: delete all rows with attributes missing in fatal columns, fill \
                    other missing ones with the value in ``fill_dict``
                - ``CUSTOMIZE``: delete rows that are rejected by ``valid_fun``, replace missing attributes with the value \
                    given in ``fill_dict`` (if there is no corresponding replacement, the attribute will not be replaced)

        dummies_dict:
            dictionary that marks dummy values (dummy values will be replaced by ``None`` when captured).
            e.g. ``{<col1-name>: [<dummy-value1>, <dummy-value2>, ...], <col2-name>: [...], ...}``

        fill_dict:
            dictionary that records the replacement for a non-fatal missing attribute.
            the value can be any specific value or a pre-set placeholder, i.e. ``MEAN``, ``MAX``, ``MIN`` (only numerical
            types are supported)

        non_fatal_cols:
            list of column names that are not fatal when missing, i.e. they will be filled with the given replacement.

        valid_fun:
            function used to verify rows; only required by the ``CUSTOMIZE`` strategy.


    >>> spark = get_or_create_spark_session()
    >>> schema = StructType([StructField("secret", LongType()),
    ...                      StructField("day", StringType()),
    ...                      StructField("gid_", StringType()),
    ...                      StructField('_month_offset', LongType()),
    ...                      StructField('ft_usertags', StringType()),
    ...                      StructField('ft_category_cnt', StringType())])
    >>> df = spark.createDataFrame([(1,'20181114','ANDROID-68b6c301c39c4b37b7b8fb276dba0009',None,'',''),
    ...                             (2,'20181114','ANDROID-68b6c301c39c4b37b7b8fb276dba0008',0,'',''),
    ...                             (2,'20181114','ANDROID-68b6c301c39c4b37b7b8fb276dba0008',-1,'',''),
    ...                             (3,'20181114',None,0,'',''),
    ...                             (3,'20181114',None,-1,'',''),
    ...                             (4,'20181126','ANDROID-0000db9c08a74a8993c4916abb0b32d0',-1,'','3'),
    ...                             (5,'20181126','ANDROID-846049d187054f4c80ab159de37d8f83',0,'022000,026400,02d400','12'),
    ...                             (5,'20181126','ANDROID-846049d187054f4c80ab159de37d8f83',-1,'022000,026400,02d400','13'),
    ...                             (7,'20181126','non-existent',0,'',''),
    ...                             (7,'20181126','non-existent',-1,'',''),
    ...                             (8,'20180709','ANDROID-000340aa512b4f809ddd3c4c9bc932d0',0,'02b000,02b100,h0','14'),
    ...                             (8,'20180709','ANDROID-000340aa512b4f809ddd3c4c9bc932d0',-1,'02b000,02b100,h0','15'),
    ...                             (9,'20180709','ANDROID-00033a1c3f4b4cfd9b502fb0348fd255',-1,'022000,022100,02d300','9'),
    ...                             (11,'20180709',None,-1,'','')],
    ...                             schema="secret string, day string, gid_ string, _month_offset int, ft_usertags string, ft_category_cnt string")
    >>> df.show()
    +------+--------+--------------------+-------------+--------------------+---------------+
    |secret|     day|                gid_|_month_offset|         ft_usertags|ft_category_cnt|
    +------+--------+--------------------+-------------+--------------------+---------------+
    |     1|20181114|ANDROID-68b6c301c...|         null|                    |               |
    |     2|20181114|ANDROID-68b6c301c...|            0|                    |               |
    |     2|20181114|ANDROID-68b6c301c...|           -1|                    |               |
    |     3|20181114|                null|            0|                    |               |
    |     3|20181114|                null|           -1|                    |               |
    |     4|20181126|ANDROID-0000db9c0...|           -1|                    |              3|
    |     5|20181126|ANDROID-846049d18...|            0|022000,026400,02d400|             12|
    |     5|20181126|ANDROID-846049d18...|           -1|022000,026400,02d400|             13|
    |     7|20181126|        non-existent|            0|                    |               |
    |     7|20181126|        non-existent|           -1|                    |               |
    |     8|20180709|ANDROID-000340aa5...|            0|    02b000,02b100,h0|             14|
    |     8|20180709|ANDROID-000340aa5...|           -1|    02b000,02b100,h0|             15|
    |     9|20180709|ANDROID-00033a1c3...|           -1|022000,022100,02d300|              9|
    |    11|20180709|                null|           -1|                    |               |
    +------+--------+--------------------+-------------+--------------------+---------------+
    <BLANKLINE>
    >>> pre_processor = PreProcessor(strategy=PreProcessor.DEL_ALL_MISSING,
    ...                              input_cols=['secret', 'day', 'gid_',
    ...                                          '_month_offset', 'ft_category_cnt'],
    ...                              dummies_dict={'gid_': ['non-existent'],
    ...                                            'ft_usertags': [''],
    ...                                            'ft_category_cnt': ['']})
    >>> pre_processor.transform(df).show()
    +------+--------+--------------------+-------------+--------------------+---------------+
    |secret|     day|                gid_|_month_offset|         ft_usertags|ft_category_cnt|
    +------+--------+--------------------+-------------+--------------------+---------------+
    |     4|20181126|ANDROID-0000db9c0...|           -1|                    |              3|
    |     5|20181126|ANDROID-846049d18...|            0|022000,026400,02d400|             12|
    |     5|20181126|ANDROID-846049d18...|           -1|022000,026400,02d400|             13|
    |     8|20180709|ANDROID-000340aa5...|            0|    02b000,02b100,h0|             14|
    |     8|20180709|ANDROID-000340aa5...|           -1|    02b000,02b100,h0|             15|
    |     9|20180709|ANDROID-00033a1c3...|           -1|022000,022100,02d300|              9|
    +------+--------+--------------------+-------------+--------------------+---------------+
    <BLANKLINE>
    >>> pre_processor = PreProcessor(strategy=PreProcessor.FILL_WITH_GIVEN,
    ...                              dummies_dict={'gid_': ['non-existent'],
    ...                                            'ft_usertags': [''],
    ...                                            'ft_category_cnt': ['']},
    ...                              fill_dict={'secret': PreProcessor.MIN,
    ...                                         'day': '20181114',
    ...                                         'gid_': 'IPHONE',
    ...                                         '_month_offset': PreProcessor.MAX,
    ...                                         'ft_usertags': '000000',
    ...                                         'ft_category_cnt': PreProcessor.MEAN},)
    >>> pre_processor.transform(df).show()
    +------+--------+--------------------+-------------+--------------------+---------------+
    |secret|     day|                gid_|_month_offset|         ft_usertags|ft_category_cnt|
    +------+--------+--------------------+-------------+--------------------+---------------+
    |     1|20181114|ANDROID-68b6c301c...|            0|              000000|           11.0|
    |     2|20181114|ANDROID-68b6c301c...|            0|              000000|           11.0|
    |     2|20181114|ANDROID-68b6c301c...|           -1|              000000|           11.0|
    |     3|20181114|              IPHONE|            0|              000000|           11.0|
    |     3|20181114|              IPHONE|           -1|              000000|           11.0|
    |     4|20181126|ANDROID-0000db9c0...|           -1|              000000|              3|
    |     5|20181126|ANDROID-846049d18...|            0|022000,026400,02d400|             12|
    |     5|20181126|ANDROID-846049d18...|           -1|022000,026400,02d400|             13|
    |     7|20181126|              IPHONE|            0|              000000|           11.0|
    |     7|20181126|              IPHONE|           -1|              000000|           11.0|
    |     8|20180709|ANDROID-000340aa5...|            0|    02b000,02b100,h0|             14|
    |     8|20180709|ANDROID-000340aa5...|           -1|    02b000,02b100,h0|             15|
    |     9|20180709|ANDROID-00033a1c3...|           -1|022000,022100,02d300|              9|
    |    11|20180709|              IPHONE|           -1|              000000|           11.0|
    +------+--------+--------------------+-------------+--------------------+---------------+
    <BLANKLINE>
    >>> pre_processor = PreProcessor(strategy=PreProcessor.DEL_FATAL_FILL_OTHER_WITH_GIVEN,
    ...                              dummies_dict={'gid_': ['non-existent'],
    ...                                            'ft_usertags': [''],
    ...                                            'ft_category_cnt': ['']},
    ...                              fill_dict={'secret': PreProcessor.MIN,
    ...                                         'day': '20181114',
    ...                                         '_month_offset': PreProcessor.MAX,
    ...                                         'ft_usertags': '000000',
    ...                                         'ft_category_cnt': PreProcessor.MEAN},
    ...                              non_fatal_cols=['ft_usertags', 'ft_category_cnt'])
    >>> pre_processor.transform(df).show()
    +------+--------+--------------------+-------------+--------------------+---------------+
    |secret|     day|                gid_|_month_offset|         ft_usertags|ft_category_cnt|
    +------+--------+--------------------+-------------+--------------------+---------------+
    |     2|20181114|ANDROID-68b6c301c...|            0|              000000|           11.0|
    |     2|20181114|ANDROID-68b6c301c...|           -1|              000000|           11.0|
    |     4|20181126|ANDROID-0000db9c0...|           -1|              000000|              3|
    |     5|20181126|ANDROID-846049d18...|            0|022000,026400,02d400|             12|
    |     5|20181126|ANDROID-846049d18...|           -1|022000,026400,02d400|             13|
    |     8|20180709|ANDROID-000340aa5...|            0|    02b000,02b100,h0|             14|
    |     8|20180709|ANDROID-000340aa5...|           -1|    02b000,02b100,h0|             15|
    |     9|20180709|ANDROID-00033a1c3...|           -1|022000,022100,02d300|              9|
    +------+--------+--------------------+-------------+--------------------+---------------+
    <BLANKLINE>
    >>> def valid_fun(row):
    ...     if row['_month_offset']==-1:
    ...         return False
    ...     return True
    >>> pre_processor = PreProcessor(strategy=PreProcessor.CUSTOMIZE,
    ...                              dummies_dict={'gid_': ['non-existent'],
    ...                                            'ft_usertags': [''],
    ...                                            'ft_category_cnt': ['']},
    ...                              fill_dict={'secret': PreProcessor.MIN,
    ...                                         'day': '20181114',
    ...                                         '_month_offset': PreProcessor.MAX,
    ...                                         'ft_usertags': '000000',
    ...                                         'ft_category_cnt': PreProcessor.MEAN},
    ...                              non_fatal_cols=['ft_usertags', 'ft_category_cnt'],
    ...                              valid_fun=valid_fun)
    >>> pre_processor.transform(df).show()
    +------+--------+--------------------+-------------+--------------------+---------------+
    |secret|     day|                gid_|_month_offset|         ft_usertags|ft_category_cnt|
    +------+--------+--------------------+-------------+--------------------+---------------+
    |     1|20181114|ANDROID-68b6c301c...|            0|              000000|           13.0|
    |     2|20181114|ANDROID-68b6c301c...|            0|              000000|           13.0|
    |     3|20181114|                null|            0|              000000|           13.0|
    |     5|20181126|ANDROID-846049d18...|            0|022000,026400,02d400|             12|
    |     7|20181126|                null|            0|              000000|           13.0|
    |     8|20180709|ANDROID-000340aa5...|            0|    02b000,02b100,h0|             14|
    +------+--------+--------------------+-------------+--------------------+---------------+
    <BLANKLINE>
    """

    strategy = Param(Params._dummy(), "strategy",
                     "the strategy of process the missing attribute value")
    dummies_dict = Param(Params._dummy(), "dummies_dict",
                         "dictionary of dummies for cols eg.'{'col1_name': ['dummy1', ], }', used in all strategy ")
    fill_dict = Param(Params._dummy(), "fill_dict",
                      "dictionary of replacement of no fatal miss attribute value")
    non_fatal_cols = Param(Params._dummy(), "non_fatal_cols",
                           "names of the columns that can be missing and replaced by certain value")
    valid_fun = Param(Params._dummy(), "valid_fun",
                      "function that judge if a row is valid, in the structure of Callable[[Row], bool]")
    input_cols = Param(Params._dummy(), "input_cols",
                       "the list of columns that need to be process")

    #: option of :py:attr:`strategy`, delete row that missing any attribute
    DEL_ALL_MISSING = 'DEL_ALL_MISSING'
    #: option of :py:attr:`strategy`, fill all missing attributes with value given in :py:attr:`fill_dict`
    #: *Notice*: under this strategy, all columns should have replacement in :py:attr:`fill_dict`
    FILL_WITH_GIVEN = 'FILL_WITH_GIVEN'
    #: option of :py:attr:`strategy`, delete all row with attribute missing in fatal columns, fill other missing with \
    #: value in :py:attr:`fill_dict`
    DEL_FATAL_FILL_OTHER_WITH_GIVEN = 'DEL_FATAL_FILL_OTHER_WITH_GIVEN'
    #: option of :py:attr:`strategy`, delete rows that rejected by ``valid_fun``, replace missing attributes \
    #: with value given in :py:attr:`fill_dict` (if no corresponding replacement, the attribute will not be replaced)
    CUSTOMIZE = 'CUSTOMIZE'

    #: used in :py:attr:`fill_dict`, placeholder for mean of corresponding column
    MEAN = '#MEAN'
    #: used in :py:attr:`fill_dict`, placeholder for max of corresponding column
    MAX = '#MAX'
    #: used in :py:attr:`fill_dict`, placeholder for min of corresponding column
    MIN = '#MIN'

    @keyword_only
    def __init__(self, strategy=DEL_ALL_MISSING, input_cols=None, dummies_dict: Dict[str, list] = None,
                 fill_dict: Dict = None, non_fatal_cols: list = None,
                 valid_fun: Optional[Callable[[Row], bool]] = None):
        super(PreProcessor, self).__init__()
        self._setDefault(strategy=self.DEL_ALL_MISSING, input_cols=[], dummies_dict={}, fill_dict={},
                         non_fatal_cols=[], valid_fun=None)
        # @keyword_only stashes the caller's kwargs in _input_kwargs; forward
        # them all to setParams so defaults and explicit values share one path.
        kwargs = self._input_kwargs
        self.spark = get_or_create_spark_session()
        self.setParams(**kwargs)

    @keyword_only
    def setParams(self, strategy=DEL_ALL_MISSING, input_cols=None, dummies_dict: Dict[str, list] = None,
                  fill_dict: Dict = None, non_fatal_cols: list = None,
                  valid_fun: Optional[Callable[[Row], bool]] = None):
        """Set all params from keyword arguments in one call."""
        kwargs = self._input_kwargs
        self.spark = get_or_create_spark_session()
        return self._set(**kwargs)

    def setStrategy(self, strategy):
        """
        Sets the value of :py:attr:`strategy`.
        """
        return self._set(strategy=strategy)

    def getStrategy(self):
        """
        Get the value of :py:attr:`strategy` or its default value
        """
        return self.getOrDefault(self.strategy)

    def setDummiesDict(self, dummies_dict):
        """
        Sets the value of :py:attr:`dummies_dict`.
        """
        return self._set(dummies_dict=dummies_dict)

    def getDummiesDict(self):
        """
        Get the value of :py:attr:`dummies_dict` or its default value
        """
        return self.getOrDefault(self.dummies_dict)

    def setFillDict(self, fill_dict):
        """
        Sets the value of :py:attr:`fill_dict`.
        """
        return self._set(fill_dict=fill_dict)

    def getFillDict(self):
        """
        Get the value of :py:attr:`fill_dict` or its default value
        """
        return self.getOrDefault(self.fill_dict)

    def setNonFatalCols(self, non_fatal_cols):
        """
        Sets the value of :py:attr:`non_fatal_cols`.
        """
        return self._set(non_fatal_cols=non_fatal_cols)

    def getNonFatalCols(self):
        """
        Get the value of :py:attr:`non_fatal_cols` or its default value
        """
        return self.getOrDefault(self.non_fatal_cols)

    def setValidFun(self, valid_fun):
        """
        Sets the value of :py:attr:`valid_fun`.
        """
        return self._set(valid_fun=valid_fun)

    def getValidFun(self):
        """
        Get the value of :py:attr:`valid_fun` or its default value
        """
        return self.getOrDefault(self.valid_fun)

    def setInputCols(self, input_cols):
        """
        Set the value of :py:attr:`input_cols`.
        """
        return self._set(input_cols=input_cols)

    def getInputCols(self):
        """
        Get the value of :py:attr:`input_cols` or its default value
        """
        return self.getOrDefault(self.input_cols)

    def _fill_param(self, kwargs, param_name):
        """Copy a required entry from ``kwargs`` onto ``self``.

        Raises:
            ValueError: if ``param_name`` is missing (or falsy) in ``kwargs``.
        """
        if kwargs.get(param_name):
            # setattr is safer and clearer than building an exec() string.
            setattr(self, param_name, kwargs.get(param_name))
        else:
            raise ValueError('Param %s not given' % param_name)

    def _transform(self, dataset: DataFrame):
        """Apply the configured strategy to ``dataset``.

        Args:
            dataset:
                the raw input ``DataFrame``.
        Returns:
            a ``DataFrame`` with the same schema, cleaned according to
            :py:attr:`strategy`.
        Raises:
            ValueError: if :py:attr:`strategy` is not a supported option.
        """
        strategy = self.getStrategy()
        dummies_dict = self.getDummiesDict()
        # Copy the fill dict: _update_fill_dict resolves the #MEAN/#MAX/#MIN
        # placeholders in place, and writing those computed statistics back
        # into the Param value would make a second transform() on different
        # data silently reuse stale numbers.
        fill_dict = dict(self.getFillDict())
        non_fatal_cols = self.getNonFatalCols()
        valid_fun = self.getValidFun()
        input_cols = self.getInputCols() or dataset.columns

        if strategy == self.DEL_ALL_MISSING:
            # All input columns are fatal: rows missing any of them are dropped.
            clean_rdd_to_dict_fun = make_nullify_cells(input_cols, dummies_dict)
            result_rdd = dataset.rdd.mapPartitions(clean_rdd_to_dict_fun)

        elif strategy == self.FILL_WITH_GIVEN:
            # Every column must have a replacement; all columns are non-fatal.
            assert set(dataset.columns) <= fill_dict.keys()
            result_rdd = self._clean_fill_rdd(dataset=dataset, input_cols=input_cols, dummies_dict=dummies_dict,
                                              fill_dict=fill_dict, non_fatal_cols=dataset.columns)

        elif strategy == self.DEL_FATAL_FILL_OTHER_WITH_GIVEN:
            assert set(non_fatal_cols) <= fill_dict.keys()
            result_rdd = self._clean_fill_rdd(dataset=dataset, input_cols=input_cols, dummies_dict=dummies_dict,
                                              fill_dict=fill_dict, non_fatal_cols=non_fatal_cols)

        elif strategy == self.CUSTOMIZE:
            # Accept any callable (function, lambda, partial, callable object),
            # not only objects whose type reprs as "<class 'function'>".
            assert callable(valid_fun), "valid_fun must be callable for the CUSTOMIZE strategy"
            result_rdd = self._clean_fill_rdd(dataset=dataset, input_cols=input_cols, dummies_dict=dummies_dict,
                                              fill_dict=fill_dict, non_fatal_cols=dataset.columns, valid_fun=valid_fun)
        else:
            raise ValueError("strategy '%s' not supported" % strategy)
        return self.spark.createDataFrame(result_rdd, dataset.schema)

    def _clean_fill_rdd(self, dataset: DataFrame, input_cols, dummies_dict, fill_dict, non_fatal_cols,
                        valid_fun: Optional[Callable[[Row], bool]] = None):
        """Drop invalid rows, resolve fill placeholders on the cleaned data,
        then refill the non-fatal columns; returns the resulting RDD of dicts."""
        clean_rdd_to_dict_fun = make_nullify_cells(input_cols, dummies_dict, non_fatal_cols, valid_fun)
        clean_rdd = dataset.rdd.mapPartitions(clean_rdd_to_dict_fun)
        # Statistics (#MEAN/#MAX/#MIN) are computed on the *cleaned* data, so
        # dummy values never pollute the aggregates.
        clean_dataset = self.spark.createDataFrame(clean_rdd, dataset.schema)
        self._update_fill_dict(clean_dataset, fill_dict)
        refill_rdd_fun = refill_rdd(non_fatal_cols, fill_dict)
        result_rdd = clean_dataset.rdd.mapPartitions(refill_rdd_fun)
        return result_rdd

    def _update_fill_dict(self, dataset: DataFrame, fill_dict):
        """Resolve the #MEAN/#MAX/#MIN placeholders in ``fill_dict`` in place,
        using aggregates computed over ``dataset``."""
        for k, v in fill_dict.items():
            # .first()[0] reads the single aggregate positionally instead of
            # reconstructing Spark's generated column alias (e.g. 'avg(k)').
            if v == self.MEAN:
                fill_dict[k] = dataset.select(mean(k)).first()[0]
            elif v == self.MAX:
                fill_dict[k] = dataset.select(max(k)).first()[0]
            elif v == self.MIN:
                fill_dict[k] = dataset.select(min(k)).first()[0]


if __name__ == "__main__":
    # Demo run: clean the sample frame with the CUSTOMIZE strategy.
    df = demo_data_for_cell_nullifier()

    def ff(row):
        """Reject rows whose _month_offset is the -1 placeholder."""
        return row['_month_offset'] != -1

    pre_processor = PreProcessor(strategy=PreProcessor.CUSTOMIZE,
                                 dummies_dict={'gid_'           : ['non-existent'],
                                               'ft_usertags'    : [''],
                                               'ft_category_cnt': ['']},
                                 fill_dict={'ft_category_cnt': PreProcessor.MEAN,
                                            'secret'         : PreProcessor.MIN,
                                            'day'            : '20181114',
                                            '_month_offset'  : PreProcessor.MAX,
                                            'ft_usertags'    : '000000'},
                                 non_fatal_cols=['ft_usertags', 'ft_category_cnt'],
                                 valid_fun=ff)
    pre_processor.transform(df).show()
