#! /usr/bin/env python3
import calendar
import json
from copy import deepcopy
from datetime import datetime, timedelta
from random import randint
from typing import List

from pyspark.ml.param import Params, Param
from pyspark.sql import DataFrame
from pyspark.sql.types import StringType, Row, LongType

from gai.v2.conf.conf_utils import FeatureRetrieverConfiguration, GaiConfiguration, SparkConfiguration
from gai.v2.generic import fixed_width_subranges
from gai.v2.utils.platform import get_extra_params
from gai.v2.utils.spark import num_partitions_from
from gai.v2.spark.base import SparkTransformer
from gai.v2.spark.external.feature_client import FeatureClient, QueryPoint
from gai.v2.utils import get_or_create_spark_session


def target_day(day: str, month_offset: int = 0) -> str:
    """
    Snap a 'YYYYMMDD' date back to the most recent Monday (the date itself
    when it already is a Monday), then shift the result by ``month_offset``
    pseudo-months of exactly 28 days each.

    Args:
        day:
            date string in "YYYYMMDD" format
        month_offset:
            number of 28-day periods to shift the resulting Monday by

    Returns:
        a date string in "YYYYMMDD" format

    >>> target_day('20181114')
    '20181112'
    >>> target_day('20181112')
    '20181112'
    >>> target_day('20181114',1)
    '20181210'
    >>> target_day('20181114',-1)
    '20181015'
    """

    assert len(day) == 8 and day.isdigit()

    parsed = datetime.strptime(day, '%Y%m%d')

    # datetime.weekday() is 0 for Monday, so subtracting it lands on the
    # Monday of the same week; 28-day shifts keep the weekday a Monday.
    monday = parsed - timedelta(days=parsed.weekday())
    shifted = monday + timedelta(days=28 * month_offset)

    return shifted.strftime('%Y%m%d')


def make_map2features(client, batch_size, features,
                      month_span, input_day_col, input_id_col,
                      month_offset_col, extra_params):
    """Build a partition-mapping generator function that, for each input row,
    queries the feature store for every month offset in ``[0, month_span)``
    and yields one enriched ``Row`` per (row, offset) pair.

    Args:
        client: a ``FeatureClient`` used to fetch features in batches
        batch_size: number of rows queried per ``batch_get_multi`` call
        features: list of feature names to retrieve
        month_span: how many 28-day months to look back (minimum 1)
        input_day_col: name of the day column in input rows
        input_id_col: name of the id column in input rows
        month_offset_col: name of the output column holding ``-index``
        extra_params: extra key-value parameters forwarded to the client

    Returns:
        a function suitable for ``RDD.mapPartitions``
    """
    def generate_query_point(row: Row, index, input_day_col, input_id_col):
        # Query the store at the Monday `index` pseudo-months before the
        # row's day (see target_day).
        return QueryPoint(
            day=target_day(row[input_day_col], -index),
            gid=row[input_id_col]
        )

    def generate_result_row(input_row, query_result, index, features):
        result_dict = input_row.asDict()
        # The store returns a single JSON-encoded blob under 'cells'; a
        # missing/None/empty 'cells' means no features exist for the point.
        cells = query_result.get('cells') or {}
        values = list(cells.values())
        cells = json.loads(values[0]) if values else {}

        # Absent features become empty strings so the output schema is stable.
        for feature in features:
            result_dict[feature] = cells.get(feature, "")

        result_dict[month_offset_col] = -index

        return Row(**result_dict)

    def indexed_map2features(rows, index, extra_params):
        f = lambda row: generate_query_point(row, index, input_day_col, input_id_col)
        query_points = list(map(f, rows))

        query_results = client.batch_get_multi(query_points, features,
                                               max_retries=6, sleep_secs=1,
                                               extra_params=extra_params)

        g = lambda row, result: generate_result_row(row, result, index, features)
        final_results = map(g, rows, query_results)
        return final_results

    def lazy_map2features(rows):
        # At least one month is always queried, even for month_span <= 0.
        num_months = max(month_span, 1)

        for buffer in fixed_width_subranges(rows, batch_size):
            for index in range(num_months):
                for x in indexed_map2features(buffer, index, extra_params):
                    yield x

    return lazy_map2features


class FeatureRetriever(SparkTransformer):
    """Given the name of id column (``inputIdCol``), the name of day column
    (``inputDayCol``), and the list of features (``outputFeatureCols``),
    a ``FeatureExtractor`` can retrieve the specified features for the pair of id
    and day.


    Args:
        inputIdCol: the name of the input id column
        inputDayCol: the name of the day column
        outputFeatureCols: a list of features to search for
        extraParams: a key-value dictionary that represent extra parameters.
            Options are ``'span.in.months'``


    >>> from pyspark.sql import SparkSession
    >>> spark = get_or_create_spark_session()
    >>> source_df = spark.createDataFrame([(1, "20191104", "ANDROID-68b6c301c39c4b37b7b8fb276dba0009"),
    ...                             (2, "20191104", "ANDROID-68b6c301c39c4b37b7b8fb276dba0008"),
    ...                             (3, "20191104", None),
    ...                             (4, "20191026", "ANDROID-0000db9c08a74a8993c4916abb0b32d0"),
    ...                             (5, "20191026", "ANDROID-846049d187054f4c80ab159de37d8f83"),
    ...                             (6, "20191026", "ANDROID-7b84901dbc1f4eaa8217b738a9cc3760"),
    ...                             (7, "20191026", "non-existent"),
    ...                             (8, "20191009", "ANDROID-000340aa512b4f809ddd3c4c9bc932d0"),
    ...                             (9, "20191009", "ANDROID-00033a1c3f4b4cfd9b502fb0348fd255"),
    ...                             (10, "20191009", "ANDROID-00031b1631714789ae64bd4a8071bb41"),
    ...                             (11, "20191009", None),
    ...                             ],
    ...                            schema=("secret", "day", "gid_"))
    >>> source_df.printSchema()
    root
     |-- secret: long (nullable = true)
     |-- day: string (nullable = true)
     |-- gid_: string (nullable = true)
    <BLANKLINE>
    >>> from gai.v2.spark.transformer import FeatureRetriever
    >>> feature_retriever = FeatureRetriever(inputIdCol="gid_",
    ...                                      inputDayCol="day",
    ...                                      outputFeatureCols=["ft_usertags", "ft_category_cnt"])
    >>> df = feature_retriever.transform(source_df)
    >>> df.printSchema()
    root
     |-- secret: long (nullable = true)
     |-- day: string (nullable = true)
     |-- gid_: string (nullable = true)
     |-- ft_usertags: string (nullable = true)
     |-- ft_category_cnt: string (nullable = true)
    <BLANKLINE>
    >>> feature_retriever = FeatureRetriever(inputIdCol="gid_",
    ...                                      inputDayCol="day",
    ...                                      outputFeatureCols=["ft_usertags", "ft_category_cnt"],
    ...                                      extraParams={'span.in.months': 2})
    >>> df = feature_retriever.transform(source_df)
    >>> df.printSchema()
    root
     |-- secret: long (nullable = true)
     |-- day: string (nullable = true)
     |-- gid_: string (nullable = true)
     |-- _month_offset: long (nullable = true)
     |-- ft_usertags: string (nullable = true)
     |-- ft_category_cnt: string (nullable = true)
    <BLANKLINE>
    >>> df = df.orderBy(["secret", "_month_offset"], ascending=[1, 0])
    >>> df.show()
    +------+--------+--------------------+-------------+--------------------+---------------+
    |secret|     day|                gid_|_month_offset|         ft_usertags|ft_category_cnt|
    +------+--------+--------------------+-------------+--------------------+---------------+
    |     1|20191104|ANDROID-68b6c301c...|            0|                    |               |
    |     1|20191104|ANDROID-68b6c301c...|           -1|                    |               |
    |     2|20191104|ANDROID-68b6c301c...|            0|                    |               |
    |     2|20191104|ANDROID-68b6c301c...|           -1|                    |               |
    |     3|20191104|                null|            0|                    |               |
    |     3|20191104|                null|           -1|                    |               |
    |     4|20191026|ANDROID-0000db9c0...|            0|                    |              3|
    |     4|20191026|ANDROID-0000db9c0...|           -1|                    |              3|
    |     5|20191026|ANDROID-846049d18...|            0|022000,026400,02d...|             12|
    |     5|20191026|ANDROID-846049d18...|           -1|022000,026400,02d...|             13|
    |     6|20191026|ANDROID-7b84901db...|            0|022000,022100,n51...|             13|
    |     6|20191026|ANDROID-7b84901db...|           -1|022000,022100,01h...|             13|
    |     7|20191026|        non-existent|            0|                    |               |
    |     7|20191026|        non-existent|           -1|                    |               |
    |     8|20191009|ANDROID-000340aa5...|            0|02b000,02b100,h0,...|             17|
    |     8|20191009|ANDROID-000340aa5...|           -1|02b000,02b100,h0,...|             17|
    |     9|20191009|ANDROID-00033a1c3...|            0|022000,022500,02d...|              7|
    |     9|20191009|ANDROID-00033a1c3...|           -1|022000,022500,02d...|              7|
    |    10|20191009|ANDROID-00031b163...|            0|022000,02b100,014...|             11|
    |    10|20191009|ANDROID-00031b163...|           -1|022000,022100,h42...|             11|
    +------+--------+--------------------+-------------+--------------------+---------------+
    only showing top 20 rows
    <BLANKLINE>
    """

    inputIdCol = Param(Params._dummy(), "inputIdCol", "the name of the input id column")
    inputDayCol = Param(Params._dummy(), "inputDayCol", "the name of the day column")
    outputFeatureCols = Param(Params._dummy(), "outputFeatureCols", "a list of features to search for")

    def __init__(self, inputIdCol: str, inputDayCol="day", outputFeatureCols: List[str] = None,
                 extraParams: dict = None):
        super(FeatureRetriever, self).__init__()

        self.setInputIdCol(inputIdCol) \
            .setInputDayCol(inputDayCol) \
            .setOutputFeatureCols(outputFeatureCols or [])

        # Environment-level settings (service URLs, credentials).
        self._env_conf = {**GaiConfiguration.get_properties(),
                          **SparkConfiguration.get_properties()}

        # Stage-level settings; user-supplied extraParams override defaults.
        self._stage_conf = {**FeatureRetrieverConfiguration.get_properties(),
                            **(extraParams or {"span.in.months": 1})}
        # Fail fast on unsupported feature names.
        self._post_init_check()

    def _transform(self, dataset: DataFrame):
        """Retrieve the configured features for every row of ``dataset``.

        Appends one string column per requested feature (empty string when
        the feature is absent) and, when the month span is greater than one,
        a ``_month_offset`` long column with one output row per offset.
        """
        input_id_col = self.getInputIdCol()
        input_day_col = self.getInputDayCol()
        output_feature_cols = self.getOutputCols()
        # Default to a single month so `month_span` is never None when the
        # key is absent from both extraParams and the stage configuration.
        month_span = self._stage_conf.get('span.in.months', 1)
        features = self.getOutputFeatureCols()
        month_offset_col = '_month_offset'

        spark = get_or_create_spark_session()

        client = FeatureClient(url=self._env_conf['retriever.api'],
                               user_name=self._env_conf['hbase.client.username'],
                               secret=self._env_conf['hbase.client.password'])

        # Output schema = input schema + month offset + one string column
        # per feature. Copy first: StructType.add mutates in place.
        new_schema = deepcopy(dataset.schema).add(month_offset_col, LongType())

        for feature_name in output_feature_cols:
            new_schema.add(feature_name, StringType())

        # count() triggers a full Spark job; run it once and reuse the value.
        dataset_size = dataset.count()

        if dataset_size == 0:
            result = spark.createDataFrame(spark.sparkContext.emptyRDD(), new_schema)
        else:
            batch_size = int(self._stage_conf["query.batch.size"])

            sampling_factor = 20
            num_partitions = num_partitions_from(dataset_size, sampling_factor)

            # Sort by day with a random tiebreaker so rows for the same day
            # spread across partitions instead of hot-spotting one executor.
            key_fun = lambda x: (x[input_day_col],
                                 randint(0, sampling_factor * dataset_size))
            sorted_rdd = dataset.rdd.sortBy(key_fun,
                                            numPartitions=num_partitions)

            map2features = make_map2features(client, batch_size, features,
                                             month_span, input_day_col,
                                             input_id_col, month_offset_col,
                                             extra_params=get_extra_params())
            new_dataset = sorted_rdd.mapPartitions(map2features)

            result = spark.createDataFrame(new_dataset, new_schema)

        # With a one-month span the offset column is constant zero — drop it.
        return result.drop(month_offset_col) if month_span <= 1 else result

    def setInputIdCol(self, inputIdCol):
        """Sets the input id column.

        Args:
            inputIdCol: the name of the input id column

        Returns:
            ``self``
        """
        self._paramMap[self.inputIdCol] = inputIdCol
        return self

    def getInputIdCol(self):
        """

        Returns:
            the name of the input id column
        """
        return self.getOrDefault(self.inputIdCol)

    def setInputDayCol(self, inputDayCol):
        """Sets the input day column.

        Args:
            inputDayCol: the name of the input day column

        Returns:
            ``self``
        """
        self._paramMap[self.inputDayCol] = inputDayCol
        return self

    def getInputDayCol(self):
        """

        Returns:
            the name of day column
        """
        return self.getOrDefault(self.inputDayCol)

    def getInputCols(self):
        """

        Returns:
            the list of input column names
        """
        return [self.getInputIdCol(), self.getInputDayCol()]

    def setOutputFeatureCols(self, outputFeatureCols):
        """Sets the list of output feature columns.

        Args:
            outputFeatureCols: a list of desired feature columns

        Returns:
            ``self``
        """
        self._paramMap[self.outputFeatureCols] = outputFeatureCols
        return self

    def getOutputFeatureCols(self):
        """

        Returns:
            the list of features to retrieve
        """
        return self.getOrDefault(self.outputFeatureCols)

    def getOutputCols(self):
        """

        Returns:
            the list of output columns; in this case, the same as
            ``getOutputFeatureCols()``
        """
        return self.getOutputFeatureCols()

    @property
    def _column_appending(self):
        # This transformer appends columns rather than replacing the dataset.
        return True

    @staticmethod
    def supportedFeatures():
        """

        Returns:
            the list of available features

        """
        return [
            "ft_2week_rest_active_period_ls",
            "ft_2week_rest_less_act_period_ls",
            "ft_2week_rest_less_act_times",
            "ft_2week_rest_most_act_period_ls",
            "ft_2week_rest_most_act_times",
            "ft_2week_rest_sum_act_times",
            "ft_2week_work_active_period_ls",
            "ft_2week_work_less_act_period_ls",
            "ft_2week_work_less_act_times",
            "ft_2week_work_most_act_period_ls",
            "ft_2week_work_most_act_times",
            "ft_2week_work_sum_act_times",
            "ft_act_cate_ls",
            "ft_act_cate_num",
            "ft_act_pkg_num",
            "ft_act_pkgs",
            "ft_andr_t1_interest_ls",
            "ft_andr_t1_most_interest_ls",
            "ft_andr_t2_interest_ls",
            "ft_andr_t2_most_interest_ls",
            "ft_app_install_category",
            "ft_app_install_pkgs",
            "ft_app_uninstall_category",
            "ft_app_uninstall_pkgs",
            "ft_applist",
            "ft_applist_border",
            "ft_brand",
            "ft_cate_app_cnt_ls",
            "ft_category_cnt",
            "ft_city_stay_city_ls",
            "ft_city_stay_oneday_cnt",
            "ft_city_stay_twoday_cnt",
            "ft_diam_of_action",
            "ft_dis_label",
            "ft_distance",
            "ft_dual_imsi",
            "ft_home_cons_ls",
            "ft_imsi_change_times",
            "ft_imsi_cnt",
            "ft_imsi_duration_stat",
            "ft_imsi_list",
            "ft_installapp_ls",
            "ft_ios_t1_interest_ls",
            "ft_ios_t1_most_interest_ls",
            "ft_ios_t2_interest_ls",
            "ft_ios_t2_most_interest_ls",
            "ft_largest_cate_cnt",
            "ft_largest_cate_ls",
            "ft_lbs_ktv_weekly",
            "ft_max_act_time",
            "ft_month_rest_active_period_ls",
            "ft_month_rest_less_act_period_ls",
            "ft_month_rest_less_act_times",
            "ft_month_rest_most_act_period_ls",
            "ft_month_rest_most_act_times",
            "ft_month_rest_sum_act_times",
            "ft_month_work_active_period_ls",
            "ft_month_work_less_act_period_ls",
            "ft_month_work_less_act_times",
            "ft_month_work_most_act_period_ls",
            "ft_month_work_most_act_times",
            "ft_month_work_sum_act_times",
            "ft_most_act_cate_ls",
            "ft_most_act_pkgs",
            "ft_night_stay_detail_ls",
            "ft_night_stay_oneday_cnt",
            "ft_night_stay_twoday_cnt",
            "ft_night_stay_wifimac_cnt",
            "ft_phone_model",
            "ft_pkg_cnt",
            "ft_pwoi_all_mostoften",
            "ft_pwoi_all_mostoften_consume",
            "ft_pwoi_all_often_consum",
            "ft_pwoi_all_often_ls",
            "ft_pwoi_all_times_ls",
            "ft_pwoi_hour_mostoften_ls",
            "ft_pwoi_hour_often_ls",
            "ft_pwoi_hour_times_ls",
            "ft_pwoi_rest_mostoften",
            "ft_pwoi_rest_often_ls",
            "ft_pwoi_rest_times_ls",
            "ft_residence_stability",
            "ft_rest_pwoi_often_ls",
            "ft_rest_pwoi_times_ls",
            "ft_stable_stops",
            "ft_unusual_imsi_infos",
            "ft_usertags",
            "ft_work_cons_ls",
            "ft_workplace_stability",
            # demo features below
            "ggg", "uuu", "fff", "ccc", "hhh"
        ]

    def _post_init_check(self):
        """Raise ``ValueError`` if any requested feature is unsupported."""
        wanted = set(self.getOutputFeatureCols())
        supported = set(self.supportedFeatures())
        if not wanted.issubset(supported):
            raise ValueError(
                "Features in {} are not supported. The supported features are {}.".format(
                    wanted.difference(supported),
                    supported))
