#! /usr/bin/env python3
import copy
from hashlib import md5
from random import randint

from pyspark.ml.param import Params, Param
from pyspark.sql import DataFrame, Row
from pyspark.sql.types import StringType

from gai.v2 import gzlog
from gai.v2.conf.conf_utils import IdMapperConfiguration, GaiConfiguration, SparkConfiguration
from gai.v2.generic import equal_subranges
from gai.v2.utils.platform import get_extra_params
from gai.v2.utils.spark import num_partitions_from
from gai.v2.spark.base import SparkTransformer
from gai.v2.spark.external.ids2gid_client import Ids2gidClient
from gai.v2.utils import get_or_create_spark_session

logger = gzlog.logger


def target_day(day: str):
    """Map a "YYYYMMDD" date string to the ids2gid target query day.

    The target day is the end of the quarter immediately preceding the
    quarter that contains ``day`` (e.g. any day in Q1 maps to Dec 31 of
    the previous year, any day in Q4 maps to Sep 30 of the same year).

    Args:
        day:
            date string in "YYYYMMDD" format

    Returns:
        the target query day as a "YYYYMMDD" string, or ``None`` when the
        month digits fall outside 1-12
    """

    assert len(day) == 8 and day.isdigit()

    year = int(day[:4])
    month = int(day[4:6])

    if 1 <= month <= 3:
        return "{}1231".format(year - 1)
    if 4 <= month <= 6:
        return "{}0331".format(year)
    if 7 <= month <= 9:
        return "{}0630".format(year)
    if 10 <= month <= 12:
        return "{}0930".format(year)
    return None


def make_map2gids(client, batch_size, output_id_col,
                  input_day_col, input_id_col, input_id_type, extra_params):
    """Build a ``mapPartitions`` function that resolves input ids to gids.

    The returned generator function walks a partition of Rows, groups
    consecutive rows that share the same target quarter, looks each group
    up in the quarter's HBase table via ``client.batch_get``, and yields
    new Rows with the resolved gid added under ``output_id_col`` (empty
    string when no gid is found).
    """
    # Closure constant: ids ending in 'md5' are already hashed row keys.
    pre_hashed = input_id_type.endswith('md5')

    def _safe_str(value):
        return value if value else ""

    def _table_for(day):
        return "gz_ft:ids2gid_" + target_day(day)

    def _rowkey(raw_id):
        # Missing ids are mapped to a fixed sentinel key so the batch
        # lookup stays aligned with the input rows.
        if raw_id is None:
            return md5('non-existent'.encode()).hexdigest()
        if pre_hashed:
            return raw_id
        return md5(raw_id.encode()).hexdigest()

    def _same_quarter(left, right):
        return target_day(left[input_day_col]) == target_day(right[input_day_col])

    def map2gids(rows):
        for batch in equal_subranges(rows, _same_quarter, max_size=batch_size):
            table_name = _table_for(batch[0][input_day_col])
            rowkeys = [_rowkey(row[input_id_col]) for row in batch]
            cells = client.batch_get(table_name, rowkeys,
                                     max_retries=6, sleep_secs=1,
                                     extra_params=extra_params)
            gids = [_safe_str(cell.get("cells").get("cf:gid")) for cell in cells]

            for row, gid in zip(batch, gids):
                fields = row.asDict()
                fields[output_id_col] = gid
                yield Row(**fields)

    return map2gids


class IdMapper(SparkTransformer):
    """Given the input id column name, the input id type, the day column name,
    the output id column name, and the output id type, an instance of
    ``IdMapper`` can transform the pairs of input id and day to gid's.


    Args:
        inputIdCol:
            the name of the input id column
        inputIdType:
            the input id type, such as ``'imei'``. The list of all the supported
            input id types is returned by ``IdMapper.supportedInputIdTypes()``.
        inputDayCol:
            the name of the day column
        outputIdCol:
            the name of the output id column
        outputIdType:
            the output id type, such as ``'gid'``. The list of all the supported
            output id types is returned by ``IdMapper.supportedOutputIdTypes()``.


    >>> spark = get_or_create_spark_session()
    >>> source_df = spark.createDataFrame([(1, "20180930", "00000000025f"),
    ...                                    (2, "20180830", "000000000498"),
    ...                                    (3, "20180930", "non-existent"),
    ...                                    (4, "20180930", None)],
    ...                                   schema=['secret', 'day', 'mac'])
    >>> source_df.printSchema()
    root
     |-- secret: long (nullable = true)
     |-- day: string (nullable = true)
     |-- mac: string (nullable = true)
    <BLANKLINE>
    >>> from gai.v2.spark.transformer import IdMapper
    >>> id_mapper = IdMapper(inputIdCol="mac", inputIdType="mac", inputDayCol="day",
    ...                      outputIdCol="gid", outputIdType="gid")
    >>> df = id_mapper.transform(source_df)
    >>> df.printSchema()
    root
     |-- secret: long (nullable = true)
     |-- day: string (nullable = true)
     |-- mac: string (nullable = true)
     |-- gid: string (nullable = true)
    <BLANKLINE>
    >>> df.count()
    4
    >>> df = df.orderBy(['secret'], ascending=[1])
    >>> df.show()
    +------+--------+------------+--------------------+
    |secret|     day|         mac|                 gid|
    +------+--------+------------+--------------------+
    |     1|20180930|00000000025f|ANDROID-dc911a838...|
    |     2|20180830|000000000498|ANDROID-e9b502724...|
    |     3|20180930|non-existent|                    |
    |     4|20180930|        null|                    |
    +------+--------+------------+--------------------+
    <BLANKLINE>
    >>> df = id_mapper.transform(source_df)
    >>> df.printSchema()
    root
     |-- secret: long (nullable = true)
     |-- day: string (nullable = true)
     |-- mac: string (nullable = true)
     |-- gid: string (nullable = true)
    <BLANKLINE>
    """

    inputIdCol = Param(Params._dummy(), "inputIdCol", "the name of the input id column")
    inputIdType = Param(Params._dummy(), "inputIdType", "the input id type")
    inputDayCol = Param(Params._dummy(), "inputDayCol", "the name of the day column")
    outputIdCol = Param(Params._dummy(), "outputIdCol", "the name of the output id column")
    outputIdType = Param(Params._dummy(), "outputIdType", "the output id type")

    def __init__(self, inputIdCol, inputIdType, inputDayCol="day", outputIdCol="gid", outputIdType="gid",
                 stageConf=None):
        super(IdMapper, self).__init__()

        self.setInputIdCol(inputIdCol) \
            .setInputIdType(inputIdType) \
            .setInputDayCol(inputDayCol) \
            .setOutputIdCol(outputIdCol) \
            .setOutputIdType(outputIdType)

        self._env_conf = {**GaiConfiguration.get_properties(),
                          **SparkConfiguration.get_properties()}

        # Per-stage overrides take precedence over the defaults.
        self._stage_conf = {**IdMapperConfiguration.get_properties(),
                            **(stageConf or {})}

        self._post_init_check()

    def _transform(self, dataset: DataFrame):
        """Append the output id column to ``dataset`` by resolving each
        (input id, day) pair against the ids2gid service.

        Args:
            dataset: the input DataFrame

        Returns:
            a new DataFrame with the same rows plus the output id column
            (a ``StringType`` column; empty string when no gid is found)
        """
        output_id_col = self.getOutputIdCol()
        new_schema = copy.deepcopy(dataset.schema).add(output_id_col, StringType())
        spark = get_or_create_spark_session()

        # Count exactly once: each .count() triggers a full Spark job over
        # the dataset, so branching on a second count() doubles the cost.
        dataset_size = dataset.count()
        if dataset_size == 0:
            return spark.createDataFrame(spark.sparkContext.emptyRDD(), new_schema)

        sampling_factor = 20
        num_partitions = num_partitions_from(dataset_size, sampling_factor)

        input_day_col = self.getInputDayCol()

        def key_fun(x):
            # Sort by day so rows of the same quarter are adjacent, with a
            # random tiebreaker to spread rows evenly across partitions.
            return (x[input_day_col],
                    randint(0, sampling_factor * dataset_size))

        sorted_rdd = dataset.rdd.sortBy(key_fun,
                                        numPartitions=num_partitions)

        client = Ids2gidClient(url=self._env_conf['ids2gid.api'],
                               user_name=self._env_conf['hbase.client.username'],
                               secret=self._env_conf['hbase.client.password'])

        batch_size = int(self._stage_conf["ids2gid.batch.size"])

        map2gids = make_map2gids(client, batch_size,
                                 output_id_col=output_id_col,
                                 input_day_col=input_day_col,
                                 input_id_col=self.getInputIdCol(),
                                 input_id_type=self.getInputIdType(),
                                 extra_params=get_extra_params())
        new_dataset = sorted_rdd.mapPartitions(map2gids)
        return spark.createDataFrame(new_dataset, new_schema)

    def setInputIdCol(self, inputIdCol):
        """Sets the name of input id column.

        Args:
            inputIdCol: the name of the input id column

        Returns:
            ``self``
        """
        self._paramMap[self.inputIdCol] = inputIdCol
        return self

    def getInputIdCol(self):
        """
        Returns:
            the name of input id column
        """
        return self.getOrDefault(self.inputIdCol)

    def setInputIdType(self, inputIdType):
        """Sets the input id type.

        Args:
            inputIdType: the input id type

        Returns:
            ``self``
        """
        self._paramMap[self.inputIdType] = inputIdType
        return self

    def getInputIdType(self):
        """
        Returns:
            the name of input id type
        """
        return self.getOrDefault(self.inputIdType)

    def setInputDayCol(self, inputDayCol):
        """Sets the day column.

        Args:
            inputDayCol: the name of the day column

        Returns:
            ``self``
        """
        self._paramMap[self.inputDayCol] = inputDayCol
        return self

    def getInputDayCol(self):
        """
        Returns:
            the name of input day column
        """
        return self.getOrDefault(self.inputDayCol)

    def getInputCols(self):
        """
        Returns:
            the list of input columns
        """
        return [self.getInputIdCol(), self.getInputDayCol()]

    def setOutputIdCol(self, outputIdCol):
        """Sets the output id column.

        Args:
            outputIdCol: the name of the output id column

        Returns:
            ``self``
        """
        self._paramMap[self.outputIdCol] = outputIdCol
        return self

    def getOutputIdCol(self):
        """
        Returns:
            the name of output id column
        """
        return self.getOrDefault(self.outputIdCol)

    def setOutputIdType(self, outputIdType):
        """Sets the output id type.

        Args:
            outputIdType: the output id type

        Returns:
            ``self``
        """
        self._paramMap[self.outputIdType] = outputIdType
        return self

    def getOutputIdType(self):
        """
        Returns:
            the name of output id type
        """
        return self.getOrDefault(self.outputIdType)

    def getOutputCol(self):
        """

        Returns:
            the name of the output column
        """
        return self.getOutputIdCol()

    def getOutputCols(self):
        """
        Returns:
            the list of output column(s)
        """
        return [self.getOutputCol()]

    @property
    def _column_appending(self):
        """This transformer appends a column rather than replacing one."""
        return True

    @staticmethod
    def supportedInputIdTypes():
        """

        Returns:
            the list of all the supported input id types
        """
        return ["mac", "imei", "imsi", "adid", "aaid", "idfa", "device_id",
                "device_token", "gid",
                "mac_md5", "imei_md5", "imsi_md5", "adid_md5", "aaid_md5",
                "idfa_md5", "device_id_md5", "device_token_md5", "phone_md5"]

    @staticmethod
    def supportedOutputIdTypes():
        """

        Returns:
            the list of all the supported output id types
        """
        return ["gid"]

    def _post_init_check(self):
        """Validate the configured id types, raising ``ValueError`` for any
        unsupported input or output id type."""
        if self.getInputIdType() not in self.supportedInputIdTypes():
            raise ValueError(
                "The given input id type '{}' is not supported. The supported input id types are: {}".format(
                    self.getInputIdType(), self.supportedInputIdTypes()))

        elif self.getOutputIdType() not in self.supportedOutputIdTypes():
            raise ValueError(
                "The given output id type '{}' is not supported. The supported output id types are: {}".format(
                    self.getOutputIdType(), self.supportedOutputIdTypes()))
