#! /usr/bin/env python3
import types

import pyspark.ml

from gai.v2.unify.base import find_dataset_type_adapter, Transformer
from gai.v2.unify.dtype import Dataset, DatasetType, PandasDataFrame, SparkDataFrame
from gai.v2.utils import get_or_create_spark_session


class SparkTransformerWrapper(Transformer):
    """Adapt a ``pyspark.ml.Transformer`` to the unified ``Transformer`` API.

    The wrapped instance (the "kernel") is applied verbatim; input and
    output are both Spark DataFrames.
    """

    def __init__(self, transformer: pyspark.ml.Transformer, instance_name=None, pre_hooks=(), post_hooks=(),
                 type_adaptive=False):
        """

        Args:
            transformer: an instance of ``pyspark.ml.Transformer``
        """
        super().__init__(instance_name=instance_name, pre_hooks=pre_hooks, post_hooks=post_hooks,
                         type_adaptive=type_adaptive)
        self._kernel = transformer

    def _transform(self, dataset: Dataset):
        # Guard clause: the kernel only understands Spark DataFrames.
        if dataset.get_type() != self.get_input_type():
            raise RuntimeError("transformer and dataset should match in type")
        result = self.unwrap().transform(dataset.unwrap())
        return SparkDataFrame(result)

    def get_input_type(self) -> DatasetType:
        return DatasetType.SPARK_DATA_FRAME

    def get_output_type(self) -> DatasetType:
        return DatasetType.SPARK_DATA_FRAME

    def unwrap(self) -> pyspark.ml.Transformer:
        return self._kernel

    def getKernelName(self):
        # Class name of the wrapped pyspark transformer.
        return type(self._kernel).__name__

    def getKernelModule(self):
        # Defining module of the wrapped pyspark transformer's class.
        return type(self._kernel).__module__


class SparkFunctionWrapper(Transformer):
    """Adapt a unary ``Spark DataFrame -> Spark DataFrame`` callable to the
    unified ``Transformer`` API.
    """

    def __init__(self, transform, instance_name=None, pre_hooks=(), post_hooks=(), type_adaptive=False):
        super().__init__(instance_name=instance_name, pre_hooks=pre_hooks, post_hooks=post_hooks,
                         type_adaptive=type_adaptive)
        self._kernel = transform

    def _transform(self, dataset: Dataset):
        # Guard clause: only Spark DataFrames are accepted.
        if dataset.get_type() != self.get_input_type():
            raise RuntimeError("transformer and dataset should match in type")
        fn = self.unwrap()
        return SparkDataFrame(fn(dataset.unwrap()))

    def get_input_type(self) -> DatasetType:
        return DatasetType.SPARK_DATA_FRAME

    def get_output_type(self) -> DatasetType:
        return DatasetType.SPARK_DATA_FRAME

    def unwrap(self):
        return self._kernel

    def getKernelName(self):
        # Plain functions report their own name; any other callable
        # (e.g. an instance with __call__) reports its class name.
        kernel = self._kernel
        if isinstance(kernel, types.FunctionType):
            return kernel.__name__
        return type(kernel).__name__

    def getKernelModule(self):
        # Module of the function itself, or of the callable's class.
        kernel = self._kernel
        if isinstance(kernel, types.FunctionType):
            return kernel.__module__
        return type(kernel).__module__


class PandasFunctionWrapper(Transformer):
    """A wrapper of unary functions that transforms a pandas DataFrame
    to a pandas DataFrame.

    >>> import pandas as pd
    >>> def uppercase_column_name(dataframe: pd.DataFrame):
    ...     dataframe.columns = dataframe.columns.str.upper()
    ...     return dataframe
    ...
    >>> tfm: Transformer = PandasFunctionWrapper(uppercase_column_name, instance_name="uppercase_column_name-1")
    >>> df = pd.DataFrame({'col1': ["hello", 3], 'col2': ["world", 4]})
    >>> ds: Dataset = PandasDataFrame(df)
    >>> res_ds = tfm.transform(ds)
    >>> res_ds.unwrap().columns[0]
    'COL1'
    >>> tfm.getKernelName()
    'uppercase_column_name'
    >>> tfm.getKernelModule()
    'gai.v2.unify.transformer.variant'
    """

    def __init__(self, transform, instance_name=None, pre_hooks=(), post_hooks=(), type_adaptive=False):
        super(PandasFunctionWrapper, self).__init__(instance_name=instance_name,
                                                    pre_hooks=pre_hooks,
                                                    post_hooks=post_hooks,
                                                    type_adaptive=type_adaptive)
        self._kernel = transform

    def _transform(self, dataset: Dataset):
        # Explicit check instead of `assert`: asserts are stripped under
        # `python -O`, and the sibling Spark wrappers already raise
        # RuntimeError for the same condition.
        if dataset.get_type() != self.get_input_type():
            raise RuntimeError("transformer and dataset should match in type")
        data = dataset.unwrap()
        transform = self.unwrap()
        data = transform(data)
        return PandasDataFrame(data)

    def get_input_type(self) -> DatasetType:
        return DatasetType.PANDAS_DATA_FRAME

    def get_output_type(self) -> DatasetType:
        return DatasetType.PANDAS_DATA_FRAME

    def unwrap(self):
        return self._kernel

    def getKernelName(self):
        # Plain functions report their own name; other callables report
        # their class name.
        if isinstance(self._kernel, types.FunctionType):
            return self._kernel.__name__
        else:
            return self._kernel.__class__.__name__

    def getKernelModule(self):
        # Module of the function itself, or of the callable's class.
        if isinstance(self._kernel, types.FunctionType):
            return self._kernel.__module__
        else:
            return self._kernel.__class__.__module__


class Spark2PandasTransformer(Transformer):
    """Dataset-type adapter: Spark DataFrame -> pandas DataFrame.

    >>> from pyspark.sql import SparkSession
    >>> spark = SparkSession.builder.master("local").getOrCreate()
    >>> df = spark.createDataFrame([("Alice", 12), ("Bob", 14)], schema=["name", "age"])
    >>> ds = SparkDataFrame(df)
    >>> tfm = find_dataset_type_adapter(ds.get_type(), DatasetType.PANDAS_DATA_FRAME)
    >>> from gai.v2.unify.transformer import Spark2PandasTransformer
    >>> isinstance(tfm, Spark2PandasTransformer)
    True
    >>> ds = tfm.transform(ds)
    >>> ds.unwrap()
        name  age
    0  Alice   12
    1    Bob   14
    """

    def get_input_type(self) -> DatasetType:
        return DatasetType.SPARK_DATA_FRAME

    def get_output_type(self) -> DatasetType:
        return DatasetType.PANDAS_DATA_FRAME

    def _transform(self, dataset):
        # Explicit check instead of `assert` (stripped under `python -O`);
        # message matches the wrapper classes in this module.
        if dataset.get_type() != DatasetType.SPARK_DATA_FRAME:
            raise RuntimeError("transformer and dataset should match in type")
        dataset = dataset.unwrap()
        # toPandas() collects the distributed frame to the driver.
        return PandasDataFrame(dataset.toPandas())


class Pandas2SparkTransformer(Transformer):
    """Dataset-type adapter: pandas DataFrame -> Spark DataFrame.

    >>> import pandas as pd
    >>> d = {"name": ["Alice", "Bob"], "age": [12, 14]}
    >>> df = pd.DataFrame(data=d)
    >>> ds = PandasDataFrame(df)
    >>> tfm = find_dataset_type_adapter(ds.get_type(), DatasetType.SPARK_DATA_FRAME)
    >>> from gai.v2.unify.transformer import Pandas2SparkTransformer
    >>> isinstance(tfm, Pandas2SparkTransformer)
    True
    >>> ds = tfm.transform(ds)
    >>> ds.unwrap().show()
    +-----+---+
    | name|age|
    +-----+---+
    |Alice| 12|
    |  Bob| 14|
    +-----+---+
    <BLANKLINE>
    """

    def get_input_type(self) -> DatasetType:
        return DatasetType.PANDAS_DATA_FRAME

    def get_output_type(self) -> DatasetType:
        return DatasetType.SPARK_DATA_FRAME

    def _transform(self, dataset):
        # Explicit check instead of `assert` (stripped under `python -O`);
        # message matches the wrapper classes in this module.
        if dataset.get_type() != DatasetType.PANDAS_DATA_FRAME:
            raise RuntimeError("transformer and dataset should match in type")
        pandas_df = dataset.unwrap()
        spark = get_or_create_spark_session()
        spark_df = spark.createDataFrame(pandas_df)
        return SparkDataFrame(spark_df)


class IdentityTransformer(Transformer):
    """No-op adapter used when source and target dataset types already match.

    >>> from pyspark.sql import SparkSession
    >>> spark = SparkSession.builder.master("local").getOrCreate()
    >>> df = spark.createDataFrame([("Alice", 12), ("Bob", 14)], schema=["name", "age"])
    >>> ds = SparkDataFrame(df)
    >>> tfm = find_dataset_type_adapter(ds.get_type(), DatasetType.SPARK_DATA_FRAME)
    >>> from gai.v2.unify.transformer import IdentityTransformer
    >>> isinstance(tfm, IdentityTransformer)
    True
    >>> ds = tfm.transform(ds)
    >>> ds.unwrap().show()
    +-----+---+
    | name|age|
    +-----+---+
    |Alice| 12|
    |  Bob| 14|
    +-----+---+
    <BLANKLINE>
    """

    def __init__(self, dtype: DatasetType, instance_name=None, pre_hooks=(), post_hooks=(),
                 type_adaptive=False):
        super().__init__(instance_name=instance_name, pre_hooks=pre_hooks, post_hooks=post_hooks,
                         type_adaptive=type_adaptive)
        # Input and output type are both fixed to `dtype`.
        self._dtype = dtype

    def get_input_type(self) -> DatasetType:
        return self._dtype

    def get_output_type(self) -> DatasetType:
        return self._dtype

    def _transform(self, dataset):
        # Explicit check instead of `assert` (stripped under `python -O`);
        # message matches the wrapper classes in this module.
        if dataset.get_type() != self.get_input_type():
            raise RuntimeError("transformer and dataset should match in type")
        # Identity: hand back the dataset untouched.
        return dataset
