# from __future__ import annotations
import pandas as pd
import numpy as np
import tushare as ts
from tqdm import tqdm
import os
import math
import warnings
from collections.abc import Generator, Iterable
from functools import partial
import itertools
from itertools import islice, repeat, takewhile
from multiprocessing import cpu_count, Pool
import framework_config as config


class TushareDownloader:
    """Retrieves daily stock data from the tushare API.

    Attributes
    ----------
    start_date : str
        start date of the data (modified from config.py)
    end_date : str
        end date of the data (modified from config.py)
    ticker_list : list
        a list of stock tickers (modified from config.py); only the first
        6 characters of each ticker are passed to tushare

    Methods
    -------
    fetch_data()
        Fetches data from tushare API

    Columns returned by tushare's ``get_hist_data``:
    date: date
    open: opening price
    high: the highest price
    close: closing price
    low: lowest price
    volume: volume
    price_change: price change
    p_change: fluctuation
    ma5: 5-day average price
    ma10: 10-day average price
    ma20: 20-day average price
    v_ma5: 5-day average volume
    v_ma10: 10-day average volume
    v_ma20: 20-day average volume
    """

    def __init__(self, start_date: str, end_date: str, ticker_list: list):
        self.start_date = start_date
        self.end_date = end_date
        self.ticker_list = ticker_list

    def fetch_data(self) -> pd.DataFrame:
        """Fetches daily OHLCV data from the tushare API.

        Returns
        -------
        `pd.DataFrame`
            7 columns: a date, open, high, low, close, volume and tick symbol
            for the specified stock ticker (plus a ``day`` column,
            Monday = 0)
        """
        # Collect per-ticker frames and concatenate once at the end:
        # DataFrame.append was removed in pandas 2.0 and repeated appends
        # are quadratic anyway.
        frames = []
        for tic in tqdm(self.ticker_list, total=len(self.ticker_list)):
            temp_df = ts.get_hist_data(
                tic[0:6], start=self.start_date, end=self.end_date
            )
            temp_df["tic"] = tic[0:6]
            frames.append(temp_df)
        data_df = pd.concat(frames)
        data_df = data_df.reset_index(level="date")

        # Drop the pre-computed indicator columns supplied by tushare;
        # only raw OHLCV is kept.
        data_df = data_df.drop(
            [
                "price_change",
                "p_change",
                "ma5",
                "ma10",
                "ma20",
                "v_ma5",
                "v_ma10",
                "v_ma20",
            ],
            axis=1,  # keyword required: positional axis was removed in pandas 2.0
        )
        # create day of the week column (monday = 0)
        data_df["day"] = pd.to_datetime(data_df["date"]).dt.dayofweek
        # rank desc
        data_df = data_df.sort_index(axis=0, ascending=False)
        data_df = data_df.reset_index(drop=True)
        # convert date to standard string format, easy to filter
        data_df["date"] = pd.to_datetime(data_df["date"])
        data_df["date"] = data_df.date.apply(lambda x: x.strftime("%Y-%m-%d"))
        # drop missing data
        data_df = data_df.dropna()
        print("Shape of DataFrame: ", data_df.shape)
        # print("Display DataFrame: ", data_df.head())
        print(data_df)
        data_df = data_df.sort_values(by=["date", "tic"]).reset_index(drop=True)
        return data_df

    def select_equal_rows_stock(self, df):
        """Keep only tickers whose row count is at least the mean row count.

        Filters out tickers with too little history compared to the
        average across all tickers in ``df``.

        :param df: DataFrame with a ``tic`` column
        :return: the filtered DataFrame
        """
        # NOTE: the original file defined this method twice with identical
        # bodies; the redundant duplicate has been removed.
        df_check = df.tic.value_counts()
        df_check = pd.DataFrame(df_check).reset_index()
        df_check.columns = ["tic", "counts"]
        mean_df = df_check.counts.mean()
        equal_list = list(df.tic.value_counts() >= mean_df)
        names = df.tic.value_counts().index
        select_stocks_list = list(names[equal_list])
        df = df[df.tic.isin(select_stocks_list)]
        return df


def data_split(df, start, end):
    """Slice ``df`` to the half-open date window ``[start, end)``.

    :param df: pandas DataFrame with ``date`` and ``tic`` columns
    :param start: first date to include (inclusive)
    :param end: first date to exclude (exclusive)
    :return: pandas DataFrame sorted by (date, tic), with the index set to
        the factorized date (rows sharing a date share an index value)
    """
    in_window = (df.date >= start) & (df.date < end)
    subset = df[in_window].sort_values(["date", "tic"], ignore_index=True)
    subset.index = subset.date.factorize()[0]
    return subset


# Number of usable cores: the NUMBER_OF_CPUS environment variable, when
# set and non-empty, overrides the detected CPU count. Half the cores
# (at least one) are used as the default level of parallelism.
_env_cpus = os.getenv("NUMBER_OF_CPUS")
n_cores = int(_env_cpus) if _env_cpus else cpu_count()
N_PROCESSES = max(1, n_cores // 2)


class DistributorBaseClass:
    """
    Abstract base class for all distributors.

    A distributor evaluates a function (the map_function) on every item of
    a data collection and gathers the results. Subclasses decide *how* the
    evaluation happens: sequentially, in a local process pool, or on a
    cluster of nodes.
    """

    def map_reduce(
            self,
            map_function,
            data,
            function_kwargs=None,
            chunk_size=None,
            data_length=None,
    ):
        """
        Apply ``map_function`` to every element of ``data`` and return the
        results flattened into a single list.

        Subclasses must override this method; the base implementation only
        declares the contract.

        :param map_function: a function to apply to each data item.
        :type map_function: callable
        :param data: the data to use in the calculation
        :type data: iterable
        :param function_kwargs: parameters for the map function
        :type function_kwargs: dict of string to parameter
        :param chunk_size: If given, chunk the data according to this size. If not given, use an empirical value.
        :type chunk_size: int
        :param data_length: If the data is a generator, you have to set the length here. If it is none, the
          length is deduced from the len of the data.
        :type data_length: int

        :return: the calculated results
        :rtype: list
        :raises NotImplementedError: always, on the base class.
        """
        raise NotImplementedError


def _function_with_partly_reduce(chunk_list, map_function, kwargs):
    """
    Small helper function to call a function (map_function)
    on a list of data chunks (chunk_list) and convert the results into
    a flattened list.

    This function is used to send chunks of data with a size larger than 1 to
    the workers in parallel and process these on the worker.

    :param chunk_list: A list of data chunks to process.
    :type chunk_list: list
    :param map_function: A function, which is called on each chunk in the list separately.
    :type map_function: callable

    :return: A list of the results of the function evaluated on each chunk and flattened.
    :rtype: list
    """
    kwargs = kwargs or {}
    results = (map_function(chunk, **kwargs) for chunk in chunk_list)
    results = list(itertools.chain.from_iterable(results))
    return results


class IterableDistributorBaseClass(DistributorBaseClass):
    """
    Distributor Base Class that can handle all iterable items and calculate
    a map_function on each item separately.

    This is done on chunks of the data, meaning, that the DistributorBaseClass classes will chunk the data into chunks,
    distribute the data and apply the map_function functions on the items separately.

    Dependent on the implementation of the distribute function, this is done in parallel or using a cluster of nodes.
    """

    @staticmethod
    def partition(data, chunk_size):
        """
        This generator partitions an iterable into slices of length `chunk_size`.
        If the chunk size is not a divider of the data length, the last slice will be shorter.

        Taken from
        https://stackoverflow.com/questions/1915170/split-a-generator-iterable-every-n-items-in-python-splitevery

        The important part here is, that the iterable is only
        traversed once and the chunks are produced one at a time.
        This is good for both memory as well as speed.

        :param data: The data to partition.
        :type data: Iterable
        :param chunk_size: The chunk size. The last chunk might be smaller.
        :type chunk_size: int

        :return: A generator producing the chunks of data.
        :rtype: Generator[Iterable]
        """
        # Make sure we have an iterable
        iterator = iter(data)

        # takewhile(true, ...) generates an iterator until the items are empty
        # (= we have reached the end)
        # The islice(iterator, n) gets the next n elements from the iterator.
        # The list(...) makes sure we do not pass
        return takewhile(
            bool, (list(islice(iterator, chunk_size)) for _ in repeat(None))
        )

    def __init__(self):
        """
        Constructs the DistributorBaseClass class
        """
        raise NotImplementedError

    def calculate_best_chunk_size(self, data_length):
        """
        Calculates the best chunk size for a list of length data_length. The current implemented formula is more or
        less an empirical result for multiprocessing case on one machine.

        :param data_length: A length which defines how many calculations there need to be.
        :type data_length: int
        :return: the calculated chunk size
        :rtype: int
        """
        chunk_size, extra = divmod(data_length, self.n_workers * 5)
        if extra:
            chunk_size += 1
        return chunk_size

    def map_reduce(
            self,
            map_function,
            data,
            function_kwargs=None,
            chunk_size=None,
            data_length=None,
    ):
        """
        This method contains the core functionality of the DistributorBaseClass class.

        It maps the map_function to each element of the data and reduces the results to return a flattened list.

        How the jobs are calculated, is determined by the classes
        :func:`tsfresh.utilities.distribution.DistributorBaseClass.distribute` method,
        which can distribute the jobs in multiple threads, across multiple processing units etc.

        To not transport each element of the data individually, the data is split into chunks, according to the chunk
        size (or an empirical guess if none is given). By this, worker processes not tiny but adequate sized parts of
        the data.

        :param map_function: a function to apply to each data item.
        :type map_function: callable
        :param data: the data to use in the calculation
        :type data: iterable
        :param function_kwargs: parameters for the map function
        :type function_kwargs: dict of string to parameter
        :param chunk_size: If given, chunk the data according to this size. If not given, use an empirical value.
        :type chunk_size: int
        :param data_length: If the data is a generator, you have to set the length here. If it is none, the
          length is deduced from the len of the data.
        :type data_length: int

        :return: the calculated results
        :rtype: list
        """
        if not isinstance(data, Iterable):
            raise ValueError(
                "You passed data, which can not be handled by this distributor!"
            )

        if data_length is None:
            data_length = len(data)

        if not chunk_size:
            chunk_size = self.calculate_best_chunk_size(data_length)

        chunk_generator = self.partition(data, chunk_size=chunk_size)

        map_kwargs = {"map_function": map_function, "kwargs": function_kwargs}

        if hasattr(self, "progressbar_title"):
            total_number_of_expected_results = math.ceil(data_length / chunk_size)
            result = tqdm(
                self.distribute(
                    _function_with_partly_reduce, chunk_generator, map_kwargs
                ),
                total=total_number_of_expected_results,
                desc=self.progressbar_title,
                disable=self.disable_progressbar,
            )
        else:
            result = (
                self.distribute(
                    _function_with_partly_reduce, chunk_generator, map_kwargs
                ),
            )

        result = list(itertools.chain.from_iterable(result))

        self.close()

        return result

    def distribute(self, func, partitioned_chunks, kwargs):
        """
        This abstract base function distributes the work among workers, which can be threads or nodes in a cluster.
        Must be implemented in the derived classes.

        :param func: the function to send to each worker.
        :type func: callable
        :param partitioned_chunks: The list of data chunks - each element is again
            a list of chunks - and should be processed by one worker.
        :type partitioned_chunks: iterable
        :param kwargs: parameters for the map function
        :type kwargs: dict of string to parameter

        :return: The result of the calculation as a list - each item should be the result of the application of func
            to a single element.
        """
        raise NotImplementedError

    def close(self):
        """
        Abstract base function to clean the DistributorBaseClass after use, e.g. close the connection to a DaskScheduler
        """
        pass


class MapDistributor(IterableDistributorBaseClass):
    """
    Sequential distributor: evaluates every job one after the other via the
    built-in ``map``.
    """

    def __init__(
            self, disable_progressbar=False, progressbar_title="Feature Extraction"
    ):
        """
        Creates a new MapDistributor instance.

        :param disable_progressbar: whether to show a progressbar or not.
        :type disable_progressbar: bool
        :param progressbar_title: the title of the progressbar
        :type progressbar_title: basestring
        """
        self.disable_progressbar = disable_progressbar
        self.progressbar_title = progressbar_title

    def distribute(self, func, partitioned_chunks, kwargs):
        """
        Runs ``func`` over the chunks sequentially with python's map command.

        :param func: the function to send to each worker.
        :type func: callable
        :param partitioned_chunks: The list of data chunks - each element is again
            a list of chunks - and should be processed by one worker.
        :type partitioned_chunks: iterable
        :param kwargs: parameters for the map function
        :type kwargs: dict of string to parameter

        :return: The result of the calculation as a list - each item should be the result of the application of func
            to a single element.
        """
        bound_func = partial(func, **kwargs)
        return map(bound_func, partitioned_chunks)

    def calculate_best_chunk_size(self, data_length):
        """
        Since the jobs run sequentially, chunking brings no benefit: always
        use a chunk size of 1.

        :param data_length: A length which defines how many calculations there need to be.
        :type data_length: int
        """
        return 1


def initialize_warnings_in_workers(show_warnings):
    """
    Small helper function to initialize warnings module in multiprocessing workers.

    On Windows, Python spawns fresh processes which do not inherit from warnings
    state, so warnings must be enabled/disabled before running computations.

    :param show_warnings: whether to show warnings or not.
    :type show_warnings: bool
    """
    # NOTE: the original code called warnings.catch_warnings() here without
    # using it as a context manager — that only constructs and discards a
    # context-manager object, so the call had no effect and was removed.
    if not show_warnings:
        warnings.simplefilter("ignore")
    else:
        warnings.simplefilter("default")


class MultiprocessingDistributor(IterableDistributorBaseClass):
    """
    Distributor that runs the jobs in parallel on the local machine using a
    multiprocessing Pool.
    """

    def __init__(
            self,
            n_workers,
            disable_progressbar=False,
            progressbar_title="Feature Extraction",
            show_warnings=True,
    ):
        """
        Creates a new MultiprocessingDistributor instance.

        :param n_workers: How many workers should the multiprocessing pool have?
        :type n_workers: int
        :param disable_progressbar: whether to show a progressbar or not.
        :type disable_progressbar: bool
        :param progressbar_title: the title of the progressbar
        :type progressbar_title: basestring
        :param show_warnings: whether to show warnings or not.
        :type show_warnings: bool
        """
        self.n_workers = n_workers
        self.disable_progressbar = disable_progressbar
        self.progressbar_title = progressbar_title
        # Each worker process runs the initializer once so the warnings
        # filter is set up before any computation happens.
        self.pool = Pool(
            processes=n_workers,
            initializer=initialize_warnings_in_workers,
            initargs=(show_warnings,),
        )

    def distribute(self, func, partitioned_chunks, kwargs):
        """
        Runs ``func`` over the chunks in parallel by handing them to the
        process pool; results arrive in arbitrary order.

        :param func: the function to send to each worker.
        :type func: callable
        :param partitioned_chunks: The list of data chunks - each element is again
            a list of chunks - and should be processed by one worker.
        :type partitioned_chunks: iterable
        :param kwargs: parameters for the map function
        :type kwargs: dict of string to parameter

        :return: The result of the calculation as a list - each item should be the result of the application of func
            to a single element.
        """
        bound_func = partial(func, **kwargs)
        return self.pool.imap_unordered(bound_func, partitioned_chunks)

    def close(self):
        """
        Collects the result from the workers and closes the thread pool.
        """
        self.pool.close()
        self.pool.terminate()
        self.pool.join()


def _roll_out_time_series(
        timeshift,
        grouped_data,
        rolling_direction,
        max_timeshift,
        min_timeshift,
        column_sort,
        column_id,
):
    """
    Internal helper function for roll_time_series.
    This function has the task to extract the rolled forecast data frame of the number `timeshift`.
    This means it has shifted a virtual window if size `max_timeshift` (or infinite)
    `timeshift` times in the positive direction (for positive `rolling_direction`) or in negative direction
    (for negative `rolling_direction`).
    It starts counting from the first data point for each id (and kind) (or the last one for negative
    `rolling_direction`).
    The rolling happens for each `id` and `kind` separately.
    Extracted data smaller than `min_timeshift` + 1 are removed.

    :param timeshift: the current shift number (1-based) for which to cut out a window.
    :param grouped_data: a pandas GroupBy object; ``_f`` is applied to each group.
    :param rolling_direction: only the sign matters here (+1 / -1).
    :param max_timeshift: maximum window length (window covers at most
        ``max_timeshift + 1`` rows).
    :param min_timeshift: windows with fewer than ``min_timeshift + 1`` rows
        are dropped (``_f`` returns None for them).
    :param column_sort: sort column name, or None (then the shift number
        itself labels the window).
    :param column_id: id column used to build the new (id, shift) identifiers.

    :return: a one-element list wrapping the result of ``grouped_data.apply(_f)``.

    Implementation note:
    Even though negative rolling direction means, we let the window shift in negative direction over the data,
    the counting of `timeshift` still happens from the first row onwards. Example:

        1   2   3   4

    If we do positive rolling, we extract the sub time series

      [ 1 ]               input parameter: timeshift=1, new id: ([id=]X,[timeshift=]1)
      [ 1   2 ]           input parameter: timeshift=2, new id: ([id=]X,[timeshift=]2)
      [ 1   2   3 ]       input parameter: timeshift=3, new id: ([id=]X,[timeshift=]3)
      [ 1   2   3   4 ]   input parameter: timeshift=4, new id: ([id=]X,[timeshift=]4)

    If we do negative rolling:

      [ 1   2   3   4 ]   input parameter: timeshift=1, new id: ([id=]X,[timeshift=]1)
          [ 2   3   4 ]   input parameter: timeshift=2, new id: ([id=]X,[timeshift=]2)
              [ 3   4 ]   input parameter: timeshift=3, new id: ([id=]X,[timeshift=]3)
                  [ 4 ]   input parameter: timeshift=4, new id: ([id=]X,[timeshift=]4)

    If you now reverse the order of the negative examples, it looks like shifting the
    window from the back (but it is implemented to start counting from the beginning).

    """

    def _f(x):
        # x is one group (a per-id, per-kind sub-DataFrame) from grouped_data.
        if rolling_direction > 0:
            # For positive rolling, the right side of the window moves with `timeshift`
            shift_until = timeshift
            shift_from = max(shift_until - max_timeshift - 1, 0)

            # A window whose right edge would extend past the end of the
            # group is invalid and yields None (filtered out below).
            df_temp = x.iloc[shift_from:shift_until] if shift_until <= len(x) else None
        else:
            # For negative rolling, the left side of the window moves with `timeshift`
            shift_from = max(timeshift - 1, 0)
            shift_until = shift_from + max_timeshift + 1

            # iloc slicing clamps at the end of the group, so the last
            # windows simply get shorter.
            df_temp = x.iloc[shift_from:shift_until]

        # Drop windows that are missing or shorter than min_timeshift + 1 rows.
        if df_temp is None or len(df_temp) < min_timeshift + 1:
            return

        # Copy before mutating so the original group data stays untouched.
        df_temp = df_temp.copy()

        # and set the shift correctly
        if column_sort and rolling_direction > 0:
            # Positive rolling: label the window with its last sort value.
            timeshift_value = df_temp[column_sort].iloc[-1]
        elif column_sort and rolling_direction < 0:
            # Negative rolling: label the window with its first sort value.
            timeshift_value = df_temp[column_sort].iloc[0]
        else:
            # No sort column: fall back to the zero-based shift number.
            timeshift_value = timeshift - 1
        # and now create new ones ids out of the old ones
        df_temp["id"] = df_temp[column_id].apply(lambda row: (row, timeshift_value))

        return df_temp

    return [grouped_data.apply(_f)]


def roll_time_series(
        df_or_dict,
        column_id,
        column_sort=None,
        column_kind=None,
        rolling_direction=1,
        max_timeshift=None,
        min_timeshift=0,
        chunksize=config.CHUNKSIZE,
        n_jobs=N_PROCESSES,
        show_warnings=config.SHOW_WARNINGS,
        disable_progressbar=config.DISABLE_PROGRESSBAR,
        distributor=None,
):
    """
    This method creates sub windows of the time series. It rolls the (sorted) data frames for each kind and each id
    separately in the "time" domain (which is represented by the sort order of the sort column given by `column_sort`).

    For each rolling step, a new id is created by the scheme ({id}, {shift}), here id is the former id of
    the column and shift is the amount of "time" shifts.
    You can think of it as having a window of fixed length (the max_timeshift) moving one step at a time over
    your time series.
    Each cut-out seen by the window is a new time series with a new identifier.

    A few remarks:

     * This method will create new IDs!
     * The sign of rolling defines the direction of time rolling, a positive value means we are shifting
       the cut-out window foreward in time. The name of each new sub time series is given by the last time point.
       This means, the time series named `([id=]4,[timeshift=]5)` with a `max_timeshift` of 3 includes the data
       of the times 3, 4 and 5.
       A negative rolling direction means, you go in negative time direction over your data.
       The time series named `([id=]4,[timeshift=]5)` with `max_timeshift` of 3 would then include the data
       of the times 5, 6 and 7.
       The absolute value defines how much time to shift at each step.
     * It is possible to shift time series of different lengths, but:
     * We assume that the time series are uniformly sampled
     * For more information, please see :ref:`forecasting-label`.

    :param df_or_dict: a pandas DataFrame or a dictionary. The required shape/form of the object depends on the rest of
        the passed arguments.
    :type df_or_dict: pandas.DataFrame or dict

    :param column_id: it must be present in the pandas DataFrame or in all DataFrames in the dictionary.
        It is not allowed to have NaN values in this column.
    :type column_id: basestring

    :param column_sort: if not None, sort the rows by this column. It is not allowed to
        have NaN values in this column. If not given, will be filled by an increasing number,
        meaning that the order of the passed dataframes are used as "time" for the time series.
    :type column_sort: basestring or None

    :param column_kind: It can only be used when passing a pandas DataFrame (the dictionary is already assumed to be
        grouped by the kind). Is must be present in the DataFrame and no NaN values are allowed.
        If the kind column is not passed, it is assumed that each column in the pandas DataFrame (except the id or
        sort column) is a possible kind.
    :type column_kind: basestring or None

    :param rolling_direction: The sign decides, if to shift our cut-out window backwards or forwards in "time".
        The absolute value decides, how much to shift at each step.
    :type rolling_direction: int

    :param max_timeshift: If not None, the cut-out window is at maximum `max_timeshift` large. If none, it grows
         infinitely.
    :type max_timeshift: int

    :param min_timeshift: Throw away all extracted forecast windows smaller or equal than this. Must be larger
         than or equal 0.
    :type min_timeshift: int

    :param n_jobs: The number of processes to use for parallelization. If zero, no parallelization is used.
    :type n_jobs: int

    :param chunksize: How many shifts per job should be calculated.
    :type chunksize: None or int

    :param show_warnings: Show warnings during the feature extraction (needed for debugging of calculators).
    :type show_warnings: bool

    :param disable_progressbar: Do not show a progressbar while doing the calculation.
    :type disable_progressbar: bool

    :param distributor: Advanced parameter: set this to a class name that you want to use as a
             distributor. See the utilities/distribution.py for more information. Leave to None, if you want
             TSFresh to choose the best distributor.
    :type distributor: class

    :return: The rolled data frame or dictionary of data frames
    :rtype: the one from df_or_dict

    :raises ValueError: for invalid rolling parameters, missing id column,
        NaNs in the sort column, or a too-small input frame.
    :raises AttributeError: if `column_id` is not a column of the data.
    """
    if rolling_direction == 0:
        raise ValueError("Rolling direction of 0 is not possible")

    if max_timeshift is not None and max_timeshift <= 0:
        raise ValueError("max_timeshift needs to be positive!")

    if min_timeshift < 0:
        raise ValueError("min_timeshift needs to be positive or zero!")

    if isinstance(df_or_dict, dict):
        if column_kind is not None:
            raise ValueError(
                "You passed in a dictionary and gave a column name for the kind. Both are not possible."
            )

        # Recurse into each frame of the dictionary with the same settings.
        return {
            key: roll_time_series(
                df_or_dict=df_or_dict[key],
                column_id=column_id,
                column_sort=column_sort,
                column_kind=column_kind,
                rolling_direction=rolling_direction,
                max_timeshift=max_timeshift,
                min_timeshift=min_timeshift,
                chunksize=chunksize,
                n_jobs=n_jobs,
                show_warnings=show_warnings,
                disable_progressbar=disable_progressbar,
                distributor=distributor,
            )
            for key in df_or_dict
        }

    # Now we know that this is a pandas data frame
    df = df_or_dict

    if len(df) <= 1:
        raise ValueError(
            "Your time series container has zero or one rows!. Can not perform rolling."
        )

    if column_id is not None:
        if column_id not in df:
            raise AttributeError(
                "The given column for the id is not present in the data."
            )
    else:
        raise ValueError(
            "You have to set the column_id which contains the ids of the different time series"
        )

    if column_kind is not None:
        grouper = [column_kind, column_id]
    else:
        grouper = [
            column_id,
        ]

    if column_sort is not None:
        # Require no Nans in column
        if df[column_sort].isnull().any():
            raise ValueError("You have NaN values in your sort column.")

        df = df.sort_values(column_sort)

        # BUG FIX: np.object was deprecated in NumPy 1.20 and removed in
        # 1.24; comparing against the builtin `object` is the supported way
        # to test for an object dtype.
        if df[column_sort].dtype != object:
            # if rolling is enabled, the data should be uniformly sampled in this column
            # Build the differences between consecutive time sort values

            differences = df.groupby(grouper)[column_sort].apply(
                lambda x: x.values[:-1] - x.values[1:]
            )
            # Flatten all per-group difference arrays into one list
            # (chain.from_iterable is linear; sum(..., []) was quadratic).
            differences = list(itertools.chain.from_iterable(differences))
            # Test if all differences are the same
            if differences and min(differences) != max(differences):
                print(
                    "Your time stamps are not uniformly sampled, which makes rolling "
                    "nonsensical in some domains."
                )

    # Roll the data frames if requested
    rolling_amount = np.abs(rolling_direction)
    rolling_direction = np.sign(rolling_direction)

    grouped_data = df.groupby(grouper)
    # The longest group determines how many shifts are needed.
    prediction_steps = grouped_data.count().max().max()

    max_timeshift = max_timeshift or prediction_steps

    if column_sort is None:
        # Synthesize a sort column so the output can be ordered at the end.
        df["sort"] = range(df.shape[0])

    if rolling_direction > 0:
        # Walk backwards from prediction_steps so the shifts are aligned to
        # the end of the series, then reverse into ascending order.
        range_of_shifts = list(reversed(range(prediction_steps, 0, -rolling_amount)))
    else:
        range_of_shifts = range(1, prediction_steps + 1, rolling_amount)

    if distributor is None:
        if n_jobs == 0 or n_jobs == 1:
            distributor = MapDistributor(
                disable_progressbar=disable_progressbar, progressbar_title="Rolling"
            )
        else:
            distributor = MultiprocessingDistributor(
                n_workers=n_jobs,
                disable_progressbar=disable_progressbar,
                progressbar_title="Rolling",
                show_warnings=show_warnings,
            )

    if not isinstance(distributor, DistributorBaseClass):
        raise ValueError("the passed distributor is not an DistributorBaseClass object")

    kwargs = {
        "grouped_data": grouped_data,
        "rolling_direction": rolling_direction,
        "max_timeshift": max_timeshift,
        "min_timeshift": min_timeshift,
        "column_sort": column_sort,
        "column_id": column_id,
    }

    # Each shift is processed independently; the distributor flattens the
    # per-shift result lists into one list of rolled frames.
    shifted_chunks = distributor.map_reduce(
        _roll_out_time_series,
        data=range_of_shifts,
        chunk_size=chunksize,
        function_kwargs=kwargs,
    )

    distributor.close()

    df_shift = pd.concat(shifted_chunks, ignore_index=True)

    return df_shift.sort_values(by=["id", column_sort or "sort"])
