import numpy as np
import pandas as pd
import itertools
import re
from gym.utils import seeding
from gym import spaces
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import gym
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise
import framework_config as config
from stable_baselines3 import DDPG, A2C, PPO, TD3, SAC
import empyrical as ep
import scipy.stats as stats


class FeatureEngineer:
    """Provides methods for preprocessing the stock price data

    Attributes
    ----------
        use_technical_indicator : boolean
            use technical indicators or not
        tech_indicator_list : list
            a list of technical indicator names (modified from config.py)
        use_turbulence : boolean
            use turbulence index or not
        user_defined_feature : boolean
            use user defined features or not

    Methods
    -------
    preprocess_data()
        main method to do the feature engineering

    """

    def __init__(
            self,
            use_technical_indicator=True,
            tech_indicator_list=config.TECHNICAL_INDICATORS_LIST,
            use_turbulence=False,
            user_defined_feature=False,
    ):
        self.use_technical_indicator = use_technical_indicator
        self.tech_indicator_list = tech_indicator_list
        self.use_turbulence = use_turbulence
        self.user_defined_feature = user_defined_feature

    def preprocess_data(self, df):
        """main method to do the feature engineering

        :param df: (pd.DataFrame) source dataframe
        :return: (pd.DataFrame) dataframe with the requested features added
        """
        if self.use_technical_indicator:
            # add technical indicators using stockstats
            df = self.add_technical_indicator(df)
            print("Successfully added technical indicators")

        # add turbulence index for multiple stock
        if self.use_turbulence:
            df = self.add_turbulence(df)
            print("Successfully added turbulence index")

        # add user defined feature
        if self.user_defined_feature:
            df = self.add_user_defined_feature(df)
            print("Successfully added user defined features")

        # fill the missing values at the beginning and the end;
        # fillna(method=...) is deprecated since pandas 2.1
        df = df.bfill().ffill()
        return df

    def add_technical_indicator(self, data):
        """calculate technical indicators
        use stockstats package to add technical indicators
        :param data: (df) pandas dataframe
        :return: (df) pandas dataframe
        """
        df = data.copy()
        stock = StockDataFrame.retype(df.copy())
        unique_ticker = stock.tic.unique()

        for indicator in self.tech_indicator_list:
            # DataFrame.append was removed in pandas 2.0: collect the
            # per-ticker slices and concatenate them once instead
            indicator_frames = []
            for ticker in unique_ticker:
                try:
                    temp_indicator = stock[stock.tic == ticker][indicator]
                    indicator_frames.append(pd.DataFrame(temp_indicator))
                except Exception as e:
                    print(e)
            if indicator_frames:
                df[indicator] = pd.concat(indicator_frames, ignore_index=True)
        return df

    def add_user_defined_feature(self, data):
        """add user defined features
        :param data: (df) pandas dataframe
        :return: (df) pandas dataframe
        """
        df = data.copy()
        df["daily_return"] = df.close.pct_change(1)
        return df

    def add_turbulence(self, data):
        """add turbulence index from a precalculated dataframe
        :param data: (df) pandas dataframe
        :return: (df) pandas dataframe
        """
        df = data.copy()
        turbulence_index = self.calculate_turbulence(df)
        df = df.merge(turbulence_index, on="date")
        df = df.sort_values(["date", "tic"]).reset_index(drop=True)
        return df

    def calculate_turbulence(self, data):
        """calculate turbulence index based on dow 30

        :param data: (df) pandas dataframe with date/tic/close columns
        :return: (df) dataframe with columns ``date`` and ``turbulence``
        """
        # can add other market assets
        df = data.copy()
        df_price_pivot = df.pivot(index="date", columns="tic", values="close")
        # use returns to calculate turbulence
        df_price_pivot = df_price_pivot.pct_change()

        unique_date = df.date.unique()
        # start after a year: no full rolling window before that
        start = 252
        turbulence_index = [0] * start
        count = 0
        for i in range(start, len(unique_date)):
            current_price = df_price_pivot[df_price_pivot.index == unique_date[i]]
            # use one year rolling window to calculate covariance
            hist_price = df_price_pivot[
                (df_price_pivot.index < unique_date[i])
                & (df_price_pivot.index >= unique_date[i - 252])
            ]
            # Drop tickers which have more missing values than the "oldest" ticker
            filtered_hist_price = hist_price.iloc[hist_price.isna().sum().min():].dropna(axis=1)

            cov_temp = filtered_hist_price.cov()
            current_temp = current_price[[x for x in filtered_hist_price]] - np.mean(filtered_hist_price, axis=0)
            # Mahalanobis-style distance of today's returns from the window mean
            temp = current_temp.values.dot(np.linalg.pinv(cov_temp)).dot(
                current_temp.values.T
            )
            if temp > 0:
                count += 1
                if count > 2:
                    turbulence_temp = temp[0][0]
                else:
                    # avoid large outlier because the calculation just begins
                    turbulence_temp = 0
            else:
                turbulence_temp = 0
            turbulence_index.append(turbulence_temp)

        turbulence_index = pd.DataFrame(
            {"date": df_price_pivot.index, "turbulence": turbulence_index}
        )
        return turbulence_index


def wrap(df, index_column=None):
    """Convert a pandas DataFrame into a StockDataFrame.

    :param df: pandas DataFrame to convert
    :param index_column: name of the index column, defaults to ``date``
    :return: an object of StockDataFrame
    """
    wrapped = StockDataFrame.retype(df, index_column)
    return wrapped


class StockDataFrame(pd.DataFrame):
    # Start of options.
    # Window sizes / multipliers for the indicator methods below.

    # KDJ stochastic oscillator: (prev-K weight, current-RSV weight) and window
    KDJ_PARAM = (2.0 / 3.0, 1.0 / 3.0)
    KDJ_WINDOW = 9

    # Bollinger bands: SMA period and standard-deviation multiplier
    BOLL_PERIOD = 20
    BOLL_STD_TIMES = 2

    # MACD: fast EMA, slow EMA and signal-line EMA spans
    MACD_EMA_SHORT = 12
    MACD_EMA_LONG = 26
    MACD_EMA_SIGNAL = 9

    # Percentage Price Oscillator: same structure as MACD
    PPO_EMA_SHORT = 12
    PPO_EMA_LONG = 26
    PPO_EMA_SIGNAL = 9

    # Directional movement system (+DI / -DI / DX / ADX / ADXR) windows
    PDI_SMMA = 14
    MDI_SMMA = 14
    DX_SMMA = 14
    ADX_EMA = 6
    ADXR_EMA = 6

    # CR energy index moving-average windows
    CR_MA1 = 5
    CR_MA2 = 10
    CR_MA3 = 20

    TRIX_EMA_WINDOW = 12

    TEMA_EMA_WINDOW = 5

    ATR_SMMA = 14

    # Supertrend: ATR multiplier and window
    SUPERTREND_MUL = 3
    SUPERTREND_WINDOW = 14

    VWMA = 14

    CHOP = 14

    MFI = 14

    CCI = 14

    RSI = 14

    VR = 26

    WR = 14

    # Wavetrend: EMA window on typical price, EMA window on CI
    WAVE_TREND_1 = 10
    WAVE_TREND_2 = 21

    KAMA_SLOW = 34
    KAMA_FAST = 5

    MULTI_SPLIT_INDICATORS = ("kama",)

    # End of options

    @staticmethod
    def _change(series, window):
        return series.pct_change(periods=-window).fillna(0.0) * 100

    def _get_change(self):
        """ Get the percentage change column

        :return: result series
        """
        self['change'] = self._change(self['close'], -1)

    def _get_p(self, column, shifts):
        """ get the permutation of specified range

        Encodes, for each row, which of the shifted copies of ``column``
        are positive, as a binary number.

        example:
        index    x   x_-2,-1_p
        0        1         NaN
        1       -1         NaN
        2        3           2  (0.x > 0, and assigned to weight 2)
        3        5           1  (2.x > 0, and assigned to weight 1)
        4        1           3

        :param column: the column to calculate p from
        :param shifts: the range to consider
        :return:
        """
        column_name = '{}_{}_p'.format(column, shifts)
        # initialize the column if not
        self.get(column)
        # largest shift first, so it receives the lowest bit weight
        shifts = self.to_ints(shifts)[::-1]
        indices = None
        count = 0
        for shift in shifts:
            shifted = self.shift(-shift)
            # bit flag: 2**count when the shifted value is positive, else 0
            index = (shifted[column] > 0) * (2 ** count)
            if indices is None:
                indices = index
            else:
                indices += index
            count += 1
        if indices is not None:
            cp = indices.copy()
            # rows whose shifts fall outside the frame are undefined
            self.set_nan(cp, shifts)
            self[column_name] = cp

    def to_ints(self, shifts):
        items = map(self._process_shifts_segment, shifts.split(','))
        return sorted(list(set(itertools.chain(*items))))

    def to_int(self, shifts):
        numbers = self.to_ints(shifts)
        if len(numbers) != 1:
            raise IndexError("only accept 1 number.")
        return numbers[0]

    @staticmethod
    def _process_shifts_segment(shift_segment):
        if '~' in shift_segment:
            start, end = shift_segment.split('~')
            shifts = range(int(start), int(end) + 1)
        else:
            shifts = [int(shift_segment)]
        return shifts

    @staticmethod
    def set_nan(pd_obj, shift):
        """Blank the boundary rows that a shift cannot fill.

        ``shift`` may be a single int or an iterable of ints; for an
        iterable only the extreme (max and min) shifts matter, since they
        cover the union of undefined rows.
        """
        try:
            iter(shift)
            max_shift = max(shift)
            min_shift = min(shift)
            StockDataFrame._set_nan_of_single_shift(pd_obj, max_shift)
            StockDataFrame._set_nan_of_single_shift(pd_obj, min_shift)
        except TypeError:
            # shift is not iterable
            StockDataFrame._set_nan_of_single_shift(pd_obj, shift)

    @staticmethod
    def _set_nan_of_single_shift(pd_obj, shift):
        val = np.nan
        if shift > 0:
            pd_obj.iloc[-shift:] = val
        elif shift < 0:
            pd_obj.iloc[:-shift] = val

    def _get_r(self, column, shifts):
        """ Get rate of change of column

        :param column: column name of the rate to calculate
        :param shifts: periods to shift, accept one shift only
        :return: None
        """
        shift = self.to_int(shifts)
        rate_key = '{}_{}_r'.format(column, shift)
        self[rate_key] = self._change(self[column], shift)

    @staticmethod
    def _shift(series, window):
        """ Shift the series

        When window is negative, shift the past period to current.
        Fill the gap with the first data available.

        When window is positive, shift the future period to current.
        Fill the gap with last data available.

        :param series: the series to shift
        :param window: number of periods to shift
        :return: the shifted series with filled gap
        """
        ret = series.shift(-window)
        if window < 0:
            ret.iloc[:-window] = series.iloc[0]
        elif window > 0:
            ret.iloc[-window:] = series.iloc[-1]
        return ret

    def _get_s(self, column, shifts):
        """ Get the column shifted by periods

        :param column: name of the column to shift
        :param shifts: periods to shift, accept one shift only
        :return: None
        """
        shift = self.to_int(shifts)
        shifted_key = "{}_{}_s".format(column, shift)
        self[shifted_key] = self._shift(self[column], shift)

    def _get_log_ret(self):
        close = self['close']
        self['log-ret'] = np.log(close / self._shift(close, -1))

    def _get_c(self, column, shifts):
        """ get the count of non-zero values of ``column`` in a past window

        example: change_20_c

        :param column: column name
        :param shifts: range to count, only to previous
        :return: result series
        """
        column_name = '{}_{}_c'.format(column, shifts)
        window = self.get_int_positive(shifts)
        rolling = self[column].rolling(
            window=window,
            min_periods=0,
            center=False)
        self[column_name] = rolling.apply(np.count_nonzero)
        return self[column_name]

    def _get_fc(self, column, shifts):
        """ get the count of non-zero values of ``column`` in a future window

        example: change_20_fc

        :param column: column name
        :param shifts: range to count, only to future
        :return: result series
        """
        column_name = '{}_{}_fc'.format(column, shifts)
        window = self.get_int_positive(shifts)
        # count over the reversed series so the window looks forward
        reversed_rolling = self[column][::-1].rolling(
            window=window,
            min_periods=0,
            center=False)
        counts = reversed_rolling.apply(np.count_nonzero)[::-1]
        self[column_name] = counts
        return counts

    def _init_shifted_columns(self, column, shifts):
        """Ensure the shifted helper columns exist; return their names."""
        # initialize the column if not
        self.get(column)
        shifts = self.to_ints(shifts)
        shift_column_names = ['{}_{}_s'.format(column, shift) for shift in
                              shifts]
        # NOTE(review): accessing each name presumably triggers on-demand
        # column creation implemented elsewhere in this class — confirm
        [self.get(name) for name in shift_column_names]
        return shift_column_names

    def _get_max(self, column, shifts):
        """Row-wise maximum of ``column`` across its shifted copies."""
        column_name = '{}_{}_max'.format(column, shifts)
        shifted_names = self._init_shifted_columns(column, shifts)
        self[column_name] = np.max(self[shifted_names], axis=1)

    def _get_min(self, column, shifts):
        """Row-wise minimum of ``column`` across its shifted copies."""
        column_name = '{}_{}_min'.format(column, shifts)
        shifted_names = self._init_shifted_columns(column, shifts)
        self[column_name] = np.min(self[shifted_names], axis=1)

    def _get_rsv(self, window):
        """ Calculate the RSV (Raw Stochastic Value) within N periods

        rsv = (close - lowest low) / (highest high - lowest low) * 100,
        with the current day included in the window.  Essential for KDJ.

        :param window: number of periods
        :return: None
        """
        window = self.get_int_positive(window)
        key = 'rsv_{}'.format(window)
        lowest = self._mov_min(self['low'], window)
        highest = self._mov_max(self['high'], window)

        position = (self['close'] - lowest) / (highest - lowest)
        self[key] = position.fillna(0.0) * 100

    def _get_rsi(self, window=None):
        """ Calculate the RSI (Relative Strength Index) within N periods

        calculated based on the formula at:
        https://en.wikipedia.org/wiki/Relative_strength_index

        Also stores the intermediate RS as ``rs_{window}``.

        :param window: number of periods; defaults to ``self.RSI``
        :return: None
        """
        if window is None:
            window = self.RSI
            column_name = 'rsi'
        else:
            column_name = 'rsi_{}'.format(window)
        window = self.get_int_positive(window)

        delta = self._delta(self['close'], -1)
        # split each move into its gain and loss components
        gains = (delta + delta.abs()) / 2
        losses = (-delta + delta.abs()) / 2
        avg_gain = self._smma(gains, window)
        avg_loss = self._smma(losses, window)

        rs = avg_gain / avg_loss
        self['rs_{}'.format(window)] = rs
        self[column_name] = 100 - 100 / (1.0 + rs)

    def _get_stochrsi(self, window=None):
        """ Calculate the Stochastic RSI

        calculated based on the formula at:
        https://www.investopedia.com/terms/s/stochrsi.asp

        :param window: number of periods; defaults to ``self.RSI``
        :return: None
        """
        if window is None:
            window = self.RSI
            column_name = 'stochrsi'
        else:
            column_name = 'stochrsi_{}'.format(window)
        window = self.get_int_positive(window)

        rsi = self['rsi_{}'.format(window)]
        lowest_rsi = self._mov_min(rsi, window)
        highest_rsi = self._mov_max(rsi, window)

        position = (rsi - lowest_rsi) / (highest_rsi - lowest_rsi)
        self[column_name] = position * 100

    def _get_wave_trend(self):
        """ Calculate LazyBear's Wavetrend
        Check the algorithm described below:
        https://medium.com/@samuel.mcculloch/lets-take-a-look-at-wavetrend-with-crosses-lazybear-s-indicator-2ece1737f72f

        n1: period of EMA on typical price
        n2: period of EMA

        :return: None
        """
        n1 = self.WAVE_TREND_1
        n2 = self.WAVE_TREND_2

        tp = self._tp()
        esa = self._ema(tp, n1)
        d = self._ema((tp - esa).abs(), n1)
        ci = (tp - esa) / (0.015 * d)
        tci = self._ema(ci, n2)
        self["wt1"] = tci
        self["wt2"] = self._sma(tci, 4)

    @staticmethod
    def _smma(series, window):
        return series.ewm(
            ignore_na=False,
            alpha=1.0 / window,
            min_periods=0,
            adjust=True).mean()

    def _get_smma(self, column, windows):
        """ get smoothed moving average.

        Stores the result as ``{column}_{window}_smma``.

        :param column: the column to calculate
        :param windows: range
        :return: None
        """
        window = self.get_int_positive(windows)
        key = '{}_{}_smma'.format(column, window)
        self[key] = self._smma(self[column], window)

    def _get_trix(self, column=None, windows=None):
        """ Triple Exponential Average

        https://www.investopedia.com/articles/technical/02/092402.asp

        Percentage rate of change of a triple-smoothed EMA.

        :param column: the column to calculate (default ``close``)
        :param windows: range (default ``TRIX_EMA_WINDOW``)
        :return: None
        """
        # the unsuffixed 'trix' name is used only when both args are omitted
        explicit = column is not None or windows is not None
        if column is None:
            column = 'close'
        if windows is None:
            windows = self.TRIX_EMA_WINDOW
        column_name = '{}_{}_trix'.format(column, windows) if explicit else 'trix'

        window = self.get_int_positive(windows)

        single = self._ema(self[column], window)
        double = self._ema(single, window)
        triple = self._ema(double, window)
        prev_triple = self._shift(triple, -1)
        triple_change = self._delta(triple, -1)
        self[column_name] = triple_change * 100 / prev_triple

    def _get_tema(self, column=None, windows=None):
        """ Another implementation for triple ema

        tema = 3 * single EMA - 3 * double EMA + triple EMA
        Check the algorithm described below:
        https://www.forextraders.com/forex-education/forex-technical-analysis/triple-exponential-moving-average-the-tema-indicator/

        :param column: column to calculate ema (default ``close``)
        :param windows: window of the calculation (default ``TEMA_EMA_WINDOW``)
        :return: None
        """
        # the unsuffixed 'tema' name is used only when both args are omitted
        explicit = column is not None or windows is not None
        if column is None:
            column = 'close'
        if windows is None:
            windows = self.TEMA_EMA_WINDOW
        column_name = '{}_{}_tema'.format(column, windows) if explicit else 'tema'

        window = self.get_int_positive(windows)

        single = self._ema(self[column], window)
        double = self._ema(single, window)
        triple = self._ema(double, window)
        self[column_name] = 3 * single - 3 * double + triple

    def _get_wr(self, window=None):
        """ Williams Overbought/Oversold Index

        Definition: https://www.investopedia.com/terms/w/williamsr.asp
        WMS = (Hn - Ct) / (Hn - Ln) * -100
        Ct - the close price
        Hn - N periods high
        Ln - N periods low

        :param window: number of periods; defaults to ``self.WR``
        :return: None
        """
        if window is None:
            window = self.WR
            column_name = 'wr'
        else:
            column_name = 'wr_{}'.format(window)
        window = self.get_int_positive(window)

        lowest = self._mov_min(self['low'], window)
        highest = self._mov_max(self['high'], window)
        self[column_name] = (highest - self['close']) / (highest - lowest) * -100

    def _get_cci(self, window=None):
        """ Commodity Channel Index

        CCI = (TP - N-period SMA of TP) / (0.015 * mean deviation)
        Typical Price (TP) = (High + Low + Close) / 3
        TP is also implemented as 'middle'.

        :param window: number of periods; defaults to ``self.CCI``
        :return: None
        """
        if window is None:
            window = self.CCI
            column_name = 'cci'
        else:
            column_name = 'cci_{}'.format(window)
        window = self.get_int_positive(window)

        tp = self._tp()
        tp_sma = self._sma(tp, window)
        # mean absolute deviation from the window mean
        mean_dev = tp.rolling(
            window=window, min_periods=1, center=False
        ).apply(lambda x: np.fabs(x - x.mean()).mean())

        self[column_name] = (tp - tp_sma) / (.015 * mean_dev)

    def _tr(self):
        prev_close = self._shift(self['close'], -1)
        high = self['high']
        low = self['low']
        c1 = high - low
        c2 = (high - prev_close).abs()
        c3 = (low - prev_close).abs()
        return pd.concat((c1, c2, c3), axis=1).max(axis=1)

    def _get_tr(self):
        """ True Range of the trading

         TR is a measure of volatility of a High-Low-Close series

        tr = max[(high - low), abs(high - close_prev), abs(low - close_prev)]

        :return: None
        """
        self['tr'] = self._tr()

    def _get_supertrend(self, window=None):
        """ Supertrend

        Supertrend indicator shows trend direction.
        It provides buy or sell indicators.
        https://medium.com/codex/step-by-step-implementation-of-the-supertrend-indicator-in-python-656aa678c111

        Stores three columns: ``supertrend_ub`` (upper band),
        ``supertrend_lb`` (lower band) and ``supertrend`` (the active band).

        :param window: number of periods; defaults to ``SUPERTREND_WINDOW``
        :return: None
        """
        if window is None:
            window = self.SUPERTREND_WINDOW
        window = self.get_int_positive(window)

        high = self['high']
        low = self['low']
        close = self['close']
        # band half-width: ATR scaled by the supertrend multiplier
        m_atr = self.SUPERTREND_MUL * self._atr(window)
        hl_avg = (high + low) / 2.0
        # basic upper band
        b_ub = hl_avg + m_atr
        # basic lower band
        b_lb = hl_avg - m_atr

        size = len(close)
        ub = np.empty(size, dtype=np.float64)
        lb = np.empty(size, dtype=np.float64)
        st = np.empty(size, dtype=np.float64)

        for i in range(size):
            if i == 0:
                # seed the bands with the basic bands on the first row
                ub[i] = b_ub.iloc[i]
                lb[i] = b_lb.iloc[i]
                if close.iloc[i] <= ub[i]:
                    st[i] = ub[i]
                else:
                    st[i] = lb[i]
                continue

            last_close = close.iloc[i - 1]
            curr_close = close.iloc[i]
            last_ub = ub[i - 1]
            last_lb = lb[i - 1]
            last_st = st[i - 1]
            curr_b_ub = b_ub.iloc[i]
            curr_b_lb = b_lb.iloc[i]

            # calculate current upper band: only tightens unless price broke out
            if curr_b_ub < last_ub or last_close > last_ub:
                ub[i] = curr_b_ub
            else:
                ub[i] = last_ub

            # calculate current lower band: only tightens unless price broke down
            if curr_b_lb > last_lb or last_close < last_lb:
                lb[i] = curr_b_lb
            else:
                lb[i] = last_lb

            # calculate supertrend: stay on the previous band until close
            # crosses it, then flip to the other band
            if last_st == last_ub:
                if curr_close <= ub[i]:
                    st[i] = ub[i]
                else:
                    st[i] = lb[i]
            elif last_st == last_lb:
                if curr_close > lb[i]:
                    st[i] = lb[i]
                else:
                    st[i] = ub[i]

        self['supertrend_ub'] = ub
        self['supertrend_lb'] = lb
        self['supertrend'] = st

    def _atr(self, window):
        tr = self._tr()
        return self._smma(tr, window)

    def _get_atr(self, window=None):
        """ Average True Range

        The average true range is an N-day smoothed moving average (SMMA) of
        the true range values.  Default to 14 periods.
        https://en.wikipedia.org/wiki/Average_true_range

        Stored as ``atr`` (default window) or ``atr_{window}``.

        :param window: number of periods
        :return: None
        """
        if window is None:
            window = self.ATR_SMMA
            column_name = 'atr'
        else:
            column_name = 'atr_{}'.format(window)
        window = self.get_int_positive(window)
        self[column_name] = self._atr(window)

    def _get_dma(self):
        """ Difference of Moving Average

        default to 10 and 50:
        dma = close_10_sma - close_50_sma

        :return: None
        """
        self['dma'] = self['close_10_sma'] - self['close_50_sma']

    def _get_dmi(self):
        """ get the default setting for DMI

        including:
        +DI: 14 periods SMMA of +DM,
        -DI: 14 periods SMMA of -DM,
        DX: based on +DI and -DI,
        ADX: 6 periods EMA of DX,
        ADXR: 6 periods EMA of ADX

        :return: None
        """
        self['pdi'] = self._get_pdi(self.PDI_SMMA)
        self['mdi'] = self._get_mdi(self.MDI_SMMA)
        self['dx'] = self._get_dx(self.DX_SMMA)
        self['adx'] = self._ema(self['dx'], self.ADX_EMA)
        self['adxr'] = self._ema(self['adx'], self.ADXR_EMA)

    def _get_um_dm(self):
        """ Up move and down move

        initialize up move and down move
        """
        hd = self['high_delta']
        self['um'] = (hd + hd.abs()) / 2
        ld = -self['low_delta']
        self['dm'] = (ld + ld.abs()) / 2

    def _get_pdm(self, windows):
        """ +DM, positive directional moving

        If window is not 1, calculate the SMMA of +DM

        :param windows: range
        :return:
        """
        window = self.get_int_positive(windows)
        column_name = 'pdm_{}'.format(window)
        um, dm = self['um'], self['dm']
        # +DM is the up-move when it dominates the down-move, otherwise 0
        self['pdm'] = np.where(um > dm, um, 0)
        if window > 1:
            # NOTE(review): relies on on-demand EMA column creation
            # implemented elsewhere in this class — confirm
            pdm = self['pdm_{}_ema'.format(window)]
        else:
            pdm = self['pdm']
        self[column_name] = pdm

    def _get_vr(self, windows=None):
        if windows is None:
            window = self.VR
            column_name = 'vr'
        else:
            window = self.get_int_positive(windows)
            column_name = 'vr_{}'.format(window)

        idx = self.index
        gt_zero = np.where(self['change'] > 0, self['volume'], 0)
        av = pd.Series(gt_zero, index=idx)
        avs = self._mov_sum(av, window)

        lt_zero = np.where(self['change'] < 0, self['volume'], 0)
        bv = pd.Series(lt_zero, index=idx)
        bvs = self._mov_sum(bv, window)

        eq_zero = np.where(self['change'] == 0, self['volume'], 0)
        cv = pd.Series(eq_zero, index=idx)
        cvs = self._mov_sum(cv, window)

        self[column_name] = (avs + cvs / 2) / (bvs + cvs / 2) * 100

    def _get_mdm(self, windows):
        """ -DM, negative directional moving accumulation

        If window is not 1, return the SMA of -DM.

        :param windows: range
        :return:
        """
        window = self.get_int_positive(windows)
        column_name = 'mdm_{}'.format(window)
        um, dm = self['um'], self['dm']
        # -DM is the down-move when it dominates the up-move, otherwise 0
        self['mdm'] = np.where(dm > um, dm, 0)
        if window > 1:
            # NOTE(review): relies on on-demand EMA column creation
            # implemented elsewhere in this class — confirm
            mdm = self['mdm_{}_ema'.format(window)]
        else:
            mdm = self['mdm']
        self[column_name] = mdm

    def _get_pdi(self, windows):
        """ +DI, positive directional moving index

        pdi = pdm / atr * 100 over the same window.

        :param windows: range
        :return: the +DI series
        """
        window = self.get_int_positive(windows)
        pdi_column = 'pdi_{}'.format(window)
        pdm = self['pdm_{}'.format(window)]
        atr = self['atr_{}'.format(window)]
        self[pdi_column] = pdm / atr * 100
        return self[pdi_column]

    def _get_mdi(self, windows):
        """-DI, negative directional moving index: mdm / atr * 100."""
        window = self.get_int_positive(windows)
        mdi_column = 'mdi_{}'.format(window)
        mdm = self['mdm_{}'.format(window)]
        atr = self['atr_{}'.format(window)]
        self[mdi_column] = mdm / atr * 100
        return self[mdi_column]

    def _get_dx(self, windows):
        """DX: directional index from +DI and -DI, in percent."""
        window = self.get_int_positive(windows)
        dx_column = 'dx_{}'.format(window)
        pdi = self['pdi_{}'.format(window)]
        mdi = self['mdi_{}'.format(window)]
        self[dx_column] = abs(pdi - mdi) / (pdi + mdi) * 100
        return self[dx_column]

    def _get_kdj_default(self):
        """ default KDJ, 9 periods

        :return: None
        """
        self['kdjk'] = self['kdjk_{}'.format(self.KDJ_WINDOW)]
        self['kdjd'] = self['kdjd_{}'.format(self.KDJ_WINDOW)]
        self['kdjj'] = self['kdjj_{}'.format(self.KDJ_WINDOW)]

    def _get_cr(self, window=26):
        """ Energy Index (Intermediate Willingness Index)

        https://support.futunn.com/en/topic167/?lang=en-us
        Use the relationship between the highest price, the lowest price and
        yesterday's middle price to reflect the market's willingness to buy
        and sell.

        Also stores the shifted moving averages cr-ma1/cr-ma2/cr-ma3.

        :param window: window of the moving sum
        :return: None
        """
        middle = self._tp()
        # yesterday's typical price (ym and last_middle are the same series)
        last_middle = self._shift(middle, -1)
        ym = self._shift(middle, -1)
        high = self['high']
        low = self['low']
        p1_m = pd.concat((last_middle, high), axis=1).min(axis=1)
        p2_m = pd.concat((last_middle, low), axis=1).min(axis=1)
        p1 = self._mov_sum(high - p1_m, window)
        p2 = self._mov_sum(ym - p2_m, window)
        self['cr'] = cr = p1 / p2 * 100
        self['cr-ma1'] = self._shifted_cr_sma(cr, self.CR_MA1)
        self['cr-ma2'] = self._shifted_cr_sma(cr, self.CR_MA2)
        self['cr-ma3'] = self._shifted_cr_sma(cr, self.CR_MA3)

    def _shifted_cr_sma(self, cr, window):
        cr_sma = self._sma(cr, window)
        return self._shift(cr_sma, -int(window / 2.5 + 1))

    def _tp(self):
        return (self['close'] + self['high'] + self['low']).divide(3.0)

    def _get_tp(self):
        self['tp'] = self._tp()

    def _get_middle(self):
        self['middle'] = self._tp()

    def _calc_kd(self, column):
        param0, param1 = self.KDJ_PARAM
        k = 50.0
        # noinspection PyTypeChecker
        for i in param1 * column:
            k = param0 * k + i
            yield k

    def _get_kdjk(self, window):
        """ Get the K of KDJ

        K ＝ 2/3 × (prev. K) +1/3 × (curr. RSV)
        2/3 and 1/3 are the smooth parameters.
        :param window: number of periods
        :return: None
        """
        rsv_column = 'rsv_{}'.format(window)
        k_column = 'kdjk_{}'.format(window)
        self[k_column] = list(self._calc_kd(self.get(rsv_column)))

    def _get_kdjd(self, window):
        """ Get the D of KDJ

        D = 2/3 × (prev. D) +1/3 × (curr. K)
        2/3 and 1/3 are the smooth parameters.
        :param window: number of periods
        :return: None
        """
        k_column = 'kdjk_{}'.format(window)
        d_column = 'kdjd_{}'.format(window)
        self[d_column] = list(self._calc_kd(self.get(k_column)))

    def _get_kdjj(self, window):
        """ Get the J of KDJ

        J = 3K-2D
        :param self: data
        :param window: number of periods
        :return: None
        """
        k_column = 'kdjk_{}'.format(window)
        d_column = 'kdjd_{}'.format(window)
        j_column = 'kdjj_{}'.format(window)
        self[j_column] = 3 * self[k_column] - 2 * self[d_column]

    @staticmethod
    def _delta(series, window):
        return series.diff(-window).fillna(0.0)

    def _get_d(self, column, shifts):
        shift = self.to_int(shifts)
        column_name = '{}_{}_d'.format(column, shift)
        self[column_name] = self._delta(self[column], shift)

    @staticmethod
    def _mov_min(series, size):
        return series.rolling(min_periods=1, window=size, center=False).min()

    @staticmethod
    def _mov_max(series, size):
        return series.rolling(min_periods=1, window=size, center=False).max()

    @staticmethod
    def _mov_sum(series, size):
        return series.rolling(min_periods=1, window=size, center=False).sum()

    @staticmethod
    def _sma(series, size):
        return series.rolling(min_periods=1, window=size, center=False).mean()

    def _get_sma(self, column, windows):
        """ get simple moving average

        Stores the result as ``{column}_{window}_sma``.

        :param column: column to calculate
        :param windows: collection of window of simple moving average
        :return: None
        """
        window = self.get_int_positive(windows)
        key = '{}_{}_sma'.format(column, window)
        self[key] = self._sma(self[column], window)

    @staticmethod
    def _ema(series, window):
        return series.ewm(
            ignore_na=False,
            span=window,
            min_periods=0,
            adjust=True).mean()

    def _get_ema(self, column, windows):
        """ Append an exponential moving average column.

        The result is stored as '<column>_<window>_ema'.

        :param column: column to calculate
        :param windows: window size specification
        :return: None
        """
        size = self.get_int_positive(windows)
        target = '{}_{}_ema'.format(column, size)
        self[target] = self._ema(self[column], size)

    def _get_boll(self):
        """ Compute Bollinger bands.

        'boll' is the BOLL_PERIOD moving average of close; 'boll_ub' and
        'boll_lb' are the upper/lower bands at BOLL_STD_TIMES moving
        standard deviations around it.

        :return: None
        """
        center = self._sma(self['close'], self.BOLL_PERIOD)
        band = self.BOLL_STD_TIMES * self._mstd(self['close'], self.BOLL_PERIOD)
        self['boll'] = center
        self['boll_ub'] = center + band
        self['boll_lb'] = center - band

    def _get_macd(self):
        """ Moving Average Convergence Divergence.

        Fills three columns:
          macd  -- short EMA minus long EMA of close
          macds -- signal line, EMA of macd
          macdh -- histogram, macd minus macds

        :return: None
        """
        prices = self['close']
        fast = self._ema(prices, self.MACD_EMA_SHORT)
        slow = self._ema(prices, self.MACD_EMA_LONG)
        self['macd'] = fast - slow
        self['macds'] = self._ema(self['macd'], self.MACD_EMA_SIGNAL)
        self['macdh'] = self['macd'] - self['macds']

    def _get_ppo(self):
        """ Percentage Price Oscillator.

        ppo  -- (short EMA - long EMA) / long EMA * 100
        ppos -- signal line, EMA of ppo
        ppoh -- histogram, ppo minus ppos

        See: https://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:price_oscillators_ppo

        :return: None
        """
        prices = self['close']
        fast = self._ema(prices, self.PPO_EMA_SHORT)
        slow = self._ema(prices, self.PPO_EMA_LONG)
        self['ppo'] = (fast - slow) / slow * 100
        self['ppos'] = self._ema(self['ppo'], self.PPO_EMA_SIGNAL)
        self['ppoh'] = self['ppo'] - self['ppos']

    def get_int_positive(self, windows):
        """ Coerce `windows` to a positive int window size.

        Non-int values are converted through `self.to_int`.

        :param windows: window specification (int or value `to_int` accepts)
        :return: the window as a positive int
        :raises IndexError: if the resulting window is not greater than 0
        """
        if isinstance(windows, int):
            window = windows
        else:
            window = self.to_int(windows)
        # Validate unconditionally: the original only checked converted
        # values, so a non-positive int argument slipped through despite
        # the method's name promising a positive window.
        if window <= 0:
            raise IndexError("window must be greater than 0")
        return window

    @staticmethod
    def _mstd(series, window):
        return series.rolling(min_periods=1, window=window, center=False).std()

    def _get_mstd(self, column, windows):
        """ Append a moving standard deviation column.

        The result is stored as '<column>_<window>_mstd'.

        :param column: column to calculate
        :param windows: window size specification
        :return: None
        """
        size = self.get_int_positive(windows)
        target = '{}_{}_mstd'.format(column, size)
        self[target] = self._mstd(self[column], size)

    def _get_mvar(self, column, windows):
        """ Append a moving variance column.

        The result is stored as '<column>_<window>_mvar'.

        :param column: column to calculate
        :param windows: window size specification
        :return: None
        """
        size = self.get_int_positive(windows)
        windowed = self[column].rolling(min_periods=1, window=size, center=False)
        self['{}_{}_mvar'.format(column, size)] = windowed.var()

    def _get_vwma(self, window=None):
        """ Volume Weighted Moving Average.

        Rolling sum of (typical price * volume) divided by the rolling sum
        of volume. Stored as 'vwma' for the default window, otherwise
        'vwma_<window>'. See:
        https://www.investopedia.com/articles/trading/11/trading-with-vwap-mvwap.asp

        :param window: number of periods relevant for the indicator
        :return: None
        """
        if window is None:
            window = self.VWMA
            column_name = 'vwma'
        else:
            column_name = 'vwma_{}'.format(window)
        window = self.get_int_positive(window)

        weighted_price = self['volume'] * self._tp()
        numerator = self._mov_sum(weighted_price, window)
        denominator = self._mov_sum(self['volume'], window)
        self[column_name] = numerator / denominator

    def _get_chop(self, window=None):
        """ Choppiness Index (CHOP).

        CHOP = 100 * LOG10( SUM(ATR(1), n) / (MaxHi(n) - MinLo(n)) ) / LOG10(n)
        where MaxHi(n)/MinLo(n) are the highest high and lowest low over the
        past n bars and ATR(1) is the single-period average true range.
        See: https://www.tradingview.com/education/choppinessindex/

        Stored as 'chop' for the default window, otherwise 'chop_<window>'.

        :param window: number of periods relevant for the indicator
        :return: None
        """
        if window is None:
            window = self.CHOP
            column_name = 'chop'
        else:
            column_name = 'chop_{}'.format(window)
        window = self.get_int_positive(window)
        true_range_sum = self._mov_sum(self._atr(1), window)
        highest = self._mov_max(self['high'], window)
        lowest = self._mov_min(self['low'], window)
        ratio = true_range_sum / (highest - lowest)
        self[column_name] = np.log10(ratio) * 100 / np.log10(window)

    def _get_mfi(self, window=None):
        """ Money Flow Index (scaled to 0..1 here, not 0..100).

        Positive and negative money flows are summed over the window and
        combined as MFI = 1 - 1 / (1 + pos/neg). The first `window` rows
        are forced to the neutral value 0.5.
        See: https://www.investopedia.com/terms/m/mfi.asp

        :param window: number of periods relevant for the indicator
        :return: None
        """
        if window is None:
            window = self.MFI
            column_name = 'mfi'
        else:
            column_name = 'mfi_{}'.format(window)
        window = self.get_int_positive(window)
        typical = self._tp()
        raw_flow = (typical * self["volume"]).fillna(0.0)
        previous = self._shift(typical, -1)
        change = (typical - previous).fillna(0)
        up_flow = raw_flow.mask(change < 0, 0)
        down_flow = raw_flow.mask(change >= 0, 0)
        rolling_up = self._mov_sum(up_flow, window)
        rolling_down = self._mov_sum(down_flow, window)
        # the epsilon keeps the ratio finite when there is no negative flow
        flow_ratio = rolling_up / (rolling_down + 1e-12)
        mfi = (1.0 - 1.0 / (1 + flow_ratio))
        mfi.iloc[:window] = 0.5
        self[column_name] = mfi

    def _get_kama(self, column, windows, fasts=None, slows=None):
        """ get Kaufman's Adaptive Moving Average.
        Implemented after
        https://school.stockcharts.com/doku.php?id=technical_indicators:kaufman_s_adaptive_moving_average

        The smoothing constant adapts between a fast and a slow EMA constant
        based on the efficiency ratio (net change / total volatility), so the
        average reacts quickly in trends and flattens out in noise.

        :param column: column to calculate
        :param windows: collection of window of exponential moving average
        :param fasts: fastest EMA constant
        :param slows: slowest EMA constant
        :return: None
        """
        window = self.get_int_positive(windows)
        if slows is None or fasts is None:
            # fall back to the class-level defaults and the short column name
            slow, fast = self.KAMA_SLOW, self.KAMA_FAST
            column_name = "{}_{}_kama".format(column, window)
        else:
            slow = self.get_int_positive(slows)
            fast = self.get_int_positive(fasts)
            column_name = '{}_{}_kama_{}_{}'.format(column, window, fast, slow)

        col = self[column]
        # presumably the value `window` rows earlier -- `_shift` semantics
        # are defined elsewhere in this class; confirm direction
        col_window_s = self._shift(col, -window)
        col_last = self._shift(col, -1)
        change = (col - col_window_s).abs()
        volatility = self._mov_sum((col - col_last).abs(), window)
        efficiency_ratio = change / volatility
        fast_ema_smoothing = 2.0 / (fast + 1)
        slow_ema_smoothing = 2.0 / (slow + 1)
        smoothing_2 = fast_ema_smoothing - slow_ema_smoothing
        efficient_smoothing = efficiency_ratio * smoothing_2
        # NOTE(review): the reference formula squares (ER*(fast-slow)+slow)
        # while this doubles it -- confirm this matches the intended
        # (stockstats) behavior before changing
        smoothing = 2 * (efficient_smoothing + slow_ema_smoothing)

        # start with simple moving average
        kama = self._sma(col, window)
        last_kama = kama.iloc[window - 1]
        # recursive definition: each KAMA value blends the current price
        # with the previous KAMA using the adaptive smoothing constant
        for i in range(window, len(kama)):
            cur = smoothing.iloc[i] * (col.iloc[i] - last_kama) + last_kama
            kama.iloc[i] = cur
            last_kama = cur
        self[column_name] = kama

    @staticmethod
    def parse_column_name(name):
        """ Split an indicator column name into its parts.

        Returns (column, window, indicator) for names like 'close_10_sma',
        (column, window) for names like 'rsi_6', and the 1-tuple (None,)
        when nothing matches. When the leading part itself contains one of
        MULTI_SPLIT_INDICATORS (e.g. 'close_10_kama_2_30'), it is re-parsed
        to yield a 5-tuple (column, window, indicator, param1, param2).
        """
        m = re.match(r'(.*)_([\d\-+~,.]+)_(\w+)', name)
        ret = (None,)
        if m is None:
            # no trailing indicator word: try the '<column>_<window>' form
            m = re.match(r'(.*)_([\d\-+~,]+)', name)
            if m is not None:
                ret = m.group(1, 2)
        else:
            ret = m.group(1, 2, 3)
            if any(map(lambda i: i in ret[0],
                       StockDataFrame.MULTI_SPLIT_INDICATORS)):
                # the greedy first group swallowed the real indicator;
                # re-parse it to recover (column, window, indicator)
                m_prev = re.match(r'(.*)_([\d\-+~,.]+)_(\w+)', ret[0])
                if m_prev is not None:
                    ret = m_prev.group(1, 2, 3) + ret[1:]
        return ret

    # pattern for cross-style columns: '<left>_<op>_<right>' where op is
    # x (any cross), xu (cross up) or xd (cross down)
    CROSS_COLUMN_MATCH_STR = '(.+)_(x|xu|xd)_(.+)'

    @classmethod
    def is_cross_columns(cls, name):
        """ Return True if `name` requests a cross column. """
        return re.match(cls.CROSS_COLUMN_MATCH_STR, name) is not None

    @classmethod
    def parse_cross_column(cls, name):
        """ Split a cross column name into (left, op, right).

        Returns [None, None, None] when the name does not match.
        """
        m = re.match(cls.CROSS_COLUMN_MATCH_STR, name)
        ret = [None, None, None]
        if m is not None:
            ret = m.group(1, 2, 3)
        return ret

    def _get_rate(self):
        """ same as percent

        :return: None
        """
        self['rate'] = self['close'].pct_change() * 100

    def _get_delta(self, key):
        key_to_delta = key.replace('_delta', '')
        self[key] = self[key_to_delta].diff()
        return self[key]

    def _get_cross(self, key):
        """ Crossing signal between two columns.

        'x' marks any cross, 'xu' a cross from below to above, and 'xd'
        a cross from above to below.
        """
        left, op, right = StockDataFrame.parse_cross_column(key)
        above = self[left] > self[right]
        # noinspection PyTypeChecker
        crossed = np.zeros_like(above)
        if len(crossed) > 1:
            # a cross occurs exactly where the comparison flips
            # noinspection PyTypeChecker
            crossed[1:] = np.diff(above)
            crossed[0] = False
        if op == 'x':
            self[key] = crossed
        elif op == 'xu':
            self[key] = crossed & above
        elif op == 'xd':
            self[key] = crossed & ~above
        return self[key]

    def init_all(self):
        """ Compute every indicator registered in `handler`. """
        for compute in self.handler.values():
            compute()

    @property
    def handler(self):
        """ Registry of fixed-name indicators.

        Maps the tuple of column names each initializer fills to the bound
        method that computes them; used by `init_all` and the lazy column
        lookup in `__getitem__`.
        """
        return {
            ('change',): self._get_change,
            ('rsi',): self._get_rsi,
            ('stochrsi',): self._get_stochrsi,
            ('rate',): self._get_rate,
            ('middle',): self._get_middle,
            ('tp',): self._get_tp,
            ('boll', 'boll_ub', 'boll_lb'): self._get_boll,
            ('macd', 'macds', 'macdh'): self._get_macd,
            ('ppo', 'ppos', 'ppoh'): self._get_ppo,
            ('kdjk', 'kdjd', 'kdjj'): self._get_kdj_default,
            ('cr', 'cr-ma1', 'cr-ma2', 'cr-ma3'): self._get_cr,
            ('cci',): self._get_cci,
            ('tr',): self._get_tr,
            ('atr',): self._get_atr,
            ('um', 'dm'): self._get_um_dm,
            ('pdi', 'mdi', 'dx', 'adx', 'adxr'): self._get_dmi,
            ('trix',): self._get_trix,
            ('tema',): self._get_tema,
            ('vr',): self._get_vr,
            ('dma',): self._get_dma,
            ('vwma',): self._get_vwma,
            ('chop',): self._get_chop,
            ('log-ret',): self._get_log_ret,
            ('mfi',): self._get_mfi,
            ('wt1', 'wt2'): self._get_wave_trend,
            ('wr',): self._get_wr,
            ('supertrend',
             'supertrend_lb',
             'supertrend_ub'): self._get_supertrend,
        }

    def __init_not_exist_column(self, key):
        """ Compute a missing column from its name.

        Tries, in order: the fixed-name handlers, '<col>_delta' columns,
        cross columns ('x'/'xu'/'xd'), and finally generic parameterized
        names dispatched to the matching `_get_<indicator>` method.
        """
        for names, handler in self.handler.items():
            if key in names:
                handler()
                return

        if key.endswith('_delta'):
            self._get_delta(key)
        elif self.is_cross_columns(key):
            self._get_cross(key)
        else:
            ret = self.parse_column_name(key)
            if len(ret) == 5:
                # e.g. 'close_10_kama_2_30' -> _get_kama('close', '10', '2', '30')
                c, r, t, s, f = ret
                func_name = '_get_{}'.format(t)
                getattr(self, func_name)(c, r, s, f)
            elif len(ret) == 3:
                # e.g. 'close_10_sma' -> _get_sma('close', '10')
                c, r, t = ret
                func_name = '_get_{}'.format(t)
                getattr(self, func_name)(c, r)
            elif len(ret) == 2:
                # e.g. 'rsi_6' -> _get_rsi('6')
                c, r = ret
                func_name = '_get_{}'.format(c)
                getattr(self, func_name)(r)
            else:
                raise UserWarning("Invalid number of return arguments "
                                  "after parsing column name: '{}'"
                                  .format(key))

    def __init_column(self, key):
        """ Ensure `key` exists: empty frames get an empty column,
        otherwise the matching indicator is computed. """
        if key in self:
            return
        if len(self) == 0:
            self[key] = []
        else:
            self.__init_not_exist_column(key)

    def __getitem__(self, item):
        """ Column access that lazily computes missing indicator columns.

        On KeyError the requested column(s) are initialized via
        `__init_column` and the lookup retried; results go through `wrap`
        to keep the extended DataFrame type.
        """
        try:
            result = wrap(super(StockDataFrame, self).__getitem__(item))
        except KeyError:
            try:
                if isinstance(item, list):
                    for column in item:
                        self.__init_column(column)
                else:
                    self.__init_column(item)
            except AttributeError:
                # presumably non-column lookups land here; ignore and let
                # the retried __getitem__ raise the real error -- confirm
                pass
            result = wrap(super(StockDataFrame, self).__getitem__(item))
        return result

    def till(self, end_date):
        """ Rows with index up to and including `end_date`. """
        mask = self.index <= end_date
        return self[mask]

    def start_from(self, start_date):
        """ Rows with index at or after `start_date`. """
        mask = self.index >= start_date
        return self[mask]

    def within(self, start_date, end_date):
        """ Rows with index between the two dates, inclusive. """
        clipped = self.start_from(start_date)
        return clipped.till(end_date)

    def copy(self, deep=True):
        """ Copy the frame, preserving the extended type via `wrap`. """
        duplicate = super(StockDataFrame, self).copy(deep)
        return wrap(duplicate)

    def _ensure_type(self, obj):
        """ override the method in pandas, omit the check

        This patch is not the perfect way but could make the lib work.
        """
        return obj

    @staticmethod
    def retype(value, index_column=None):
        """ Convert a plain `DataFrame` into this extended class.

        Column names are lower-cased and `index_column` (default 'date')
        becomes the index when present. Values that are already converted,
        or are not DataFrames at all, pass through unchanged.

        :param index_column: name of the index column, default to `date`
        :param value: value to convert
        :return: this extended class
        """
        if index_column is None:
            index_column = 'date'

        if isinstance(value, StockDataFrame):
            return value
        if not isinstance(value, pd.DataFrame):
            return value

        # use all lower case for column name
        value.columns = map(lambda c: c.lower(), value.columns)
        if index_column in value.columns:
            value.set_index(index_column, inplace=True)
        return StockDataFrame(value)


class StockTradingEnv(gym.Env):
    """A stock trading environment for OpenAI gym.

    The state vector is laid out as:
        [cash,
         close price per stock (stock_dim entries),
         shares held per stock (stock_dim entries),
         technical indicator values (remaining entries)]
    Actions are continuous in [-1, 1] per stock and are scaled by `hmax`
    into a number of shares to buy (positive) or sell (negative).
    """
    metadata = {'render.modes': ['human']}

    def __init__(self,
                 df,
                 stock_dim,
                 hmax,
                 initial_amount,
                 buy_cost_pct,
                 sell_cost_pct,
                 reward_scaling,
                 state_space,
                 action_space,
                 tech_indicator_list,
                 turbulence_threshold=None,
                 make_plots=False,
                 print_verbosity=10,
                 day=0,
                 initial=True,
                 previous_state=None,
                 model_name='',
                 mode='',
                 iteration=''):
        self.day = day
        self.df = df
        self.stock_dim = stock_dim
        self.hmax = hmax
        self.initial_amount = initial_amount
        self.buy_cost_pct = buy_cost_pct
        self.sell_cost_pct = sell_cost_pct
        self.reward_scaling = reward_scaling
        self.state_space = state_space
        self.action_space = action_space
        self.tech_indicator_list = tech_indicator_list
        self.action_space = spaces.Box(low=-1, high=1, shape=(self.action_space,))
        self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(self.state_space,))
        self.data = self.df.loc[self.day, :]
        self.terminal = False
        self.make_plots = make_plots
        self.print_verbosity = print_verbosity
        self.turbulence_threshold = turbulence_threshold
        self.initial = initial
        # BUGFIX: the original used a mutable default argument ([]), which
        # is shared across every instance constructed with the default.
        self.previous_state = [] if previous_state is None else previous_state
        self.model_name = model_name
        self.mode = mode
        self.iteration = iteration
        # initalize state
        self.state = self._initiate_state()

        # initialize reward
        self.reward = 0
        self.turbulence = 0
        self.cost = 0
        self.trades = 0
        self.episode = 0
        # memorize all the total balance change
        self.asset_memory = [self.initial_amount]
        self.rewards_memory = []
        self.actions_memory = []
        self.date_memory = [self._get_date()]
        # self.reset()
        self._seed()

    def _sell_stock(self, index, action):
        """Sell up to `abs(action)` shares of stock `index`; returns the
        number of shares actually sold."""
        def _do_sell_normal():
            if self.state[index + 1] > 0:
                # Sell only if the price is > 0 (no missing data in this particular date)
                # perform sell action based on the sign of the action
                if self.state[index + self.stock_dim + 1] > 0:
                    # Sell only if current asset is > 0
                    sell_num_shares = min(abs(action), self.state[index + self.stock_dim + 1])
                    sell_amount = self.state[index + 1] * sell_num_shares * (1 - self.sell_cost_pct)
                    # update balance
                    self.state[0] += sell_amount

                    self.state[index + self.stock_dim + 1] -= sell_num_shares
                    self.cost += self.state[index + 1] * sell_num_shares * self.sell_cost_pct
                    self.trades += 1
                else:
                    sell_num_shares = 0
            else:
                sell_num_shares = 0

            return sell_num_shares

        # perform sell action based on the sign of the action
        if self.turbulence_threshold is not None:
            if self.turbulence >= self.turbulence_threshold:
                if self.state[index + 1] > 0:
                    # Sell only if the price is > 0 (no missing data in this particular date)
                    # if turbulence goes over threshold, just clear out all positions
                    if self.state[index + self.stock_dim + 1] > 0:
                        # Sell only if current asset is > 0
                        sell_num_shares = self.state[index + self.stock_dim + 1]
                        sell_amount = self.state[index + 1] * sell_num_shares * (1 - self.sell_cost_pct)
                        # update balance
                        self.state[0] += sell_amount
                        self.state[index + self.stock_dim + 1] = 0
                        # BUGFIX: the cost must be computed from the shares
                        # just sold; the original multiplied by the holding
                        # AFTER it was zeroed, so it always added 0.
                        self.cost += self.state[index + 1] * sell_num_shares * \
                                     self.sell_cost_pct
                        self.trades += 1
                    else:
                        sell_num_shares = 0
                else:
                    sell_num_shares = 0
            else:
                sell_num_shares = _do_sell_normal()
        else:
            sell_num_shares = _do_sell_normal()

        return sell_num_shares

    def _buy_stock(self, index, action):
        """Buy up to `action` shares of stock `index`, limited by available
        cash; returns the number of shares actually bought."""
        def _do_buy():
            if self.state[index + 1] > 0:
                # Buy only if the price is > 0 (no missing data in this particular date)
                available_amount = self.state[0] // self.state[index + 1]

                # update balance
                buy_num_shares = min(available_amount, action)
                buy_amount = self.state[index + 1] * buy_num_shares * (1 + self.buy_cost_pct)
                self.state[0] -= buy_amount

                self.state[index + self.stock_dim + 1] += buy_num_shares

                self.cost += self.state[index + 1] * buy_num_shares * self.buy_cost_pct
                self.trades += 1
            else:
                buy_num_shares = 0

            return buy_num_shares

        # perform buy action based on the sign of the action;
        # no buying while turbulence is at or above the threshold
        if self.turbulence_threshold is None:
            buy_num_shares = _do_buy()
        else:
            if self.turbulence < self.turbulence_threshold:
                buy_num_shares = _do_buy()
            else:
                buy_num_shares = 0

        return buy_num_shares

    def _make_plot(self):
        """Save the account-value curve of the current episode to disk."""
        plt.plot(self.asset_memory, 'r')
        plt.savefig('results/account_value_trade_{}.png'.format(self.episode))
        plt.close()

    def step(self, actions):
        """Advance one trading day.

        :param actions: array in [-1, 1] per stock, scaled by hmax to shares
        :return: (state, reward, terminal, info) gym tuple
        """
        self.terminal = self.day >= len(self.df.index.unique()) - 1
        if self.terminal:
            if self.make_plots:
                self._make_plot()
            end_total_asset = self.state[0] + \
                              sum(np.array(self.state[1:(self.stock_dim + 1)]) * np.array(self.state[(self.stock_dim + 1):(self.stock_dim * 2 + 1)]))
            df_total_value = pd.DataFrame(self.asset_memory)
            tot_reward = end_total_asset - self.initial_amount
            df_total_value.columns = ['account_value']
            df_total_value['date'] = self.date_memory
            df_total_value['daily_return'] = df_total_value['account_value'].pct_change(1)
            if df_total_value['daily_return'].std() != 0:
                sharpe = (252 ** 0.5) * df_total_value['daily_return'].mean() / \
                         df_total_value['daily_return'].std()
            df_rewards = pd.DataFrame(self.rewards_memory)
            df_rewards.columns = ['account_rewards']
            df_rewards['date'] = self.date_memory[:-1]
            if self.episode % self.print_verbosity == 0:
                print(f"day: {self.day}, episode: {self.episode}")
                print(f"begin_total_asset: {self.asset_memory[0]:0.2f}")
                print(f"end_total_asset: {end_total_asset:0.2f}")
                print(f"total_reward: {tot_reward:0.2f}")
                print(f"total_cost: {self.cost:0.2f}")
                print(f"total_trades: {self.trades}")
                if df_total_value['daily_return'].std() != 0:
                    print(f"Sharpe: {sharpe:0.3f}")
                print("=================================")

            if (self.model_name != '') and (self.mode != ''):
                df_actions = self.save_action_memory()
                df_actions.to_csv('results/actions_{}_{}_{}.csv'.format(self.mode, self.model_name, self.iteration))
                df_total_value.to_csv('results/account_value_{}_{}_{}.csv'.format(self.mode, self.model_name, self.iteration), index=False)
                df_rewards.to_csv('results/account_rewards_{}_{}_{}.csv'.format(self.mode, self.model_name, self.iteration), index=False)
                plt.plot(self.asset_memory, 'r')
                # BUGFIX: savefig does not accept index=False (that is a
                # to_csv kwarg); passing it raised a TypeError here.
                plt.savefig('results/account_value_{}_{}_{}.png'.format(self.mode, self.model_name, self.iteration))
                plt.close()

            return self.state, self.reward, self.terminal, {}

        else:

            actions = actions * self.hmax  # actions initially are scaled between -1 and 1
            actions = (actions.astype(int))  # convert into integer because we can't buy fractions of shares
            if self.turbulence_threshold is not None:
                if self.turbulence >= self.turbulence_threshold:
                    actions = np.array([-self.hmax] * self.stock_dim)
            begin_total_asset = self.state[0] + \
                                sum(np.array(self.state[1:(self.stock_dim + 1)]) * np.array(self.state[(self.stock_dim + 1):(self.stock_dim * 2 + 1)]))

            argsort_actions = np.argsort(actions)

            # most-negative actions first for sells, most-positive for buys
            sell_index = argsort_actions[:np.where(actions < 0)[0].shape[0]]
            buy_index = argsort_actions[::-1][:np.where(actions > 0)[0].shape[0]]

            for index in sell_index:
                actions[index] = self._sell_stock(index, actions[index]) * (-1)

            for index in buy_index:
                actions[index] = self._buy_stock(index, actions[index])

            self.actions_memory.append(actions)

            self.day += 1
            self.data = self.df.loc[self.day, :]
            if self.turbulence_threshold is not None:
                self.turbulence = self.data['turbulence'].values[0]
            self.state = self._update_state()

            end_total_asset = self.state[0] + \
                              sum(np.array(self.state[1:(self.stock_dim + 1)]) * np.array(self.state[(self.stock_dim + 1):(self.stock_dim * 2 + 1)]))
            self.asset_memory.append(end_total_asset)
            self.date_memory.append(self._get_date())
            # reward is the (unscaled) change in total assets; the scaled
            # value is what the agent actually receives
            self.reward = end_total_asset - begin_total_asset
            self.rewards_memory.append(self.reward)
            self.reward = self.reward * self.reward_scaling

        return self.state, self.reward, self.terminal, {}

    def reset(self):
        """Reset the environment to day 0 and return the initial state."""
        # initiate state
        self.state = self._initiate_state()

        if self.initial:
            self.asset_memory = [self.initial_amount]
        else:
            previous_total_asset = self.previous_state[0] + \
                                   sum(np.array(self.state[1:(self.stock_dim + 1)]) * np.array(self.previous_state[(self.stock_dim + 1):(self.stock_dim * 2 + 1)]))
            self.asset_memory = [previous_total_asset]

        self.day = 0
        self.data = self.df.loc[self.day, :]
        self.turbulence = 0
        self.cost = 0
        self.trades = 0
        self.terminal = False
        self.rewards_memory = []
        self.actions_memory = []
        self.date_memory = [self._get_date()]

        self.episode += 1

        return self.state

    def render(self, mode='human', close=False):
        """Return the raw state vector (no graphical rendering)."""
        return self.state

    def _initiate_state(self):
        """Build the day-0 state, either fresh or carried over from
        `previous_state` (holdings are reused, prices are current)."""
        if self.initial:
            # For Initial State
            if len(self.df.tic.unique()) > 1:
                # for multiple stock
                state = [self.initial_amount] + \
                        self.data.close.values.tolist() + \
                        [0] * self.stock_dim + \
                        sum([self.data[tech].values.tolist() for tech in self.tech_indicator_list], [])
            else:
                # for single stock
                state = [self.initial_amount] + \
                        [self.data.close] + \
                        [0] * self.stock_dim + \
                        sum([[self.data[tech]] for tech in self.tech_indicator_list], [])
        else:
            # Using Previous State
            if len(self.df.tic.unique()) > 1:
                # for multiple stock
                state = [self.previous_state[0]] + \
                        self.data.close.values.tolist() + \
                        self.previous_state[(self.stock_dim + 1):(self.stock_dim * 2 + 1)] + \
                        sum([self.data[tech].values.tolist() for tech in self.tech_indicator_list], [])
            else:
                # for single stock
                state = [self.previous_state[0]] + \
                        [self.data.close] + \
                        self.previous_state[(self.stock_dim + 1):(self.stock_dim * 2 + 1)] + \
                        sum([[self.data[tech]] for tech in self.tech_indicator_list], [])
        return state

    def _update_state(self):
        """Refresh prices and indicators for the new day, keeping cash and
        holdings from the current state."""
        if len(self.df.tic.unique()) > 1:
            # for multiple stock
            state = [self.state[0]] + \
                    self.data.close.values.tolist() + \
                    list(self.state[(self.stock_dim + 1):(self.stock_dim * 2 + 1)]) + \
                    sum([self.data[tech].values.tolist() for tech in self.tech_indicator_list], [])

        else:
            # for single stock
            state = [self.state[0]] + \
                    [self.data.close] + \
                    list(self.state[(self.stock_dim + 1):(self.stock_dim * 2 + 1)]) + \
                    sum([[self.data[tech]] for tech in self.tech_indicator_list], [])

        return state

    def _get_date(self):
        """Return the date of the current day's data slice."""
        if len(self.df.tic.unique()) > 1:
            date = self.data.date.unique()[0]
        else:
            date = self.data.date
        return date

    def save_asset_memory(self):
        """Return a DataFrame of (date, account_value) for the episode."""
        date_list = self.date_memory
        asset_list = self.asset_memory
        df_account_value = pd.DataFrame({'date': date_list, 'account_value': asset_list})
        return df_account_value

    def save_action_memory(self):
        """Return a DataFrame of the actions taken, indexed by date."""
        if len(self.df.tic.unique()) > 1:
            # date and close price length must match actions length
            date_list = self.date_memory[:-1]
            df_date = pd.DataFrame(date_list)
            df_date.columns = ['date']

            action_list = self.actions_memory
            df_actions = pd.DataFrame(action_list)
            df_actions.columns = self.data.tic.values
            df_actions.index = df_date.date
        else:
            date_list = self.date_memory[:-1]
            action_list = self.actions_memory
            df_actions = pd.DataFrame({'date': date_list, 'actions': action_list})
        return df_actions

    def _seed(self, seed=None):
        """Seed the environment's RNG; returns the seed in a list (gym API)."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def get_sb_env(self):
        """Wrap this env in a stable-baselines3 DummyVecEnv and reset it."""
        e = DummyVecEnv([lambda: self])
        obs = e.reset()
        return e, obs


# Registry of supported stable-baselines3 algorithms, keyed by lower-case name
MODELS = {"a2c": A2C, "ddpg": DDPG, "td3": TD3, "sac": SAC, "ppo": PPO}
# Default hyper-parameters pulled from framework_config (e.g. A2C_PARAMS)
MODEL_KWARGS = {x: config.__dict__[f"{x.upper()}_PARAMS"] for x in MODELS.keys()}

# Action-noise classes selectable by name (used by off-policy algorithms)
NOISE = {
    "normal": NormalActionNoise,
    "ornstein_uhlenbeck": OrnsteinUhlenbeckActionNoise,
}


class DRLAgent:
    """Thin wrapper around Stable-Baselines3 DRL algorithms.

    Attributes
    ----------
        env: gym environment class
            user-defined class

    Methods
    -------
        get_model()
            build an SB3 model ("a2c"/"ddpg"/"td3"/"sac"/"ppo") for self.env
        train_model()
            train a model produced by get_model()
        DRL_prediction()
            make a prediction in a test dataset and get results
    """

    @staticmethod
    def DRL_prediction(model, environment):
        """Roll a trained model through ``environment`` and collect results.

        Returns a ``(account_value_df, actions_df)`` pair.  The memories
        are captured on the penultimate step because the final step can
        reset the environment, wiping them.
        """
        test_env, test_obs = environment.get_sb_env()
        account_memory = []
        actions_memory = []
        test_env.reset()
        n_steps = len(environment.df.index.unique())
        for i in range(n_steps):
            action, _states = model.predict(test_obs)
            test_obs, rewards, dones, info = test_env.step(action)
            # Snapshot the env's memories one step before the episode ends.
            if i == n_steps - 2:
                account_memory = test_env.env_method(method_name="save_asset_memory")
                actions_memory = test_env.env_method(method_name="save_action_memory")
            if dones[0]:
                print("hit end!")
                break
        return account_memory[0], actions_memory[0]

    def __init__(self, env):
        self.env = env

    def get_model(
            self,
            model_name,
            policy="MlpPolicy",
            policy_kwargs=None,
            model_kwargs=None,
            verbose=1,
    ):
        """Instantiate an SB3 model bound to ``self.env``.

        Parameters
        ----------
        model_name : str
            One of "a2c", "ddpg", "td3", "sac", "ppo".
        policy : str
            SB3 policy identifier, default "MlpPolicy".
        policy_kwargs : dict, optional
            Extra arguments forwarded to the policy network.
        model_kwargs : dict, optional
            Algorithm hyper-parameters; defaults to
            ``MODEL_KWARGS[model_name]``.  An "action_noise" entry naming
            a process in ``NOISE`` is replaced by the noise object.
        verbose : int
            SB3 verbosity level.

        Raises
        ------
        NotImplementedError
            If ``model_name`` is not a supported algorithm.
        """
        if model_name not in MODELS:
            raise NotImplementedError(
                f"Model '{model_name}' is not supported; "
                f"choose one of {sorted(MODELS)}"
            )

        if model_kwargs is None:
            model_kwargs = MODEL_KWARGS[model_name]
        # Work on a copy: the previous code mutated the shared dict in
        # MODEL_KWARGS (or the caller's dict) in place, replacing the
        # "action_noise" *name* with a noise *object* and thereby breaking
        # every subsequent get_model() call for the same algorithm.
        model_kwargs = dict(model_kwargs)

        if "action_noise" in model_kwargs:
            n_actions = self.env.action_space.shape[-1]
            model_kwargs["action_noise"] = NOISE[model_kwargs["action_noise"]](
                mean=np.zeros(n_actions), sigma=0.1 * np.ones(n_actions)
            )
        print(model_kwargs)
        model = MODELS[model_name](
            policy=policy,
            env=self.env,
            tensorboard_log=f"{config.TENSORBOARD_LOG_DIR}/{model_name}",
            verbose=verbose,
            policy_kwargs=policy_kwargs,
            **model_kwargs,
        )
        return model

    def train_model(self, model, tb_log_name, total_timesteps=5000):
        """Train ``model`` for ``total_timesteps`` steps and return it."""
        model = model.learn(total_timesteps=total_timesteps, tb_log_name=tb_log_name)
        return model


# Dispatch table mapping types to deepcopy handlers (vendored from the
# stdlib `copy` module); `d` is the short alias the stdlib uses when
# populating it.
_deepcopy_dispatch = d = {}

def _deepcopy_atomic(x, memo):
    """Return atomic (immutable) values unchanged; no copy is needed."""
    return x


dispatch_table = {}


class Error(Exception):
    """Raised when an object cannot be deep-copied."""


error = Error  # backward compatibility

# Jython exposes PyStringMap as an extra dict-like type; on CPython the
# import fails and the name is simply disabled.
try:
    from org.python.core import PyStringMap
except ImportError:
    PyStringMap = None


def _keep_alive(x, memo):
    """Keeps a reference to the object x in the memo.

    Because we remember objects by their id, we have
    to assure that possibly temporary objects are kept
    alive by referencing them.
    We store a reference at the id of the memo, which should
    normally not be used unless someone tries to deepcopy
    the memo itself...
    """
    # setdefault creates the holding list on first use and appends on
    # every subsequent call — same effect as the try/except-KeyError
    # idiom in the stdlib original.
    memo.setdefault(id(memo), []).append(x)


def deepcopy(x, memo=None, _nil=[]):
    """Deep copy operation on arbitrary Python objects.

    Vendored from the stdlib ``copy`` module.  ``memo`` maps object ids
    to already-made copies so shared and recursive structures are copied
    only once; ``_nil`` is a module-private sentinel distinguishing
    "not memoised yet" from a memoised ``None``.
    """

    if memo is None:
        memo = {}

    d = id(x)
    y = memo.get(d, _nil)
    if y is not _nil:
        # Already copied earlier in this traversal; reuse that copy.
        return y

    cls = type(x)

    # 1) Exact-type handler registered in _deepcopy_dispatch.
    copier = _deepcopy_dispatch.get(cls)
    if copier is not None:
        y = copier(x, memo)
    else:
        if issubclass(cls, type):
            # Classes themselves are treated as atomic (returned as-is).
            y = _deepcopy_atomic(x, memo)
        else:
            # 2) The object's own __deepcopy__ hook.
            copier = getattr(x, "__deepcopy__", None)
            if copier is not None:
                y = copier(memo)
            else:
                # 3) Reduction protocol: dispatch_table override first,
                # then __reduce_ex__(protocol 4), then plain __reduce__.
                reductor = dispatch_table.get(cls)
                if reductor:
                    rv = reductor(x)
                else:
                    reductor = getattr(x, "__reduce_ex__", None)
                    if reductor is not None:
                        rv = reductor(4)
                    else:
                        reductor = getattr(x, "__reduce__", None)
                        if reductor:
                            rv = reductor()
                        else:
                            raise Error(
                                "un(deep)copyable object of type %s" % cls)
                if isinstance(rv, str):
                    # A string reduce value means the object is its own copy.
                    y = x
                else:
                    y = _reconstruct(x, memo, *rv)

    # If is its own copy, don't memoize.
    if y is not x:
        memo[d] = y
        _keep_alive(x, memo)  # Make sure x lives at least as long as d
    return y


def _reconstruct(x, memo, func, args,
                 state=None, listiter=None, dictiter=None,
                 deepcopy=deepcopy):
    """Rebuild a copy of ``x`` from a ``__reduce__``-style tuple.

    ``func(*args)`` creates the shell object; ``state`` restores
    attributes (via ``__setstate__`` or ``__dict__``/slots), and
    ``listiter``/``dictiter`` refill sequence/mapping contents.  When
    ``memo`` is not None, every component is deep-copied first.
    """
    deep = memo is not None
    if deep and args:
        args = (deepcopy(arg, memo) for arg in args)
    y = func(*args)
    if deep:
        # Memoise before filling contents so self-referencing
        # structures resolve to the new copy, not to x.
        memo[id(x)] = y

    if state is not None:
        if deep:
            state = deepcopy(state, memo)
        if hasattr(y, '__setstate__'):
            y.__setstate__(state)
        else:
            # A 2-tuple state means (__dict__ state, slot state).
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            else:
                slotstate = None
            if state is not None:
                y.__dict__.update(state)
            if slotstate is not None:
                for key, value in slotstate.items():
                    setattr(y, key, value)

    if listiter is not None:
        if deep:
            for item in listiter:
                item = deepcopy(item, memo)
                y.append(item)
        else:
            for item in listiter:
                y.append(item)
    if dictiter is not None:
        if deep:
            for key, value in dictiter:
                key = deepcopy(key, memo)
                value = deepcopy(value, memo)
                y[key] = value
        else:
            for key, value in dictiter:
                y[key] = value
    return y


def get_daily_return(df, value_col_name="account_value"):
    """Compute daily percentage returns from an account-value history.

    Parameters
    ----------
    df : pd.DataFrame
        Must contain a 'date' column and the ``value_col_name`` column.
    value_col_name : str, optional
        Column holding the portfolio value, default "account_value".

    Returns
    -------
    pd.Series
        Daily returns indexed by a UTC-localised DatetimeIndex; the
        first entry is NaN (no prior value to compare against).
    """
    # Copy so the caller's frame is never mutated.  pandas' own copy()
    # replaces the vendored deepcopy() used previously — it is the
    # idiomatic (and faster) way to clone a DataFrame.
    df = df.copy()
    df["daily_return"] = df[value_col_name].pct_change(1)
    df["date"] = pd.to_datetime(df["date"])
    df.set_index("date", inplace=True, drop=True)
    # NOTE(review): assumes incoming dates are tz-naive; tz_localize
    # raises if the parsed dates already carry a timezone — confirm.
    df.index = df.index.tz_localize("UTC")
    return pd.Series(df["daily_return"], index=df.index)


def value_at_risk(returns, period=None, sigma=2.0):
    """
    Get value at risk (VaR).

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    period : str, optional
        Period over which to calculate VaR. Set to 'weekly',
        'monthly', or 'yearly', otherwise defaults to period of
        returns (typically daily).
    sigma : float, optional
        Standard deviations of VaR, default 2.
    """
    if period is None:
        returns_agg = returns.copy()
    else:
        returns_agg = ep.aggregate_returns(returns, period)

    # mean minus sigma standard deviations of the aggregated returns
    return returns_agg.mean() - sigma * returns_agg.std()


# Metrics computed from the returns series alone.
SIMPLE_STAT_FUNCS = [
    ep.annual_return,
    ep.cum_returns_final,
    ep.annual_volatility,
    ep.sharpe_ratio,
    ep.calmar_ratio,
    ep.stability_of_timeseries,
    ep.max_drawdown,
    ep.omega_ratio,
    ep.sortino_ratio,
    stats.skew,
    stats.kurtosis,
    ep.tail_ratio,
    value_at_risk
]

# Metrics that additionally need benchmark (factor) returns.
FACTOR_STAT_FUNCS = [
    ep.alpha,
    ep.beta,
]

# Maps a stat function's __name__ to the human-readable row label used
# in the perf_stats() output.
STAT_FUNC_NAMES = {
    'annual_return': 'Annual return',
    'cum_returns_final': 'Cumulative returns',
    'annual_volatility': 'Annual volatility',
    'sharpe_ratio': 'Sharpe ratio',
    'calmar_ratio': 'Calmar ratio',
    'stability_of_timeseries': 'Stability',
    'max_drawdown': 'Max drawdown',
    'omega_ratio': 'Omega ratio',
    'sortino_ratio': 'Sortino ratio',
    'skew': 'Skew',
    'kurtosis': 'Kurtosis',
    'tail_ratio': 'Tail ratio',
    'common_sense_ratio': 'Common sense ratio',
    'value_at_risk': 'Daily value at risk',
    'alpha': 'Alpha',
    'beta': 'Beta',
}


def gross_lev(positions):
    """
    Calculates the gross leverage of a strategy.

    Parameters
    ----------
    positions : pd.DataFrame
        Daily net position values.
         - See full explanation in tears.create_full_tear_sheet.

    Returns
    -------
    pd.Series
        Gross leverage.
    """
    # Gross exposure: absolute market value of all non-cash positions,
    # divided by the total portfolio value (cash included).
    non_cash = positions.drop('cash', axis=1)
    gross_exposure = non_cash.abs().sum(axis=1)
    portfolio_value = positions.sum(axis=1)
    return gross_exposure / portfolio_value


def get_txn_vol(transactions):
    """
    Extract daily transaction data from set of transaction objects.

    Parameters
    ----------
    transactions : pd.DataFrame
        Time series containing one row per symbol (and potentially
        duplicate datetime indices) and columns for amount and
        price.

    Returns
    -------
    pd.DataFrame
        Daily transaction volume and number of shares.
         - See full explanation in tears.create_full_tear_sheet.
    """
    # Work on a copy with the index collapsed to midnight so that all
    # trades within one calendar day group together.
    normalized = transactions.copy()
    normalized.index = normalized.index.normalize()

    share_counts = normalized.amount.abs()
    traded_value = share_counts * normalized.price

    daily_shares = share_counts.groupby(share_counts.index).sum()
    daily_value = traded_value.groupby(traded_value.index).sum()
    daily_shares.name = "txn_shares"
    daily_value.name = "txn_volume"
    return pd.concat([daily_value, daily_shares], axis=1)


def get_turnover(positions, transactions, denominator='AGB'):
    """
     - Value of purchases and sales divided
    by either the actual gross book or the portfolio value
    for the time step.

    Parameters
    ----------
    positions : pd.DataFrame
        Contains daily position values including cash.
        - See full explanation in tears.create_full_tear_sheet
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in tears.create_full_tear_sheet
    denominator : str, optional
        Either 'AGB' or 'portfolio_value', default AGB.
        - AGB (Actual gross book) is the gross market
        value (GMV) of the specific algo being analyzed.
        Swapping out an entire portfolio of stocks for
        another will yield 200% turnover, not 100%, since
        transactions are being made for both sides.
        - We use average of the previous and the current end-of-period
        AGB to avoid singularities when trading only into or
        out of an entire book in one trading period.
        - portfolio_value is the total value of the algo's
        positions end-of-period, including cash.

    Returns
    -------
    turnover_rate : pd.Series
        timeseries of portfolio turnover rates.
    """
    traded_value = get_txn_vol(transactions).txn_volume

    if denominator == 'AGB':
        # Actual gross book equals the algo's gross market value; the
        # denominator is the average of the previous and current AGB.
        AGB = positions.drop('cash', axis=1).abs().sum(axis=1)
        denom = AGB.rolling(2).mean()
        # rolling(2) leaves the first entry NaN, so treat "day 0"'s
        # previous AGB as zero.
        denom.iloc[0] = AGB.iloc[0] / 2
    elif denominator == 'portfolio_value':
        denom = positions.sum(axis=1)
    else:
        raise ValueError(
            "Unexpected value for denominator '{}'. The "
            "denominator parameter must be either 'AGB'"
            " or 'portfolio_value'.".format(denominator)
        )

    denom.index = denom.index.normalize()
    return traded_value.div(denom, axis='index').fillna(0)


def perf_stats(returns, factor_returns=None, positions=None,
               transactions=None, turnover_denom='AGB'):
    """
    Calculates various performance metrics of a strategy, for use in
    plotting.show_perf_stats.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    factor_returns : pd.Series, optional
        Daily noncumulative returns of the benchmark factor to which betas are
        computed. Usually a benchmark such as market returns.
         - This is in the same style as returns.
         - If None, do not compute alpha, beta, and information ratio.
    positions : pd.DataFrame
        Daily net position values.
         - See full explanation in tears.create_full_tear_sheet.
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in tears.create_full_tear_sheet.
    turnover_denom : str
        Either AGB or portfolio_value, default AGB.
        - See full explanation in txn.get_turnover.

    Returns
    -------
    pd.Series
        Performance metrics.
    """
    # Use object dtype: the series holds float metric values, and the
    # previous StringDtype relied on pandas silently upcasting on the
    # first assignment.  Local renamed from `stats` so it no longer
    # shadows the module-level `scipy.stats` import.
    metrics = pd.Series(dtype="object")
    for stat_func in SIMPLE_STAT_FUNCS:
        metrics[STAT_FUNC_NAMES[stat_func.__name__]] = stat_func(returns)

    if positions is not None:
        metrics['Gross leverage'] = gross_lev(positions).mean()
        if transactions is not None:
            metrics['Daily turnover'] = get_turnover(positions,
                                                     transactions,
                                                     turnover_denom).mean()
    if factor_returns is not None:
        for stat_func in FACTOR_STAT_FUNCS:
            res = stat_func(returns, factor_returns)
            metrics[STAT_FUNC_NAMES[stat_func.__name__]] = res

    return metrics


def backtest_stats(account_value, value_col_name="account_value"):
    """Compute, print, and return performance statistics for a backtest.

    ``account_value`` is a DataFrame with 'date' and portfolio-value
    columns; the value column name defaults to "account_value".
    """
    daily_returns = get_daily_return(account_value, value_col_name=value_col_name)
    all_stats = perf_stats(
        returns=daily_returns,
        turnover_denom="AGB",
    )
    print(all_stats)
    return all_stats
