from typing import Optional, Union, List

import pandas as pd
import numpy as np
import torch


class StockTradingEnvironment:
    """Single-stock trading environment with binary long/flat actions.

    The agent observes a ``window_size``-row slice of technical-indicator
    features (``INPUT_COL``) with price columns normalized by the first
    close of the window, chooses a position in ``{0, 1}``, and receives a
    fee-adjusted return as reward.
    """

    # Feature columns fed to the agent as the observation.
    INPUT_COL = [
        'open', 'high', 'low', 'close',
        'change', "bbi_signal", "bias_signal", "cci_signal",
        "dma_signal", "expma_signal", "kdj_signal", "macd_signal",
        "mfi_signal", "mi_signal", "mtm_signal", "priceosc_signal",
        "psy_signal", "roc_signal"
    ]
    # Older feature set — kept for backward compatibility with external
    # callers; not referenced inside this class.
    past_input_col = [
        'open0', 'high0', 'low0', 's_cci',
        's_mtm', 's_macd', 's_dma', 's_bias',
        's_k', 's_d', 's_j', 's_mfi',
        's_priceosc', 's_psy', 's_roc', 's_rocma',
        's_wr', 's_change', 'ma_5', 'ma_10',
        'ma_20', 'ma_30', 'close', 'bbi'
    ]

    def __init__(self, stock_data: pd.DataFrame, window_size: int):
        """
        :param stock_data: OHLC + indicator DataFrame; must contain every
            column in ``INPUT_COL`` and at least 65 warm-up rows before the
            first observation window.
        :param window_size: number of rows in each observation window.
        """
        # Instance-level alias kept for backward compatibility with code
        # that reads/overrides it per instance.
        self.INPUT_COL = StockTradingEnvironment.INPUT_COL
        self.stock_data = stock_data
        self.window_size = window_size
        # 65 rows of history are skipped so step() can look back 65 closes
        # when slicing the price series -- TODO confirm intended warm-up.
        self.current_step = 65
        # Position history: [-1] is today's position, [-2] yesterday's.
        self.position_his = [0]

    def reset(self, **kwargs):
        """Re-initialize the episode and return ``[state, position=0]``.

        Any ``__init__`` keyword may be overridden; omitted ones default to
        the current values, so a plain ``env.reset()`` now works (the old
        behavior of passing both kwargs explicitly is unchanged).
        """
        kwargs.setdefault('stock_data', self.stock_data)
        kwargs.setdefault('window_size', self.window_size)
        self.__init__(**kwargs)
        return [self.get_state(), 0]

    def get_state(self):
        """Return the current observation window as a DataFrame.

        Price-denominated columns are divided by the first close of the
        window so the observation is scale-free; signal columns pass
        through untouched.
        """
        window = self.stock_data.iloc[
            self.current_step: self.current_step + self.window_size].copy()
        price_cols = ['ma_5', 'ma_10', 'ma_20', 'ma_30', 'close', 'bbi',
                      'open', 'high', 'low']
        base_price = window['close'].values[0]
        for col in price_cols:
            if col in window.columns:
                # Vectorized division (was a per-element .apply lambda).
                window[col] = window[col] / base_price
        return window[self.INPUT_COL]

    @staticmethod
    def _calculate_reward(price_list,
                          current_position,
                          previous_position,
                          basis_point_fee=0.001,
                          risk_free_profit=0.0001):
        """Compute the reward for one step.

        :param price_list: close prices up to and including today.
        :param current_position: today's position (0, 0.5 or 1).
        :param previous_position: yesterday's position (0, 0.5 or 1).
        :param basis_point_fee: proportional transaction fee per unit of
            position change.
        :param risk_free_profit: daily risk-free rate earned on the
            un-invested fraction.
        :return: tuple ``(net_rt * 100, net_rt, rt)`` where ``rt`` is the
            raw market return and ``net_rt`` the fee-adjusted portfolio
            return.
        """
        # Raw one-day market return r_t.
        current_price = price_list[-1]
        previous_price = price_list[-2]
        rt = current_price / previous_price - 1

        # NOTE(review): the original also computed a short/long volatility
        # ratio (sigma_t) here but never used it, and it produced 0/0 NaN
        # warnings on flat price windows — removed as dead code.
        assert current_position in [0, 0.5, 1] \
            and previous_position in [0, 0.5, 1]

        # Fee-adjusted portfolio return (the original computed this exact
        # expression twice into Rt and real_rt; computed once here).
        net_rt = (1 + risk_free_profit * (1 - current_position)
                  + current_position * rt) * (
                     1 - basis_point_fee
                     * abs(current_position - previous_position)) - 1
        return net_rt * 100, net_rt, rt

    def step(self, action):
        """Apply ``action`` (index into ``[0, 1]``) and advance one day.

        :return: ``(next_state, reward, done)`` where ``next_state`` is
            ``[observation, position]`` and ``reward`` is the tuple from
            :meth:`_calculate_reward`.
        """
        action_space = [0, 1]
        action = action_space[action]
        self.position_his.append(action)

        close_idx = self.stock_data.columns.get_loc('close')
        # 66 closes ending at "today" (65 look-back rows + today).
        prices = self.stock_data.iloc[
            self.current_step + self.window_size - 65:
            self.current_step + self.window_size + 1,
            close_idx
        ].values
        reward = self._calculate_reward(
            prices,
            self.position_his[-1],
            self.position_his[-2] if len(self.position_his) >= 2 else 0
        )
        self.current_step += 1

        # '>=' instead of '==' so the episode still terminates even if the
        # counter ever skips past the exact boundary (7-row safety margin
        # at the end of the data -- TODO confirm intended margin).
        done = self.current_step + self.window_size >= len(self.stock_data) - 7
        if not done:
            next_state = [self.get_state(), action]
            done = 0
        else:
            zeros = torch.zeros([1, self.window_size, len(self.INPUT_COL)])
            # Bug fix: the original called .cuda() unconditionally, which
            # crashes on CPU-only machines; move to GPU only if available.
            next_state = [zeros.cuda() if torch.cuda.is_available() else zeros,
                          0]
            done = 1
        return next_state, reward, done
