"""
@project:Deep Reinforcement Learning Framework (DRLF)
@author:liuzeyu
@time:2022-09-01
@version:v0.2.3
@Update description:
请查看doc文件夹下的update.html记录
"""
# online deploy
# import os
# import datetime
# import itertools
# import numpy as np
# import pandas as pd
# import matplotlib
# import matplotlib.pyplot as plt
# import stable_baselines3 as sb3
# import configparser as cp
# import framework_config as config
# from Datadownloader import TushareDownloader, data_split
# from finrl.preprocessing.preprocessors import FeatureEngineer
# from finrl.env.env_stocktrading import StockTradingEnv
# from finrl.model.models import DRLAgent
# from finrl.trade.backtest import backtest_stats

# offline deploy
import os
import datetime
import configparser as cp
from DRLF_dependency import *
import stable_baselines3 as sb3
from Datadownloader import TushareDownloader, data_split, roll_time_series
import webbrowser


class RLdata:
    """Data pipeline: download, feature-engineer and split stock data,
    driven by an ini configuration file (section/option names read verbatim)."""

    def __init__(self, finrl_config_path: str = 'quick_config.ini'):
        # Every method reads its parameters from this parsed ini file.
        self.config = cp.ConfigParser()
        self.config.read(finrl_config_path, encoding='utf-8')

    @staticmethod
    def create_work_fold():
        """Create the framework's working directories if they are missing.

        Fix: the original repeated a race-prone ``exists()``/``makedirs()``
        pair per directory; ``makedirs(exist_ok=True)`` does both atomically.
        """
        for fold in (config.DATA_SAVE_DIR, config.TRAINED_MODEL_DIR,
                     config.TENSORBOARD_LOG_DIR, config.RESULTS_DIR):
            os.makedirs("./" + fold, exist_ok=True)

    def getData(self):
        """Fetch raw stock data via the Tushare downloader.

        Returns:
            pandas.DataFrame: the downloaded data; also written to CSV when
            ``[save] save_data`` is true.
        """
        start_date = self.config.get('data_para', 'getData_start')
        end_date = self.config.get('data_para', 'getData_end')
        # The ini stores the NAME of a ticker-list attribute on framework_config.
        ticker_list = self.config.get('data_para', 'ticker_list')
        df = TushareDownloader(start_date=start_date, end_date=end_date,
                               ticker_list=getattr(config, ticker_list)).fetch_data()
        if self.config.getboolean('save', 'save_data'):
            export_path = self.config.get('data_path', 'getData_export_path')
            df.to_csv(export_path)
        return df

    def processedData(self):
        """Add technical indicators / turbulence, optionally zero-fill missing
        (date, tic) rows and apply sliding-window sampling.

        NOTE: reads the CSV written by getData, not the in-memory frame.

        Returns:
            pandas.DataFrame: the processed data.
        """
        df = pd.read_csv(self.config.get('data_path', 'getData_export_path'))
        fe = FeatureEngineer(use_technical_indicator=True,
                             tech_indicator_list=config.TECHNICAL_INDICATORS_LIST,
                             use_turbulence=True,
                             user_defined_feature=False)
        processed = fe.preprocess_data(df)
        if self.config.getboolean('operation', 'process_null_rows'):
            # Build the full (date, tic) grid so every ticker has a row for
            # every date in range, then zero-fill the gaps.
            list_ticker = processed["tic"].unique().tolist()
            list_date = list(pd.date_range(processed['date'].min(),
                                           processed['date'].max()).astype(str))
            combination = list(itertools.product(list_date, list_ticker))
            df_full = pd.DataFrame(combination, columns=["date", "tic"]).merge(
                processed, on=["date", "tic"], how="left")
            # Keep only dates that actually occur in the data (drops non-trading days).
            df_full = df_full[df_full['date'].isin(processed['date'])]
            processed = df_full.sort_values(['date', 'tic']).fillna(0)
        if self.config.getboolean('operation', 'sliding_window_sampling'):
            max_timeshift = self.config.getint("data_para", 'sliding_window_max_timeshift')
            min_timeshift = self.config.getint("data_para", 'sliding_window_min_timeshift')
            rolling_direction = self.config.getint("data_para", 'sliding_window_rolling_direction')
            processed = roll_time_series(processed, column_id='tic', column_sort='date',
                                         max_timeshift=max_timeshift,
                                         min_timeshift=min_timeshift,
                                         rolling_direction=rolling_direction)
        if self.config.getboolean('save', 'save_processed_data'):
            export_path = self.config.get('data_path', 'processedData_export_path')
            pd.DataFrame(processed).to_csv(export_path)
        return processed

    def splitData(self):
        """Split the processed CSV into train/trade windows.

        Returns:
            tuple: (train_df, trade_df, stock_dimension, state_space) where
            state_space = 1 + 2 * n_stocks + n_indicators * n_stocks.
        """
        data = pd.read_csv(self.config.get('data_path', 'processedData_export_path'))
        train_start = self.config.get('split_para', 'splitData_train_start')
        train_end = self.config.get('split_para', 'splitData_train_end')
        trade_start = self.config.get('split_para', 'splitData_trade_start')
        trade_end = self.config.get('split_para', 'splitData_trade_end')
        train = data_split(df=data, start=train_start, end=train_end)
        trade = data_split(df=data, start=trade_start, end=trade_end)
        stock_dimension = len(train.tic.unique())
        state_space = 1 + 2 * stock_dimension + len(config.TECHNICAL_INDICATORS_LIST) * stock_dimension
        return train, trade, stock_dimension, state_space


class RLenv:
    """Construct the Stable-Baselines3-wrapped training environment."""

    def __init__(self, finrl_config_path: str = 'quick_config.ini'):
        self.config = cp.ConfigParser()
        self.config.read(finrl_config_path, encoding='utf-8')

    def getEnv(self, state_space, stock_dimension, train):
        """Build a StockTradingEnv over *train* and wrap it for SB3.

        Returns:
            tuple: (vectorised SB3 training env, env_kwargs dict used to build it).
        """
        cfg = self.config
        env_kwargs = dict(
            hmax=cfg.getint('env_para', 'hmax'),
            initial_amount=cfg.getint('env_para', 'initial_amount'),
            buy_cost_pct=cfg.getfloat('env_para', 'buy_cost_pct'),
            sell_cost_pct=cfg.getfloat('env_para', 'sell_cost_pct'),
            state_space=state_space,
            stock_dim=stock_dimension,
            tech_indicator_list=config.TECHNICAL_INDICATORS_LIST,
            action_space=stock_dimension,
            reward_scaling=cfg.getfloat('env_para', 'reward_scaling'),
        )
        gym_env = StockTradingEnv(df=train, **env_kwargs)
        sb_env, _ = gym_env.get_sb_env()
        return sb_env, env_kwargs


class RLagent:
    """Create and train the DRL agent named in the config."""

    def __init__(self, finrl_config_path: str = 'quick_config.ini'):
        self.config = cp.ConfigParser()
        self.config.read(finrl_config_path, encoding='utf-8')

    def getAgent(self, env_train):
        """Build the configured model, train it and optionally save it.

        Fixes: the original constructed a second, identical DRLAgent between
        get_model and train_model — removed. The trained model is now also
        returned (the original returned None), which is backward-compatible.

        Args:
            env_train: SB3-wrapped training environment.

        Returns:
            The trained stable-baselines3 model.
        """
        agent = DRLAgent(env=env_train)
        model_name = self.config.get('agent_para', 'model_name')
        # Hyper-parameters live on framework_config as e.g. PPO_PARAMS.
        para = getattr(config, model_name.upper() + '_PARAMS')
        model = agent.get_model(model_name=model_name, model_kwargs=para)
        total_timesteps = self.config.getint('agent_para', 'total_timesteps')
        trained = agent.train_model(model=model, tb_log_name=model_name,
                                    total_timesteps=total_timesteps)
        if self.config.getboolean('save', 'save_model'):
            trained.save(f"{config.TRAINED_MODEL_DIR}/" + model_name)
        return trained


class RLtrade:
    """Build the trading (evaluation) environment, optionally guarded by a
    turbulence threshold."""

    def __init__(self, finrl_config_path: str = 'quick_config.ini'):
        self.config = cp.ConfigParser()
        self.config.read(finrl_config_path, encoding='utf-8')

    def getTrade(self, trade, env_kwargs):
        """Create the trade StockTradingEnv and its SB3 wrapper.

        Fix: the three config branches each duplicated the StockTradingEnv
        construction; the threshold is now resolved first and the env built once.

        Args:
            trade: trade-period DataFrame from RLdata.splitData.
            env_kwargs: kwargs dict produced by RLenv.getEnv.

        Returns:
            tuple: (e_trade_gym, env_trade, obs_trade).
        """
        processed_full = pd.read_csv(self.config.get('data_path', 'processedData_export_path'))
        turbulence_threshold = None
        if self.config.getboolean('operation', 'self_adaption_threshold'):
            # Derive the threshold from in-sample (training window) turbulence.
            start_date = self.config.get('split_para', 'splitData_train_start')
            end_date = self.config.get('split_para', 'splitData_train_end')
            data_turbulence = processed_full[(processed_full.date < end_date)
                                             & (processed_full.date >= start_date)]
            insample_turbulence = data_turbulence.drop_duplicates(subset=['date'])
            # quantile(..., 1) is the in-sample maximum turbulence value.
            turbulence_threshold = np.quantile(insample_turbulence.turbulence.values, 1)
        elif self.config.getboolean('operation', 'set_turbulence'):
            turbulence_threshold = self.config.getint('trade_para', 'turbulence_threshold')
        if turbulence_threshold is None:
            e_trade_gym = StockTradingEnv(df=trade, **env_kwargs)
        else:
            print("turbulence_threshold=", turbulence_threshold)
            e_trade_gym = StockTradingEnv(df=trade,
                                          turbulence_threshold=turbulence_threshold,
                                          **env_kwargs)
        env_trade, obs_trade = e_trade_gym.get_sb_env()
        return e_trade_gym, env_trade, obs_trade


class RLaction:
    """Run the trained model over the trade environment and report the
    resulting actions and account values."""

    def __init__(self, finrl_config_path: str = 'quick_config.ini'):
        self.config = cp.ConfigParser()
        self.config.read(finrl_config_path, encoding='utf-8')

    def getAction(self, e_trade_gym):
        """Load the saved model, run DRL_prediction, optionally print/save results.

        Fix: the filename timestamp was computed separately for the actions
        and account CSVs, so the two files of one run could carry different
        names; it is now computed once per call.

        Args:
            e_trade_gym: the un-wrapped trade StockTradingEnv.

        Returns:
            tuple: (df_actions, df_account_value).
        """
        model_name = self.config.get('action_para', 'model_name')
        # SB3 exposes algorithm classes as upper-case attributes (e.g. sb3.PPO).
        trained = getattr(sb3, model_name.upper()).load((f"{config.TRAINED_MODEL_DIR}/" + model_name))
        df_account_value, df_actions = DRLAgent.DRL_prediction(model=trained, environment=e_trade_gym)
        # One timestamp for every artifact produced by this run.
        now = datetime.datetime.now().strftime('%Y%m%d-%Hh%M')
        if self.config.getboolean('operation', 'check_actions_df'):
            print('-' * 100)
            print('请查看每个交易日的actions记录：')
            print(df_actions)
        if self.config.getboolean('save', 'save_actions'):
            df_actions.to_csv("./" + config.RESULTS_DIR + "/actions_" + now + '.csv')
        if self.config.getboolean('operation', 'check_account_value'):
            # Column 2: next-day account value minus today's; column 3: the
            # same change as a percentage of today's value.
            df = pd.concat([df_account_value,
                            df_account_value.account_value.shift(-1) - df_account_value.account_value],
                           axis=1, ignore_index=True)
            df = pd.concat([df, df.iloc[:, 2] / df.iloc[:, 1] * 100], axis=1, ignore_index=True)
            df.columns = ['交易日', '账户金额', '当日盈亏', '盈亏百分数']
            print('-' * 100)
            print('请查看每个交易日收盘后的账户金额：')
            print(df)
        if self.config.getboolean('save', 'save_account'):
            df_account_value.to_csv("./" + config.RESULTS_DIR + "/account_" + now + '.csv')
        return df_actions, df_account_value


class RLbacktest:
    """Compute and optionally persist backtest statistics."""

    def __init__(self, finrl_config_path: str = 'quick_config.ini'):
        self.config = cp.ConfigParser()
        self.config.read(finrl_config_path, encoding='utf-8')

    def getBacktest(self, df_account_value):
        """Print backtest stats and save them to CSV according to the config.

        Fix: the original executed a second, unconditional to_csv after the
        threshold check, so a below-threshold result printed "不保存" but was
        saved anyway; saving is now properly gated.

        Args:
            df_account_value: per-day account values from RLaction.getAction.

        Returns:
            The stats from backtest_stats (a DataFrame when saving is enabled).
        """
        print('-' * 100)
        print("请查看算法交易的回测结果：")
        now = datetime.datetime.now().strftime('%Y%m%d-%Hh%M')
        perf_stats_all = backtest_stats(account_value=df_account_value)
        if self.config.getboolean('save', 'save_backtest_result'):
            perf_stats_all = pd.DataFrame(perf_stats_all)
            save_path = "./" + config.RESULTS_DIR + "/perf_stats_all_" + now + '.csv'
            if self.config.getboolean('operation', 'set_backtest_threshold'):
                annual_return_ts = self.config.getint('backtest_para', 'Annual_return_threshold')
                # Row 0 is assumed to hold the annual return (matches the
                # option name) — TODO confirm against backtest_stats output.
                if perf_stats_all.iloc[0, 0] >= annual_return_ts:
                    perf_stats_all.to_csv(save_path)
                else:
                    print('回测结果未达预期，不保存')
            else:
                perf_stats_all.to_csv(save_path)
        return perf_stats_all


class RLplot:
    """Plot buy/sell actions of one ticker on top of its close-price curve."""

    def __init__(self, finrl_config_path: str = 'quick_config.ini'):
        self.config = cp.ConfigParser()
        self.config.read(finrl_config_path, encoding='utf-8')

    @staticmethod
    def plot(tradedata, actionsdata, ticker):
        """Merge trade data with actions and plot the close price plus
        buy/sell markers for *ticker*.

        Fix: the original called reset_index(inplace=True) on a temporary
        column selection and discarded the result — a no-op and a
        SettingWithCopy hazard. The selection is now assigned back, so the
        x-axis is a clean 0..n-1 position index.
        """
        matplotlib.use('TKAgg')
        df_plot = pd.merge(left=tradedata, right=actionsdata, on='date', how='inner')
        plot_df = df_plot.loc[df_plot.tic == ticker]
        # Action columns in actionsdata are keyed by the ticker as a string.
        ticker = str(ticker)
        plot_df = plot_df.loc[:, ['date', 'tic', 'close', ticker]].reset_index(drop=True)
        fig = plt.figure(figsize=(12, 6))
        ax = fig.add_subplot(111)
        ax.plot(plot_df.index, plot_df['close'], label=ticker)
        # Positive action value = buy (red up-triangle); negative = sell (blue down-triangle).
        ax.plot(plot_df.loc[plot_df[ticker] > 0].index, plot_df['close'][plot_df[ticker] > 0],
                label='Buy', linewidth=0, marker='^', c='r')
        ax.plot(plot_df.loc[plot_df[ticker] < 0].index, plot_df['close'][plot_df[ticker] < 0],
                label='Sell', linewidth=0, marker='v', c='b')
        plt.legend(loc='best')
        plt.grid(True)
        plt.title(str(ticker) + '__' + str(plot_df['date'].min()) + '___' + str(plot_df['date'].max()))
        plt.show()

    def plot_main(self):
        """Load the actions CSV named in the config and plot the configured
        stock code against the trade split."""
        action_path = self.config.get('plot', 'action_path')
        stock_code = self.config.getint('plot', 'stock_code')
        df_actions = pd.read_csv(action_path)
        df_actions.set_index(keys='date', inplace=True)
        train, trade, stock_dimension, state_space = RLdata().splitData()
        RLplot.plot(tradedata=trade, actionsdata=df_actions, ticker=stock_code)


def main():
    """Demo pipeline; each step is kept commented out as a template.
    Currently the function only opens the update log in the default browser."""
    # S1 - create the working directories
    # RLdata.create_work_fold()

    # S2 - fetch stock data from the tushare API
    # df = RLdata().getData()

    # S3 - feature engineering and null handling
    # df_p = RLdata().processedData()

    # S4 - split into train / trade sets
    # train, trade, stock_dimension, state_space = RLdata().splitData()

    # S5 - define the environment (env)
    # env_train, env_kwargs = RLenv().getEnv(state_space=state_space, stock_dimension=stock_dimension, train=train)

    # S6 - define the agent
    # RLagent().getAgent(env_train=env_train)

    # S7 - define trading (trade)
    # e_trade_gym, env_trade, obs_trade = RLtrade().getTrade(trade=trade, env_kwargs=env_kwargs)

    # S8 - define actions (action)
    # df_actions, df_account_value = RLaction().getAction(e_trade_gym=e_trade_gym)

    # S9 - backtest
    # RLbacktest().getBacktest(df_account_value=df_account_value)

    # S10 - plot
    # RLplot().plot_main()

    # Open the local update log; NOTE(review): path is machine-specific.
    update_log_path = r'F:\PyProject\RLDemo\doc\update.html'
    webbrowser.open(update_log_path)


# Script entry point: run the demo pipeline only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
