from __future__ import annotations

import itertools
import sys
import os

import pandas as pd
from stable_baselines3.common.logger import configure
from stable_baselines3 import A2C, DDPG, PPO, SAC, TD3

from finrl.agents.stablebaselines3.models import DRLAgent
from finrl.config import DATA_SAVE_DIR
from finrl.config import INDICATORS
from finrl.config import RESULTS_DIR
from finrl.config import TENSORBOARD_LOG_DIR
from finrl.config import TRAINED_MODEL_DIR
from finrl.config_tickers import CHINA_3_TICKER
from finrl.main import check_and_make_directories
from finrl.meta.env_stock_trading.env_stocktrading import StockTradingEnv
from finrl.meta.preprocessor.preprocessors import data_split
from finrl.meta.preprocessor.preprocessors import FeatureEngineer
from finrl.meta.preprocessor.yahoodownloader import YahooDownloader
from finrl.plot import backtest_stats
from finrl.plot import get_baseline
from finrl.plot import plot_return
from datetime import datetime

# matplotlib.use('Agg')


def load_or_train_model(model_name, agent, model, model_kwargs=None, total_timesteps=None):
    """Load a previously saved model from disk, or train and save a new one.

    Args:
        model_name: Algorithm key — one of "a2c", "ddpg", "ppo", "sac",
            "td3". Also used as the file stem under TRAINED_MODEL_DIR,
            the per-model results subdirectory, and the tensorboard log name.
        agent: DRLAgent wrapping the training environment.
        model: An untrained stable-baselines3 model instance; only used
            when no saved model exists.
        model_kwargs: Unused; retained for backward compatibility with
            existing callers.
        total_timesteps: Training budget passed to ``agent.train_model``
            when training is needed.

    Returns:
        The loaded or freshly trained stable-baselines3 model.

    Raises:
        ValueError: If ``model_name`` is not a recognized algorithm key.
    """
    algo_classes = {
        "a2c": A2C,
        "ddpg": DDPG,
        "ppo": PPO,
        "sac": SAC,
        "td3": TD3,
    }
    if model_name not in algo_classes:
        # Fail fast: the original if/elif chain silently returned None for
        # an unknown name when a stale file happened to exist on disk.
        raise ValueError(f"Unknown model name: {model_name!r}")

    model_path = os.path.join(TRAINED_MODEL_DIR, f"{model_name}.zip")

    if os.path.exists(model_path):
        print(f"Loading existing {model_name} model...")
        return algo_classes[model_name].load(model_path)

    print(f"Training new {model_name} model...")
    # Route SB3 logging (stdout / CSV / tensorboard) into a per-model
    # subdirectory of RESULTS_DIR.
    tmp_path = os.path.join(RESULTS_DIR, model_name)
    new_logger = configure(tmp_path, ["stdout", "csv", "tensorboard"])
    model.set_logger(new_logger)

    trained_model = agent.train_model(
        model=model,
        tb_log_name=model_name,
        total_timesteps=total_timesteps,
    )

    # Persist so the next run can skip training entirely.
    trained_model.save(model_path)
    return trained_model


def stock_trading(
    train_start_date: str,
    train_end_date: str,
    trade_start_date: str,
    trade_end_date: str,
    if_store_actions: bool = True,
    if_store_result: bool = True,
    if_using_a2c: bool = True,
    if_using_ddpg: bool = True,
    if_using_ppo: bool = True,
    if_using_sac: bool = True,
    if_using_td3: bool = True,
):
    """End-to-end DRL stock-trading pipeline on the CHINA_3_TICKER universe.

    Downloads OHLCV data from Yahoo, engineers technical indicators (plus
    VIX and turbulence), trains or loads the selected stable-baselines3
    agents on the training window, backtests them on the trade window
    against the 000001.SS index baseline, prints per-strategy backtest
    stats, and plots cumulative returns to ``stock_trading.png``.

    Args:
        train_start_date: Training window start, "YYYY-MM-DD".
        train_end_date: Training window end, "YYYY-MM-DD".
        trade_start_date: Backtest window start, "YYYY-MM-DD".
        trade_end_date: Backtest window end, "YYYY-MM-DD".
        if_store_actions: Write each enabled agent's daily actions to a CSV.
        if_store_result: Write the merged account-value table to a CSV.
        if_using_a2c: Enable the A2C agent.
        if_using_ddpg: Enable the DDPG agent.
        if_using_ppo: Enable the PPO agent.
        if_using_sac: Enable the SAC agent.
        if_using_td3: Enable the TD3 agent.
    """
    sys.path.append("../FinRL")
    check_and_make_directories(
        [DATA_SAVE_DIR, TRAINED_MODEL_DIR, TENSORBOARD_LOG_DIR, RESULTS_DIR]
    )
    date_col = "date"
    tic_col = "tic"
    # One download spanning both the training and trading windows; split later.
    df = YahooDownloader(
        start_date=train_start_date, end_date=trade_end_date, ticker_list=CHINA_3_TICKER
    ).fetch_data()
    print("\n=== 最近5天的最新交易数据 ===")
    print(df.tail(5))

    fe = FeatureEngineer(
        use_technical_indicator=True,
        tech_indicator_list=INDICATORS,
        use_vix=True,
        use_turbulence=True,
        user_defined_feature=False,
    )

    processed = fe.preprocess_data(df)
    # Build the full (date x ticker) grid so every ticker has a row on every
    # calendar date, then left-join the processed features onto it.
    list_ticker = processed[tic_col].unique().tolist()
    list_date = list(
        pd.date_range(processed[date_col].min(), processed[date_col].max()).astype(str)
    )
    combination = list(itertools.product(list_date, list_ticker))

    init_train_trade_data = pd.DataFrame(
        combination, columns=[date_col, tic_col]
    ).merge(processed, on=[date_col, tic_col], how="left")
    # Drop calendar dates that are not actual trading days.
    init_train_trade_data = init_train_trade_data[
        init_train_trade_data[date_col].isin(processed[date_col])
    ]
    init_train_trade_data = init_train_trade_data.sort_values([date_col, tic_col])

    # Tickers with missing rows on some trading days get zero-filled features.
    init_train_trade_data = init_train_trade_data.fillna(0)

    init_train_data = data_split(
        init_train_trade_data, train_start_date, train_end_date
    )
    init_trade_data = data_split(
        init_train_trade_data, trade_start_date, trade_end_date
    )

    stock_dimension = len(init_train_data.tic.unique())
    # State = cash + (price, holdings) per stock + all indicators per stock.
    state_space = 1 + 2 * stock_dimension + len(INDICATORS) * stock_dimension
    print(f"Stock Dimension: {stock_dimension}, State Space: {state_space}")
    buy_cost_list = sell_cost_list = [0.001] * stock_dimension
    num_stock_shares = [0] * stock_dimension
    # Training budget scaled to the amount of data:
    #   ~half a year: 50,000 steps; one year: 100,000; two years: 200,000.
    total_timesteps = 100000
    # Initial capital: 1,000,000.
    initial_amount = 1000000
    # Trading-environment configuration.
    env_kwargs = {
        "hmax": 50000,  # max number of shares traded per action
        "initial_amount": initial_amount,  # starting cash
        "num_stock_shares": num_stock_shares,  # initial holdings per stock
        "buy_cost_pct": buy_cost_list,  # buy transaction cost rates
        "sell_cost_pct": sell_cost_list,  # sell transaction cost rates
        "state_space": state_space,  # state-vector dimension
        "stock_dim": stock_dimension,  # number of stocks
        "tech_indicator_list": INDICATORS,  # technical indicators used
        "action_space": stock_dimension,  # one action per stock
        "reward_scaling": 1e-4,  # reward scaling factor
    }

    e_train_gym = StockTradingEnv(df=init_train_data, **env_kwargs)

    env_train, _ = e_train_gym.get_sb_env()
    print(type(env_train))

    # Train (or load from disk) each enabled agent.
    if if_using_a2c:
        agent = DRLAgent(env=env_train)
        model_a2c = agent.get_model("a2c")
        trained_a2c = load_or_train_model("a2c", agent, model_a2c, total_timesteps=total_timesteps)

    if if_using_ddpg:
        agent = DRLAgent(env=env_train)
        model_ddpg = agent.get_model("ddpg")
        trained_ddpg = load_or_train_model("ddpg", agent, model_ddpg, total_timesteps=total_timesteps)

    if if_using_ppo:
        agent = DRLAgent(env=env_train)
        PPO_PARAMS = {
            "n_steps": 2048,
            "ent_coef": 0.01,
            "learning_rate": 0.00025,
            "batch_size": 128,
        }
        model_ppo = agent.get_model("ppo", model_kwargs=PPO_PARAMS)
        trained_ppo = load_or_train_model("ppo", agent, model_ppo, total_timesteps=total_timesteps)

    if if_using_sac:
        agent = DRLAgent(env=env_train)
        SAC_PARAMS = {
            "batch_size": 128,
            "buffer_size": 100000,
            "learning_rate": 0.0001,
            "learning_starts": 100,
            "ent_coef": "auto_0.1",
        }
        model_sac = agent.get_model("sac", model_kwargs=SAC_PARAMS)
        trained_sac = load_or_train_model("sac", agent, model_sac, total_timesteps=total_timesteps)

    if if_using_td3:
        agent = DRLAgent(env=env_train)
        TD3_PARAMS = {"batch_size": 100, "buffer_size": 1000000, "learning_rate": 0.001}
        model_td3 = agent.get_model("td3", model_kwargs=TD3_PARAMS)
        trained_td3 = load_or_train_model("td3", agent, model_td3, total_timesteps=total_timesteps)

    # Backtest environment: liquidate when turbulence (vix) exceeds 70.
    e_trade_gym = StockTradingEnv(
        df=init_trade_data,
        turbulence_threshold=70,
        risk_indicator_col="vix",
        **env_kwargs,
    )

    if if_using_a2c:
        result_a2c, actions_a2c = DRLAgent.DRL_prediction(
            model=trained_a2c, environment=e_trade_gym
        )

    if if_using_ddpg:
        result_ddpg, actions_ddpg = DRLAgent.DRL_prediction(
            model=trained_ddpg, environment=e_trade_gym
        )

    if if_using_ppo:
        result_ppo, actions_ppo = DRLAgent.DRL_prediction(
            model=trained_ppo, environment=e_trade_gym
        )

    if if_using_sac:
        result_sac, actions_sac = DRLAgent.DRL_prediction(
            model=trained_sac, environment=e_trade_gym
        )

    if if_using_td3:
        result_td3, actions_td3 = DRLAgent.DRL_prediction(
            model=trained_td3, environment=e_trade_gym
        )

    # Some FinRL versions return (result, actions) as a single tuple from
    # DRL_prediction; normalize both shapes here.
    if if_using_a2c and isinstance(result_a2c, tuple):
        actions_a2c = result_a2c[1]
        result_a2c = result_a2c[0]
    if if_using_ddpg and isinstance(result_ddpg, tuple):
        actions_ddpg = result_ddpg[1]
        result_ddpg = result_ddpg[0]
    if if_using_ppo and isinstance(result_ppo, tuple):
        actions_ppo = result_ppo[1]
        result_ppo = result_ppo[0]
    if if_using_sac and isinstance(result_sac, tuple):
        actions_sac = result_sac[1]
        result_sac = result_sac[0]
    if if_using_td3 and isinstance(result_td3, tuple):
        actions_td3 = result_td3[1]
        result_td3 = result_td3[0]

    current_date = datetime.now().strftime('%Y-%m-%d')
    # Persist the per-agent trade actions. (Plain `if` statements replace the
    # original `x.to_csv(...) if cond else None` expression statements.)
    if if_store_actions:
        if if_using_a2c:
            actions_a2c.to_csv(f"actions_a2c_{current_date}.csv")
        if if_using_ddpg:
            actions_ddpg.to_csv(f"actions_ddpg_{current_date}.csv")
        if if_using_td3:
            actions_td3.to_csv(f"actions_td3_{current_date}.csv")
        if if_using_ppo:
            actions_ppo.to_csv(f"actions_ppo_{current_date}.csv")
        if if_using_sac:
            actions_sac.to_csv(f"actions_sac_{current_date}.csv")

    # Baseline: SSE Composite Index (000001.SS) closing prices.
    dji_ = get_baseline(ticker="000001.SS", start=trade_start_date, end=trade_end_date)
    dji = pd.DataFrame()
    dji[date_col] = dji_[date_col]
    dji["000001.SS"] = dji_["close"]
    # Select the rows between trade_start and trade_end (end excluded), since
    # some returned values may fall outside this region.
    dji = dji.loc[
        (dji[date_col] >= trade_start_date) & (dji[date_col] < trade_end_date)
    ]

    result = dji

    # Merge each enabled agent's account-value series onto the baseline table.
    if if_using_a2c:
        result_a2c.rename(columns={"account_value": "A2C"}, inplace=True)
        result = pd.merge(result, result_a2c, how="left")
    if if_using_ddpg:
        result_ddpg.rename(columns={"account_value": "DDPG"}, inplace=True)
        result = pd.merge(result, result_ddpg, how="left")
    if if_using_td3:
        result_td3.rename(columns={"account_value": "TD3"}, inplace=True)
        result = pd.merge(result, result_td3, how="left")
    if if_using_ppo:
        result_ppo.rename(columns={"account_value": "PPO"}, inplace=True)
        result = pd.merge(result, result_ppo, how="left")
    if if_using_sac:
        result_sac.rename(columns={"account_value": "SAC"}, inplace=True)
        result = pd.merge(result, result_sac, how="left")

    # Remove rows containing NaN (dates missing in any series).
    result = result.dropna(axis=0, how="any")

    # Collect strategy column names (includes the baseline index column).
    col_strategies = []
    for col in result.columns:
        if col != date_col and col != "" and "Unnamed" not in col:
            col_strategies.append(col)

    # Rescale the baseline so its first value equals initial_amount, making it
    # directly comparable with the agents' account values.
    col = "000001.SS"
    result[col] = result[col] / result[col].iloc[0] * initial_amount
    result = result.reset_index(drop=True)

    # Print backtest statistics per strategy.
    for col in col_strategies:
        stats = backtest_stats(result, value_col_name=col)
        print("\nstats of " + col + ": \n", stats)

    # Persist the merged result table.
    print("result: ", result)
    if if_store_result:
        result.to_csv(f"result{current_date}.csv")

    # Plot cumulative returns for all strategies.
    plot_return(
        result=result,
        column_as_x=date_col,
        if_need_calc_return=True,
        savefig_filename="stock_trading.png",
        xlabel="Date",
        ylabel="Return",
        if_transfer_date=True,
        num_days_xticks=20,
    )


if __name__ == "__main__":
    # Data source: https://finance.yahoo.com/
    #
    # Algorithm trade-offs (why PPO/SAC/TD3 are disabled by default):
    # - A2C:  computationally cheap and fast to train with low memory use,
    #         but less stable, hyperparameter-sensitive, and weaker convergence.
    # - DDPG: handles continuous action spaces with good sample efficiency in
    #         deterministic environments, but training is unstable, prone to
    #         overfitting, and sensitive to noise.
    # - PPO:  very stable, simple to implement and debug, broadly applicable,
    #         but sample-inefficient, slower to train, and compute-hungry.
    # - SAC:  sample-efficient and stable with strong exploration, but complex
    #         to implement, computationally heavy, and slow to train.
    # - TD3:  outperforms DDPG by curbing value overestimation and training
    #         more stably, but is computationally complex, has many
    #         hyperparameters, and converges slowly.
    #
    # NOTE(review): the trade window (2024-12-01 .. 2025-01-23) lies inside
    # the training window (2023-12-01 .. 2025-01-23), so this backtest is
    # in-sample — confirm the overlap is intentional.
    stock_trading(
        train_start_date="2023-12-01",
        train_end_date="2025-01-23",
        trade_start_date="2024-12-01",
        trade_end_date="2025-01-23",
        if_store_actions=True,  # save each agent's daily actions as CSV
        if_store_result=True,  # save the merged account-value table as CSV
        if_using_a2c=True,
        if_using_ddpg=True,
        if_using_ppo=False,
        if_using_sac=False,
        if_using_td3=False,
    )
