from __future__ import annotations

import itertools
import sys

import pandas as pd
from stable_baselines3.common.logger import configure
import numpy as np

from finrl.agents.stablebaselines3.models import DRLAgent
from finrl.config import DATA_SAVE_DIR
from finrl.config import INDICATORS
from finrl.config import RESULTS_DIR
from finrl.config import TENSORBOARD_LOG_DIR
from finrl.config import TRAINED_MODEL_DIR
from finrl.config_tickers import CHINA_3_TICKER
from finrl.main import check_and_make_directories
from finrl.meta.env_stock_trading.env_stocktrading import StockTradingEnv
from finrl.meta.preprocessor.preprocessors import data_split
from finrl.meta.preprocessor.preprocessors import FeatureEngineer
from finrl.meta.preprocessor.yahoodownloader import YahooDownloader
from finrl.plot import backtest_stats
from finrl.plot import get_baseline
from finrl.plot import plot_return

# matplotlib.use('Agg')


def stock_trading(
        train_start_date: str,
        train_end_date: str,
        trade_start_date: str,
        trade_end_date: str,
        if_store_actions: bool = True,
        if_store_result: bool = True,
        if_using_a2c: bool = True,
        if_using_ddpg: bool = True,
        if_using_ppo: bool = True,
        if_using_sac: bool = True,
        if_using_td3: bool = True,
) -> dict:
    """Download market data, load pre-trained DRL agents and emit trade signals.

    Downloads OHLCV data for ``CHINA_3_TICKER`` over the full
    ``train_start_date``..``trade_end_date`` window, engineers technical
    indicators, builds a ``StockTradingEnv`` over the trade slice, loads the
    selected pre-trained models from ``RESULTS_DIR`` and runs inference.

    When ``if_using_a2c`` is True the function predicts a single-step
    buy/sell/hold signal per ticker from the last trade day's state and
    returns early with that signal dict; the other agents' full-episode
    predictions are then never reached (preserved behavior of the original
    control flow).

    Parameters
    ----------
    train_start_date, train_end_date : str
        Date range ("YYYY-MM-DD") used only to size the download window and
        the (currently unused) training split.
    trade_start_date, trade_end_date : str
        Date range for the trading/inference split.
    if_store_actions, if_store_result : bool
        Accepted for interface compatibility; not used in this
        inference-only variant.
    if_using_a2c, if_using_ddpg, if_using_ppo, if_using_sac, if_using_td3 : bool
        Which pre-trained agents to load/run.

    Returns
    -------
    dict
        Mapping ticker -> {"信号": str, "动作强度": float} when the A2C path
        succeeds; an empty dict otherwise.
    """
    sys.path.append("../FinRL")
    check_and_make_directories(
        [DATA_SAVE_DIR, TRAINED_MODEL_DIR, TENSORBOARD_LOG_DIR, RESULTS_DIR]
    )
    date_col = "date"
    tic_col = "tic"

    # 1. Download raw price data covering both the train and trade windows.
    df = YahooDownloader(
        start_date=train_start_date, end_date=trade_end_date, ticker_list=CHINA_3_TICKER
    ).fetch_data()

    # 2. Feature engineering: technical indicators + VIX + turbulence index.
    fe = FeatureEngineer(
        use_technical_indicator=True,
        tech_indicator_list=INDICATORS,
        use_vix=True,
        use_turbulence=True,
        user_defined_feature=False,
    )
    processed = fe.preprocess_data(df)

    # 3. Re-index onto the full (date x ticker) grid so every ticker has a row
    #    for every trading date, then restrict to dates actually present.
    list_ticker = processed[tic_col].unique().tolist()
    list_date = list(
        pd.date_range(processed[date_col].min(), processed[date_col].max()).astype(str)
    )
    combination = list(itertools.product(list_date, list_ticker))

    init_train_trade_data = pd.DataFrame(
        combination, columns=[date_col, tic_col]
    ).merge(processed, on=[date_col, tic_col], how="left")
    init_train_trade_data = init_train_trade_data[
        init_train_trade_data[date_col].isin(processed[date_col])
    ]
    init_train_trade_data = init_train_trade_data.sort_values([date_col, tic_col])
    # Missing values (tickers without data on a given date) are zero-filled.
    init_train_trade_data = init_train_trade_data.fillna(0)

    init_train_data = data_split(
        init_train_trade_data, train_start_date, train_end_date
    )
    init_trade_data = data_split(
        init_train_trade_data, trade_start_date, trade_end_date
    )

    # 4. Environment configuration.
    stock_dimension = len(init_train_data.tic.unique())
    # State = [cash] + [price, holdings] per stock + indicators per stock.
    state_space = 1 + 2 * stock_dimension + len(INDICATORS) * stock_dimension
    print(f"Stock Dimension: {stock_dimension}, State Space: {state_space}")
    buy_cost_list = sell_cost_list = [0.001] * stock_dimension
    num_stock_shares = [0] * stock_dimension

    # Initial capital: 1,000,000
    initial_amount = 1000000
    env_kwargs = {
        "hmax": 100,  # max number of shares per single trade
        "initial_amount": initial_amount,  # starting cash
        "num_stock_shares": num_stock_shares,  # initial holdings per stock
        "buy_cost_pct": buy_cost_list,  # buy transaction-cost rates
        "sell_cost_pct": sell_cost_list,  # sell transaction-cost rates
        "state_space": state_space,  # state vector dimension
        "stock_dim": stock_dimension,  # number of stocks
        "tech_indicator_list": INDICATORS,  # technical indicator names
        "action_space": stock_dimension,  # one action per stock
        "reward_scaling": 1e-4,  # reward scaling factor
    }

    # NOTE(review): despite the name, this env is built from the TRADE slice
    # (inference-only script) — confirm this is intentional.
    e_train_gym = StockTradingEnv(df=init_trade_data, **env_kwargs)

    env_train, _ = e_train_gym.get_sb_env()
    print(type(env_train))

    # 5. Load the pre-trained models.
    agent = DRLAgent(env=e_train_gym)

    # Load the A2C model.
    if if_using_a2c:
        # Fixed: use the relative RESULTS_DIR path like every other model,
        # instead of a machine-specific absolute Windows path.
        tmp_path = RESULTS_DIR + "/a2c"
        model_a2c = agent.get_model("a2c")
        trained_a2c = model_a2c.load(f"{tmp_path}/a2c_model.zip")

    # Load the DDPG model.
    if if_using_ddpg:
        tmp_path = RESULTS_DIR + "/ddpg"
        model_ddpg = agent.get_model("ddpg")
        trained_ddpg = model_ddpg.load(f"{tmp_path}/ddpg")

    # Load the PPO model.
    if if_using_ppo:
        PPO_PARAMS = {
            "n_steps": 2048,
            "ent_coef": 0.01,
            "learning_rate": 0.00025,
            "batch_size": 128,
        }
        tmp_path = RESULTS_DIR + "/ppo"
        model_ppo = agent.get_model("ppo", model_kwargs=PPO_PARAMS)
        trained_ppo = model_ppo.load(f"{tmp_path}/ppo")

    # Load the SAC model.
    if if_using_sac:
        SAC_PARAMS = {
            "batch_size": 128,
            "buffer_size": 100000,
            "learning_rate": 0.0001,
            "learning_starts": 100,
            "ent_coef": "auto_0.1",
        }
        tmp_path = RESULTS_DIR + "/sac"
        model_sac = agent.get_model("sac", model_kwargs=SAC_PARAMS)
        trained_sac = model_sac.load(f"{tmp_path}/sac")

    # Load the TD3 model.
    if if_using_td3:
        TD3_PARAMS = {
            "batch_size": 100,
            "buffer_size": 1000000,
            "learning_rate": 0.001
        }
        tmp_path = RESULTS_DIR + "/td3"
        model_td3 = agent.get_model("td3", model_kwargs=TD3_PARAMS)
        trained_td3 = model_td3.load(f"{tmp_path}/td3")

    # 6. Run inference with the loaded models.
    if if_using_a2c:
        # Single-step prediction: build the state vector for the last trade
        # day by hand and call the policy directly (DRL_prediction not used).
        latest_data = init_trade_data[init_trade_data.date == trade_end_date]

        if latest_data.empty:
            print(f"错误：在 {trade_end_date} 没有找到交易数据")
            return {}

        # Build the state vector: [cash, prices..., holdings..., indicators...].
        latest_state = np.zeros(state_space)

        try:
            latest_state[0] = env_kwargs['initial_amount']  # current cash

            # NOTE(review): assumes every ticker in CHINA_3_TICKER has a row
            # on trade_end_date; a missing ticker raises IndexError (caught
            # below) — confirm tickers always align with stock_dimension.
            for i, tic in enumerate(CHINA_3_TICKER):
                tic_data = latest_data[latest_data.tic == tic].iloc[0]
                latest_state[1 + i] = tic_data['close']  # share price
                latest_state[1 + stock_dimension + i] = 0  # initial holdings

                # Technical indicators, grouped per ticker.
                for j, indicator in enumerate(INDICATORS):
                    latest_state[1 + 2 * stock_dimension + i * len(INDICATORS) + j] = tic_data[indicator]

            # SB3 expects a batch dimension.
            latest_state = latest_state.reshape(1, -1)

            action, _states = trained_a2c.predict(latest_state)

            # Translate the continuous action into buy/sell/hold signals.
            trade_signals = {}
            for i, tic in enumerate(CHINA_3_TICKER):
                if action[i] > 0:
                    signal = "买入"
                elif action[i] < 0:
                    signal = "卖出"
                else:
                    signal = "持有"
                trade_signals[tic] = {
                    "信号": signal,
                    "动作强度": float(action[i])  # plain Python float
                }

            print(f"\n{trade_end_date} 下一个交易日的预测信号：")
            for tic, signal in trade_signals.items():
                print(f"{tic}: {signal}")

            return trade_signals

        except Exception as e:
            print(f"预测过程中发生错误：{str(e)}")
            print(f"可用的日期范围：{init_trade_data.date.min()} 到 {init_trade_data.date.max()}")
            return {}

    if if_using_ddpg:
        result_ddpg, actions_ddpg = DRLAgent.DRL_prediction(
            model=trained_ddpg,
            environment=e_train_gym
        )

    if if_using_ppo:
        result_ppo, actions_ppo = DRLAgent.DRL_prediction(
            model=trained_ppo,
            environment=e_train_gym
        )

    if if_using_sac:
        result_sac, actions_sac = DRLAgent.DRL_prediction(
            model=trained_sac,
            environment=e_train_gym
        )

    if if_using_td3:
        result_td3, actions_td3 = DRLAgent.DRL_prediction(
            model=trained_td3,
            environment=e_train_gym
        )

    # Fixed: the original `print(actions_a2c)` here always raised NameError —
    # this line is only reachable when if_using_a2c is False (the A2C branch
    # returns on every path), and `actions_a2c` is never defined.
    trade_signals = {}
    return trade_signals


if __name__ == "__main__":
    # Data source: https://finance.yahoo.com/

    # Date windows for the (unused) training split and the trading split.
    TRAIN_START_DATE = "2023-01-01"
    TRAIN_END_DATE = "2024-01-01"
    TRADE_START_DATE = "2024-01-02"
    TRADE_END_DATE = "2025-01-21"

    # Persistence toggles: store the trade actions / the backtest result.
    STORE_ACTIONS = True
    STORE_RESULT = True

    # Per-algorithm toggles: A2C, DDPG, PPO, SAC and TD3.
    USE_A2C = True
    USE_DDPG = True
    USE_PPO = True
    USE_SAC = True
    USE_TD3 = True

    stock_trading(
        train_start_date=TRAIN_START_DATE,
        train_end_date=TRAIN_END_DATE,
        trade_start_date=TRADE_START_DATE,
        trade_end_date=TRADE_END_DATE,
        if_store_actions=STORE_ACTIONS,
        if_store_result=STORE_RESULT,
        if_using_a2c=USE_A2C,
        if_using_ddpg=USE_DDPG,
        if_using_ppo=USE_PPO,
        if_using_sac=USE_SAC,
        if_using_td3=USE_TD3,
    )
