import gymnasium as gym
import numpy as np
import pandas as pd
from stable_baselines3.common.vec_env import DummyVecEnv
from utils.utils import normalize_features_timewise
import random


class NewFuturesEnvLBC(gym.Env):
    """A futures-trading environment for deep RL using the LBC data layout.

    LBC = (Length/Time, Batch/Assets, Channels/Features).

    The environment consumes a 3-D NumPy array (LBC) of market data and
    emits observations of shape (assets, window, features): a per-asset,
    time-normalized window of market history.  Feature channel 0 is used as
    the trade price throughout.

    The LBC layout makes the environment's internal time slicing direct and
    efficient.  The class implements the Gymnasium API (5-tuple ``step``
    return) plus the extra hooks expected by FinRL / FinRL-Meta
    (``get_sb_env``, ``save_asset_memory``, ``save_action_memory``), so it
    can be used with Stable Baselines3 directly.
    """

    def __init__(self, config: dict):
        """Build the environment from a single ``config`` dict.

        Required key:
            df: np.ndarray of shape (T, n_assets, n_features); channel 0 of
                the feature axis is treated as the trade price.

        Optional keys (with defaults): window (32), if_train (True),
        initial_capital (1e8), max_hand (100), min_hand_rate (0),
        leverage (10), buy_cost_pct (1e-3), sell_cost_pct (1e-3),
        reward_scaling (1.0), max_position_per_contract (100),
        initial_future (zeros), N (5), forced_liquidation_penalty (100),
        bonus_factor (0.1), min_profit_threshold (0.001),
        aux_reward_scaling (0.1), max_episode_steps (512).

        Raises:
            ValueError: if the data length (dim 0) is not greater than the
                observation window.
        """
        super().__init__()
        # Unique per-instance ID so log lines from parallel env instances
        # can be told apart.
        import time

        self.unique_id = f"{'Train' if config.get('if_train', True) else 'Validation'}_{time.time_ns()}"
        print(f"--- Environment Instance Created --- ID: {self.unique_id}")

        # --- 1. Data handling and dimension bookkeeping (LBC format) ---
        self.market_data_array = config["df"].astype(np.float32)
        self.total_timesteps = self.market_data_array.shape[0]
        self.future_count = self.market_data_array.shape[1]
        self.market_features_count = self.market_data_array.shape[2]
        self.window = config.get("window", 32)

        if self.total_timesteps <= self.window:
            raise ValueError("Data length (dim 0) must be greater than window size.")

        # --- 2. Core trading parameters ---
        self.if_train = config.get("if_train", True)
        self.initial_capital = config.get("initial_capital", 1e8)
        self.max_hand = config.get("max_hand", 100)
        self.min_hand_rate = config.get("min_hand_rate", 0)
        self.leverage = config.get("leverage", 10)
        self.buy_cost_pct = config.get("buy_cost_pct", 1e-3)
        # NOTE(review): sell_cost_pct is configured but never referenced;
        # step() charges buy_cost_pct for both long and short openings —
        # confirm whether shorts should use sell_cost_pct instead.
        self.sell_cost_pct = config.get("sell_cost_pct", 1e-3)
        self.reward_scaling = config.get("reward_scaling", 1.0)
        self.max_position_per_contract = config.get("max_position_per_contract", 100)
        self.initial_future = (
            np.zeros(self.future_count, dtype=np.int32)
            if config.get("initial_future") is None
            else config["initial_future"]
        )
        # Reward-related parameters.
        # NOTE(review): N, forced_liquidation_penalty and bonus_factor are
        # read here but not referenced by get_reward() or step() — confirm
        # whether they are still meant to shape the reward.
        self.N = config.get("N", 5)
        self.forced_liquidation_penalty = config.get("forced_liquidation_penalty", 100)
        self.bonus_factor = config.get("bonus_factor", 0.1)
        self.min_profit_threshold = config.get("min_profit_threshold", 0.001)
        self.aux_reward_scaling = config.get("aux_reward_scaling", 0.1)

        # Last valid time index into market_data_array.
        self.max_step = self.total_timesteps - 1

        # --- 3. Gymnasium API space definitions ---
        # Observation: one normalized window per asset ->
        # (assets, window, features); see get_state().
        self.observation_space = gym.spaces.Box(
            low=-np.inf,
            high=np.inf,
            shape=(self.future_count, self.window, self.market_features_count),
            dtype=np.float32,
        )
        # Action: one value in [-1, 1] per asset, scaled in step() to a
        # target position of +/- max_position_per_contract lots.
        self.action_space = gym.spaces.Box(
            low=-1, high=1, shape=(self.future_count,), dtype=np.float32
        )

        # --- 4. FinRL compatibility adaptation layer ---
        self.df = pd.DataFrame(index=range(self.max_step))
        self.asset_memory = [self.initial_capital]
        self.rewards_memory = []
        self.actions_memory = []
        self.date_memory = [self.window]

        # --- 5. Internal state variables (populated by reset()) ---
        self.day = None                 # current time index into market_data_array
        self.amount = None              # free cash
        self.future = None              # signed position (lots) per contract
        self.avg_price = None           # average entry price per contract
        self.margin_account = None      # margin posted per contract
        self.float_profit = None        # unrealized PnL summed over contracts
        self.total_asset = None         # cash + float profit + posted margin
        self.prev_total_asset = None
        self.initial_total_asset = None
        self.forced_liquidation = None  # 1 on the step a margin call fired
        self.recent_returns = None
        self.max_episode_steps = config.get("max_episode_steps", 512)
        self.current_step = 0

    def reset(self, *, seed=None, options=None):
        """Start a new episode and return ``(observation, info)``.

        Training episodes start at a random day (within the first 75% of the
        data) with starting cash randomized by +/-5%; evaluation episodes
        always start at ``self.window`` with the exact initial capital.
        """
        super().reset(seed=seed)

        if self.if_train:
            self.future = self.initial_future.astype(np.int32)
            self.amount = self.initial_capital * np.random.uniform(0.95, 1.05)
            start_day_upper_bound = int(self.total_timesteps * 0.75)
            self.day = random.randint(self.window, start_day_upper_bound)
        else:
            self.future = self.initial_future.astype(np.int32)
            self.amount = self.initial_capital
            self.day = self.window

        # Initialize margin and average entry price for any pre-set positions.
        price = self.market_data_array[self.day, :, 0]
        self.margin_account = np.abs(self.future * price / self.leverage)
        self.avg_price = np.where(self.future != 0, price, 0.0)

        # Full total-asset accounting: cash + unrealized PnL + posted margin.
        self.float_profit = np.sum((price - self.avg_price) * self.future)
        self.total_asset = self.amount + self.float_profit + np.sum(self.margin_account)
        self.initial_total_asset = self.total_asset
        self.prev_total_asset = self.total_asset

        self.forced_liquidation = 0
        self.recent_returns = []

        self.asset_memory = [self.initial_total_asset]
        self.rewards_memory = []
        self.actions_memory = []
        self.date_memory = [self.day]
        self.current_step = 0
        print(
            f">>> RESET called on instance ID: {self.unique_id} init_day:{self.day}<<<"
        )

        return self.get_state(), {}

    def step(self, origin_actions: np.ndarray):
        """Execute one trading step.

        ``origin_actions`` (one value in [-1, 1] per contract) is interpreted
        as a *target* position in units of ``max_position_per_contract``; the
        traded quantity is the integer difference from the current position.

        Returns the Gymnasium 5-tuple
        ``(observation, reward, terminated, truncated, info)``.
        """
        target_positions = origin_actions * self.max_position_per_contract
        actions = (target_positions - self.future).astype(np.int32)
        price = self.market_data_array[self.day, :, 0]

        # Dead zone: trades of at most min_action lots are ignored.
        min_action = int(self.max_hand * self.min_hand_rate)

        # Order handling per contract (previously validated logic).
        for idx, act in enumerate(actions):
            price_i = price[idx]
            current_pos = self.future[idx]
            margin_i = self.margin_account[idx]
            avg_p = self.avg_price[idx]

            if act == 0:
                continue
            if act < -min_action:  # go short
                short_volume = -act
                # Close any existing long first, realizing its PnL and
                # releasing its margin back to cash.
                if current_pos > 0:
                    profit = (price_i - avg_p) * current_pos
                    self.amount += margin_i + profit
                    self.margin_account[idx], self.avg_price[idx], self.future[idx] = (
                        0,
                        0,
                        0,
                    )
                # Cap the new short so the absolute position stays within
                # max_position_per_contract.
                max_additional = self.max_position_per_contract + self.future[idx]
                short_volume = min(short_volume, max_additional)
                required_margin = price_i * short_volume / self.leverage
                cost = required_margin * (1 + self.buy_cost_pct)
                if self.amount >= cost:
                    new_pos = self.future[idx] - short_volume
                    if self.future[idx] == 0:
                        self.avg_price[idx] = price_i
                    else:
                        # Volume-weighted average entry price.
                        self.avg_price[idx] = (
                            self.avg_price[idx] * abs(self.future[idx])
                            + price_i * short_volume
                        ) / abs(new_pos)
                    self.future[idx], self.margin_account[idx], self.amount = (
                        new_pos,
                        self.margin_account[idx] + required_margin,
                        self.amount - cost,
                    )
            elif act > min_action:  # go long
                long_volume = act
                # Close any existing short first.
                if current_pos < 0:
                    profit = (avg_p - price_i) * abs(current_pos)
                    self.amount += margin_i + profit
                    self.margin_account[idx], self.avg_price[idx], self.future[idx] = (
                        0,
                        0,
                        0,
                    )
                max_additional = self.max_position_per_contract - self.future[idx]
                long_volume = min(long_volume, max_additional)
                required_margin = price_i * long_volume / self.leverage
                cost = required_margin * (1 + self.buy_cost_pct)
                if self.amount >= cost:
                    new_pos = self.future[idx] + long_volume
                    if self.future[idx] == 0:
                        self.avg_price[idx] = price_i
                    else:
                        self.avg_price[idx] = (
                            self.avg_price[idx] * abs(self.future[idx])
                            + price_i * long_volume
                        ) / abs(new_pos)
                    self.future[idx], self.margin_account[idx], self.amount = (
                        new_pos,
                        self.margin_account[idx] + required_margin,
                        self.amount - cost,
                    )

        # --- Settlement at the next day's price ---
        self.day += 1
        self.current_step += 1
        pre_price = price
        price = self.market_data_array[self.day, :, 0]

        self.float_profit = np.sum((price - self.avg_price) * self.future)
        if (self.amount + self.float_profit) < 0:
            # Margin call: force-close every open position at current price.
            self.forced_liquidation = 1
            for idx in range(self.future_count):
                pos, avg_p, margin_i = (
                    self.future[idx],
                    self.avg_price[idx],
                    self.margin_account[idx],
                )
                if pos != 0:
                    profit = (
                        (price[idx] - avg_p) * pos
                        if pos > 0
                        else (avg_p - price[idx]) * abs(pos)
                    )
                    self.amount += margin_i + profit
                    self.margin_account[idx], self.avg_price[idx], self.future[idx] = (
                        0,
                        0,
                        0,
                    )
        else:
            self.forced_liquidation = 0

        self.float_profit = (
            0.0
            if self.forced_liquidation
            else np.sum((price - self.avg_price) * self.future)
        )
        self.total_asset = self.amount + self.float_profit + np.sum(self.margin_account)

        state = self.get_state()
        price_change_rate = (price - pre_price) / np.maximum(pre_price, 1e-9)
        the_action = actions / self.max_position_per_contract
        reward = self.get_reward(the_action, price_change_rate)

        self.prev_total_asset = self.total_asset
        # True terminal state: end of data, or 95% drawdown.
        terminated = (
            self.day == self.max_step
            or self.total_asset < self.initial_total_asset * 0.05
        )

        # Truncation (not termination): per-episode step budget exhausted.
        truncated = self.current_step >= self.max_episode_steps

        self.asset_memory.append(self.total_asset)
        self.rewards_memory.append(reward)
        self.actions_memory.append(origin_actions)
        self.date_memory.append(self.day)
        # FIX: return terminated and truncated separately, per the Gymnasium
        # API.  The previous code returned (terminated or truncated) in the
        # terminated slot, which made time-limit truncations look like true
        # terminal states to value-bootstrapping algorithms.
        return state, reward, terminated, truncated, {}

    def get_state(self):
        """Return the observation: the last ``window`` rows of market data,
        transposed to (assets, window, features) and normalized per time
        window by ``normalize_features_timewise``."""
        market_data_window = self.market_data_array[self.day - self.window : self.day]
        return normalize_features_timewise(market_data_window.transpose(1, 0, 2))

    def get_reward(self, the_action: np.ndarray, price_change_rate: np.ndarray):
        """Compute the composite reward.

        1. Main reward: log return of total portfolio value versus the
           previous step.
        2. Auxiliary reward: directional-prediction term built from
           ``the_action`` and ``price_change_rate``, penalized by the
           minimum-profit threshold ``u``.

        Returns reward_scaling * main + aux_reward_scaling * aux.
        """
        # --- 1. Main reward ---
        epsilon = 1e-8
        log_return = np.log(self.total_asset / (self.prev_total_asset + epsilon))
        main_reward = log_return

        # --- 2. Auxiliary reward ---
        u = self.min_profit_threshold
        r_preds = the_action * (price_change_rate - np.sign(the_action) * u)
        aux_reward = np.sum(r_preds)

        # --- 3. Combined reward ---
        final_reward = (
            self.reward_scaling * main_reward + self.aux_reward_scaling * aux_reward
        )

        return final_reward

    # --- FinRL compatibility methods ---
    def get_sb_env(self):
        """Wrap this env in a Stable Baselines3 DummyVecEnv and return
        ``(vec_env, initial_obs)``."""
        e = DummyVecEnv([lambda: self])
        obs = e.reset()
        return e, obs

    def save_asset_memory(self):
        """Return a DataFrame of (date, account_value) for the episode."""
        date_list = self.date_memory
        asset_list = self.asset_memory
        print("in save_asset_memory!")
        print("len = ", len(self.asset_memory))
        df_account_value = pd.DataFrame(
            {"date": date_list, "account_value": asset_list}
        )
        return df_account_value

    def save_action_memory(self):
        """Return a DataFrame of raw actions indexed by the day each action
        was taken (date_memory minus the final post-step day)."""
        date_list = self.date_memory[:-1]
        action_list = self.actions_memory
        df_actions = pd.DataFrame(action_list, index=date_list)
        df_actions.columns = [f"future_{i}" for i in range(self.future_count)]
        return df_actions

    def get_plot_data(self):
        """Return ``(actions, prices)`` for plotting.

        NOTE(review): the price slice always starts at ``self.window - 1``,
        but training episodes start at a random day (see reset()), so the
        returned prices only line up with the recorded actions for
        evaluation runs — confirm intended.
        """
        actions = self.actions_memory
        price = self.market_data_array[
            self.window - 1 : len(actions) + self.window - 1, :, 0
        ]
        return actions, price