import torch
import numpy as np
import os
import time
from typing import Dict, Tuple

# Tianshou-related imports
from tianshou.data import Collector, Batch, to_numpy
from tianshou.env import DummyVectorEnv
from tianshou.exploration import GaussianNoise
from tianshou.policy import TD3Policy
from tianshou.utils.net.common import Net
from tianshou.utils.net.continuous import Actor, Critic

# Import the custom environment
from env.wireless_power_env_vectorized import WirelessPowerEnv

# Module-level handle to the trained TD3 policy; populated by setup_policy()
# and read by the get_*_action helpers. None until setup_policy() succeeds.
loaded_policy = None
# Compute device shared by every network built in this module.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

def setup_policy(model_path='best_td3_agent.pth'):
    """Build a TD3 policy mirroring the training architecture and load weights.

    On success, the module-level ``loaded_policy`` is set to the policy in
    evaluation mode. On failure (missing file or incompatible state_dict) an
    error message is printed and ``loaded_policy`` is left unchanged.

    Args:
        model_path (str): Path to the saved policy state_dict (.pth file).
    """
    global loaded_policy

    # --- 1. Environment setup (used only to read space metadata) ---
    env = WirelessPowerEnv()
    state_shape = env.observation_space.shape
    action_shape = env.action_space.shape
    max_action = env.action_space.high[0]
    # `device` is the module-level global computed at import time.
    print(f"Using device: {device}")

    # --- 2. Network and policy setup ---
    # These architectures must match the ones used during training, otherwise
    # load_state_dict() below will fail with key/shape mismatches.
    # Actor: 2x256 MLP trunk + continuous action head scaled to max_action.
    actor_net = Net(state_shape, hidden_sizes=[256, 256], device=device)
    actor = Actor(actor_net, action_shape, max_action=max_action, device=device).to(device)
    actor_optim = torch.optim.Adam(actor.parameters(), lr=5e-5)

    # Twin critics (TD3): 2x512 MLPs over the concatenated (state, action).
    critic1_net = Net(state_shape, action_shape, hidden_sizes=[512, 512], concat=True, device=device)
    critic1 = Critic(critic1_net, device=device).to(device)
    critic1_optim = torch.optim.Adam(critic1.parameters(), lr=5e-5)

    critic2_net = Net(state_shape, action_shape, hidden_sizes=[512, 512], concat=True, device=device)
    critic2 = Critic(critic2_net, device=device).to(device)
    critic2_optim = torch.optim.Adam(critic2.parameters(), lr=5e-5)

    # Instantiate the policy. exploration_noise=None: this instance is meant
    # for deterministic evaluation, not data collection.
    policy = TD3Policy(
        actor=actor,
        actor_optim=actor_optim,
        critic1=critic1,
        critic1_optim=critic1_optim,
        critic2=critic2,
        critic2_optim=critic2_optim,
        tau=1e-3,
        gamma=0.99,
        exploration_noise=None,
        action_space=env.action_space,
    )

    # --- 3. Load trained weights ---
    if not os.path.exists(model_path):
        print(f"Error: Model file not found at '{model_path}'")
        return

    print(f"Loading model from: {model_path}")
    try:
        policy.load_state_dict(torch.load(model_path, map_location=device))
        print("Model loaded successfully.")
    except Exception as e:
        # Keep best-effort reporting: an architecture mismatch should not
        # crash the caller, only leave loaded_policy unset.
        print(f"Error loading model state_dict: {e}")
        return

    # Set the policy to evaluation mode (disables dropout/batchnorm updates).
    policy.eval()

    loaded_policy = policy


def get_action_for_state(state: np.ndarray) -> np.ndarray:
    """Compute the deterministic action for a single environment state.

    Args:
        state (np.ndarray): The environment observation as a NumPy array.

    Returns:
        np.ndarray: The policy's action, mapped into the environment's
        action space.

    Raises:
        RuntimeError: If setup_policy() has not been called successfully yet.
    """
    global loaded_policy
    if loaded_policy is None:
        raise RuntimeError("策略尚未加载，请先调用 setup_policy() 函数。")

    # Tianshou policies consume Batch objects; wrap the single observation
    # into a batch of size one under the standard 'obs' key.
    obs_batch = Batch(obs=state[np.newaxis, ...], info={})

    # The forward pass yields a Batch holding the raw (unmapped) action;
    # map_action() rescales it into the action space, and [0] unwraps the
    # single-element batch.
    with torch.no_grad():
        forward_out = loaded_policy(obs_batch)
        raw_action = to_numpy(forward_out.act)
        mapped_action = loaded_policy.map_action(raw_action)[0]
    return mapped_action

def get_best_action_from_noisy_actor(state: np.ndarray, num_samples: int = 100) -> np.ndarray:
    """Sample multiple noisy actions for one state and return the one the
    critic rates best.

    Args:
        state (np.ndarray): The environment state as a NumPy array.
        num_samples (int): Number of noisy action samples to draw.

    Returns:
        np.ndarray: The sampled action with the highest Q-value under critic1,
        mapped into the environment's action space.

    Raises:
        RuntimeError: If setup_policy() has not been called successfully yet.
    """
    global loaded_policy
    if loaded_policy is None:
        raise RuntimeError("策略尚未加载，请先调用 setup_policy() 函数。")

    try:
        # Temporarily switch to training mode so exploration noise is applied.
        # NOTE(review): setup_policy() constructs the policy with
        # exploration_noise=None, so train() may not actually add any noise
        # here -- confirm the loaded policy's noise configuration.
        loaded_policy.train()

        # Tile the single state num_samples times so all candidates are
        # scored in one batched forward pass.
        state_repeated = np.repeat(np.expand_dims(state, axis=0), num_samples, axis=0)
        batch = Batch(obs=state_repeated, info={})

        with torch.no_grad():
            result = loaded_policy(batch)
            noisy_actions = result.act
            # Score every candidate with the first critic and keep the argmax.
            # (batch.obs is a NumPy array while noisy_actions is a tensor;
            # presumably the critic's preprocessing handles the conversion.)
            q_values = loaded_policy.critic1(batch.obs, noisy_actions)
            best_action_index = torch.argmax(q_values)
            best_action = noisy_actions[best_action_index]
            act = to_numpy(best_action)
            action_remap = loaded_policy.map_action(act)
        return action_remap

    finally:
        # Crucial: always restore evaluation mode, even if an error occurred.
        loaded_policy.eval()


if __name__ == '__main__':

    try:
        # --- Build the environment and a hand-crafted query state ---
        env = WirelessPowerEnv()
        RL = 4.0
        rsmax = 0.1
        rpmax = 2.5 * rsmax
        sample_state = np.array(
            env.scale_observation(np.array([RL, rpmax, rsmax])), dtype=np.float32
        )
        env.RL = RL
        env.rsmax = rsmax
        env.rpmax = rpmax
        # sample_state, _ = env.reset()
        setup_policy(model_path='best_td3_agent_20250708-102538.pth')

        print(f"\n输入状态 (形状: {sample_state.shape}):\n{sample_state}")

        # --- Method 1: standard deterministic action ---
        det_action = get_action_for_state(sample_state)
        print(det_action)
        print(f"\n--- 方法1: 确定性动作 ---")
        reward_info = env._calculate_reward(det_action, eval_mode=True)
        print(reward_info)

        # Method 2 (disabled): pick the critic-best action out of 100
        # noisy samples via get_best_action_from_noisy_actor().
        # best_noisy_action = get_best_action_from_noisy_actor(sample_state, num_samples=100)
        # result = env._step(best_noisy_action, eval_mode=True)
        # print(result)
    except (FileNotFoundError, RuntimeError) as e:
        print(f"发生错误: {e}")
