import math
import threading

import torch
import numpy as np
from typing import List, Optional, Dict, Any, Tuple
from collections import deque
import random
import copy
from src.beckend.dpn import DeepQNetwork

# Import the ExperiencePool replay-backend components
from src.beckend.ExperiencePool import Experience as PoolExperience
from src.beckend.ExperiencePool import PrioritizedTransitionReplay, NStepTransitionAccumulator

from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from Environment import Environment

# State vector type: (spatial map array, agent feature array)
StateVector = tuple[np.ndarray, np.ndarray]


class Experience:
    """One recorded transition from our game environment.

    Bundles the pre/post states with the action taken, the reward observed,
    and bookkeeping ids (episode and round number). Converts to the
    ExperiencePool record type and to/from a JSON-friendly dict.
    """

    def __init__(self, oldState: 'StateVector', newState: 'StateVector',
                 action: int, envReward: float, episode_id: str, round_number: int):
        self.oldState = oldState          # state before the action
        self.newState = newState          # state after the action
        self.action = action              # chosen action index
        self.envReward = envReward        # reward returned by the environment
        self.episode_id = episode_id      # id of the owning episode
        self.round_number = round_number  # round index within that episode

    def to_pool_experience(self) -> 'PoolExperience':
        """Convert to the Experience record type used by ExperiencePool."""
        return PoolExperience(
            state_last=self.oldState,
            action=self.action,
            reward=self.envReward,
            state=self.newState,
            game_id=self.episode_id,
            round_num=self.round_number
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict; numpy arrays become nested lists."""
        old_map, old_feat = self.oldState
        new_map, new_feat = self.newState
        return {
            'oldState': (old_map.tolist(), old_feat.tolist()),
            'newState': (new_map.tolist(), new_feat.tolist()),
            'action': self.action,
            'envReward': self.envReward,
            'episode_id': self.episode_id,
            'round_number': self.round_number
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'Experience':
        """Rebuild an Experience from the dict produced by to_dict()."""
        old_pair = data['oldState']
        new_pair = data['newState']
        return cls(
            oldState=(np.array(old_pair[0]), np.array(old_pair[1])),
            newState=(np.array(new_pair[0]), np.array(new_pair[1])),
            action=data['action'],
            envReward=data['envReward'],
            episode_id=data['episode_id'],
            round_number=data['round_number']
        )


class DecisionMaker:
    """Epsilon-greedy action selector over network Q-values.

    Parameters:
        epsilon: initial exploration rate
        epsilon_min: lower bound of the exploration rate
        epsilon_decay: multiplicative factor applied by decay_epsilon()
    """

    def __init__(self, epsilon: float = 1, epsilon_min: float = 0,
                 epsilon_decay: float = 0.95):
        self.epsilon = epsilon
        self.epsilon_min = epsilon_min
        self.epsilon_decay = epsilon_decay

    def make_decision(self, env: 'Environment', q_values: np.ndarray, training: bool = True) -> int:
        """Pick an action index from the Q-values (epsilon-greedy policy).

        Parameters:
            env: environment; only used in evaluation mode to query valid actions
            q_values: Q-value array, shaped [A] or [1, A] (batch of one)
            training: when True, explore with probability epsilon

        Returns:
            index of the selected action as a plain Python int
        """
        # Work on a 1-D view: the network emits a [1, action_size] batch.
        # BUG FIX: the old code applied len() to the 2-D array, so the
        # exploration branch drew from range(1) and always returned action 0.
        q_values = np.asarray(q_values).flatten()
        if training:
            if random.random() <= self.epsilon:
                # Explore: uniform over all actions.
                return random.randrange(q_values.size)
            # Exploit: greedy over all actions (int() avoids leaking np.int64).
            return int(np.argmax(q_values))

        # Evaluation: greedy over the environment's valid actions only.
        valid_actions = env.agent.get_valid_actions(env)
        max_q = -math.inf
        max_i = -1
        for i in valid_actions:
            if q_values[i].item() > max_q:
                max_q = q_values[i]
                max_i = i
        # Fall back to the global argmax if no valid action was found.
        return int(np.argmax(q_values)) if max_i == -1 else max_i

    def decay_epsilon(self) -> None:
        """Multiplicatively decay epsilon, stopping at epsilon_min."""
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

    def set_epsilon(self, epsilon: float) -> None:
        """Set the exploration rate, clamped to [epsilon_min, 1.0]."""
        self.epsilon = max(self.epsilon_min, min(epsilon, 1.0))

    def get_config(self) -> Dict[str, Any]:
        """Return the current exploration configuration."""
        return {
            'epsilon': self.epsilon,
            'epsilon_min': self.epsilon_min,
            'epsilon_decay': self.epsilon_decay
        }


class ExperiencePoolAdapter:
    """Adapts ExperiencePool's replay buffers to this module's interface."""

    def __init__(self, capacity: int = 20000, batch_size: int = 64,
                 use_per: bool = True, n_steps: int = 6):
        """Create the adapter.

        Parameters:
            capacity: maximum number of stored transitions
            batch_size: number of transitions returned per batch
            use_per: when True, use prioritized experience replay; otherwise
                a plain uniform-sampling deque
            n_steps: horizon of the N-step return accumulator
        """
        self.capacity = capacity
        self.batch_size = batch_size
        self.use_per = use_per
        self.n_steps = n_steps
        if use_per:
            # Prioritized replay backend (fixed IS exponent, seeded RNG).
            self.pool = PrioritizedTransitionReplay(
                capacity=capacity,
                priority_exponent=0.6,
                importance_sampling_exponent=lambda t: 0.4,
                uniform_sample_probability=0.1,
                normalize_weights=True,
                random_state=np.random.RandomState(42)
            )
        else:
            # Plain FIFO buffer for uniform sampling in non-PER mode.
            self.origin_experiences = deque(maxlen=capacity)

        # Folds single-step transitions into N-step ones before storage.
        self.n_step_accumulator = NStepTransitionAccumulator(n=self.n_steps, discount=0.7)

    def add_experience(self, exp: Experience) -> None:
        """Store one transition (via the N-step accumulator in PER mode)."""
        if not self.use_per:
            self.origin_experiences.append(exp)
            return
        accumulated = self.n_step_accumulator.add_experience(exp.to_pool_experience())
        if accumulated is not None:
            # Fresh transitions enter with initial priority 1.0.
            self.pool.add(accumulated, priority=1.0)

    def remove_episode(self, id: str) -> None:
        """Flush an episode's pending N-step transitions into the pool (PER only)."""
        if not self.use_per:
            return
        if id not in self.n_step_accumulator.experiences:
            return
        pending = self.n_step_accumulator.experiences.pop(id)
        if pending is not None:
            for item in pending:
                self.pool.add(item, priority=1.0)

    def get_next_batch(self) -> Tuple[List[Experience], Optional[list[int]], Optional[np.ndarray]]:
        """Sample one training batch.

        Returns:
            (experiences, ids, weights). ids and weights are None unless PER
            is enabled; all three are empty/None while the pool holds fewer
            than batch_size transitions.
        """
        if not self.use_per:
            # Uniform sampling from the plain buffer.
            if len(self.origin_experiences) < self.batch_size:
                return [], None, None
            return random.sample(self.origin_experiences, self.batch_size), None, None

        if self.pool.size < self.batch_size:
            return [], None, None
        batch, ids, weights = self.pool.sample(self.batch_size)
        # Map pool records back into this module's Experience type.
        experiences = [
            Experience(
                oldState=item.state_last,
                newState=item.state,
                action=item.action,
                envReward=item.reward,
                episode_id=item.game_id,
                round_number=item.round_num
            )
            for item in batch
        ]
        return experiences, ids, weights

    def update_priorities(self, ids: List[int], priorities: List[float]) -> None:
        """Forward new priorities to the PER backend (no-op in non-PER mode)."""
        if self.use_per:
            self.pool.update_priorities(ids, priorities)

class ModelManager:
    """Model manager - integrates the DQN, the experience pool and the decision maker.

    Owns the online Q-network plus a deep-copied target network, routes
    experiences through an ExperiencePoolAdapter (PER + N-step by default),
    and selects actions through an epsilon-greedy DecisionMaker. Two locks
    serialize experience-pool mutation and inference respectively.
    """

    def __init__(self,
                 state_shape: tuple = (17, 17, 6),
                 action_size: int = 12,
                 use_double_dqn: bool = True,
                 use_dueling: bool = True,
                 use_per: bool = True,
                 n_step: int = 6,
                 memory_size: int = 20000,
                 batch_size: int = 64,
                 target_update_freq: int = 1000,
                 learning_rate: float = 0.001,
                 gamma: float = 0.99):
        """Initialize the model manager.

        Parameters:
            state_shape: spatial observation shape as (H, W, C)
            action_size: number of discrete actions
            use_double_dqn: enable Double-DQN targets in the network
            use_dueling: enable the dueling head in the network
            use_per: enable prioritized experience replay
            n_step: N-step return horizon for the replay pool
            memory_size: replay-buffer capacity
            batch_size: training batch size
            target_update_freq: train steps between target-network syncs
            learning_rate: optimizer learning rate
            gamma: discount factor
        """
        self.state_shape = state_shape
        self.action_size = action_size
        self.training_mode = True
        self.train_step = 0
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        # Online Q-network
        self.qNetwork = DeepQNetwork(
            state_shape=state_shape,
            action_size=action_size,
            use_double_dqn=use_double_dqn,
            use_dueling=use_dueling,
            batch_size=batch_size,
            target_update_freq=target_update_freq,
            learning_rate=learning_rate,
            gamma=gamma
        )

        # Target network: deep copy of the online network, kept in eval mode
        self.targetNetwork = copy.deepcopy(self.qNetwork)
        self.targetNetwork.eval()

        # Experience pool adapter
        self.experiencePool = ExperiencePoolAdapter(
            capacity=memory_size,
            batch_size=batch_size,
            use_per=use_per,
            n_steps=n_step
        )
        self.n_steps = n_step
        # Epsilon-greedy decision maker
        self.decisionMaker = DecisionMaker()
        self.qNetwork = self.qNetwork.to(self.device)
        self.targetNetwork = self.targetNetwork.to(self.device)
        self._lock = threading.Lock()      # guards experience-pool mutation
        self._get_lock = threading.Lock()  # serializes inference in make_decision

    def make_decision(self, env: 'Environment', state: StateVector) -> int:
        """Select an action for the given state.

        Runs a no-grad forward pass of the online Q-network and delegates
        the final choice (including epsilon-greedy exploration when in
        training mode) to the DecisionMaker.
        """
        with self._get_lock:
            # Compute Q-values for the current state
            spatial_map, agent_features = state
            with torch.no_grad():
                map_tensor = torch.FloatTensor(spatial_map).permute(2, 0, 1).unsqueeze(0).to(self.device) # [1, C, 17, 17] HWC->CHW + batch dim; NOTE(review): old comment said C=5 but the default state_shape has C=6 — confirm
                agent_tensor = torch.FloatTensor(agent_features).unsqueeze(0).to(self.device) # [1, F] agent feature vector with batch dim
                # print(map_tensor.shape, agent_tensor.shape)
                q_values = self.qNetwork.forward(
                    map_tensor, agent_tensor
                )
                q_values = q_values.cpu().numpy()
            # Delegate the final action choice
            action = self.decisionMaker.make_decision(
                env,
                q_values,
                training=self.training_mode
            )

            return action

    def commit_experience(self, exp: Experience) -> None:
        """Add one experience to the pool (ignored outside training mode)."""
        if not self.training_mode:
            return
        with self._lock:
            self.experiencePool.add_experience(exp)

    def episode_clear(self, id: str) -> None:
        """Flush the given episode's pending N-step transitions into the pool."""
        with self._lock:
            self.experiencePool.remove_episode(id)

    def train_one_batch(self) -> float:
        """Run one training step on a sampled batch.

        Returns the batch loss, or 0 when not in training mode, when the
        pool cannot yet supply a full batch, or when sampling raises.
        Every target_update_freq steps the target network is re-synced.
        """
        if not self.training_mode:
            return 0

        # Sample a batch from the experience pool
        try:
            experiences, ids, weights = self.experiencePool.get_next_batch()
        except Exception:
            # Best-effort: treat any sampling failure as "no batch available"
            return 0
        if len(experiences) == 0:
            return 0

        # Stack the batch into device tensors
        states_old = torch.FloatTensor(np.array([exp.oldState[0] for exp in experiences])).to(self.device)      # [B, H, W, C]
        states_agent = torch.FloatTensor(np.array([exp.oldState[1] for exp in experiences])).to(self.device)   # [B, F]
        actions = torch.LongTensor([exp.action for exp in experiences]).to(self.device)              # [B]
        rewards = torch.FloatTensor([exp.envReward for exp in experiences]).to(self.device)          # [B] — N-step accumulated rewards
        next_states = torch.FloatTensor(np.array([exp.newState[0] for exp in experiences])).to(self.device)    # [B, H, W, C]
        next_states_agent = torch.FloatTensor(np.array([exp.newState[1] for exp in experiences])).to(self.device) # [B, F]

        if weights is not None:
            weights = torch.FloatTensor(weights).to(self.device)  # [B] PER importance-sampling weights
        else:
            weights = None
        
        # Train via the network's replay step; also yields per-sample TD errors
        loss, td_errors = self.qNetwork.replay(self.targetNetwork,
            states_old, states_agent, actions, rewards, 
            next_states, next_states_agent, weights, n_steps = self.n_steps
        )

        if self.experiencePool.use_per and ids is not None:
            # New priorities: |TD error| with a small floor to keep them positive
            priorities = np.abs(td_errors) + 1e-6
            self.experiencePool.update_priorities(ids, priorities.tolist())
            
        self.train_step += 1
        if self.train_step % 10 == 0:
            print(f"batch {self.train_step}, loss {loss}")
        if self.train_step % self.qNetwork.target_update_freq == 0:
            self._update_target_network() # target sync (moved here from the DQN class)
            print("已覆盖目标网络")
        return loss

    def _update_target_network(self) -> None:
        """Copy the online network's parameters into the target network."""
        self.targetNetwork.load_state_dict(self.qNetwork.state_dict())

    def set_training_state(self, state: bool) -> None:
        """Toggle training mode; also switches the Q-network between train()/eval()."""
        self.training_mode = state

        if not state:
            self.qNetwork.eval()
        else:
            self.qNetwork.train()

    def load_model(self, path: str) -> None:
        """Load network parameters from `path` and resync the target network.

        Failures are caught and reported to stdout rather than raised.
        """
        try:
            self.qNetwork.load(path)
            self._update_target_network()
            print(f"模型已从 {path} 加载")
        except Exception as e:
            print(f"加载模型失败: {e}")

    def save_model(self, path: str) -> None:
        """Save network parameters to `path` (failures are caught and reported)."""
        try:
            self.qNetwork.save(path)
            print(f"模型已保存到 {path}")
        except Exception as e:
            print(f"保存模型失败: {e}")

    def get_config(self) -> Dict[str, Any]:
        """Return a snapshot of the manager's current configuration."""
        return {
            'state_shape': self.state_shape,
            'action_size': self.action_size,
            'training_mode': self.training_mode,
            'train_step': self.train_step,
            'qnetwork_config': self.qNetwork.get_config(),
            'decision_maker_config': self.decisionMaker.get_config(),
            'use_per': self.experiencePool.use_per
        }

