import numpy as np
import random
import os

class QLearningAgent:
    """Tabular Q-learning agent with an epsilon-greedy exploration policy."""

    def __init__(self, state_size, action_size, learning_rate=0.1, discount_factor=0.95,
                 exploration_rate=1.0, exploration_decay=0.995, min_exploration_rate=0.01):
        """
        Initialize the Q-learning agent.

        Parameters:
        - state_size: size of the state space
        - action_size: number of discrete actions
        - learning_rate: step size (alpha) for Q-value updates
        - discount_factor: gamma, weight given to future rewards
        - exploration_rate: initial epsilon for the epsilon-greedy policy
        - exploration_decay: multiplicative decay applied to epsilon after each update
        - min_exploration_rate: lower bound below which epsilon stops decaying
        """
        self.state_size = state_size
        self.action_size = action_size
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.exploration_rate = exploration_rate
        self.exploration_decay = exploration_decay
        self.min_exploration_rate = min_exploration_rate

        # Q-table: maps a hashable state key -> ndarray of Q-values, one per action.
        self.q_table = {}

    def _ensure_state(self, state_key):
        """Add a zero-initialized Q-value row for *state_key* if it is unseen."""
        if state_key not in self.q_table:
            self.q_table[state_key] = np.zeros(self.action_size)

    def get_action(self, state, training=True):
        """
        Select an action for the current state.

        Parameters:
        - state: current state (must be convertible to a tuple)
        - training: when True, explore with probability epsilon

        Returns:
        - the chosen action index as an int
        """
        state_key = self._state_to_key(state)
        self._ensure_state(state_key)

        # Epsilon-greedy: explore with probability epsilon during training.
        if training and random.random() < self.exploration_rate:
            # Explore: pick a uniformly random action.
            return random.randint(0, self.action_size - 1)
        # Exploit: pick the action with the highest Q-value.
        return int(np.argmax(self.q_table[state_key]))

    def learn(self, state, action, reward, next_state, done):
        """
        Apply one Q-learning update and decay the exploration rate.

        Parameters:
        - state: state the action was taken in
        - action: action index that was executed
        - reward: reward received for the transition
        - next_state: state reached after the action
        - done: True if the episode terminated at next_state
        """
        state_key = self._state_to_key(state)
        next_state_key = self._state_to_key(next_state)

        self._ensure_state(state_key)
        self._ensure_state(next_state_key)

        current_q = self.q_table[state_key][action]

        # Terminal transitions bootstrap from 0 instead of the next state's value.
        max_next_q = 0 if done else np.max(self.q_table[next_state_key])

        # Standard Q-learning update:
        # Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
        new_q = current_q + self.learning_rate * (reward + self.discount_factor * max_next_q - current_q)
        self.q_table[state_key][action] = new_q

        # Decay epsilon once per update, clamped at the configured minimum.
        if self.exploration_rate > self.min_exploration_rate:
            self.exploration_rate *= self.exploration_decay

    def _state_to_key(self, state):
        """Convert a state into a hashable dict key (assumes state is iterable)."""
        return tuple(state)

    def save_q_table(self, filepath):
        """
        Save the Q-table to *filepath* as a pickled .npy file.

        Returns True on success, False on failure.
        """
        try:
            # os.makedirs('') raises FileNotFoundError, so only create
            # directories when the path actually has a directory component.
            directory = os.path.dirname(filepath)
            if directory:
                os.makedirs(directory, exist_ok=True)
            # np.save silently appends '.npy' when the suffix is missing;
            # normalize here so load_q_table(filepath) finds the same file.
            if not filepath.endswith(".npy"):
                filepath = filepath + ".npy"
            np.save(filepath, self.q_table)
            print(f"Q表已保存到 {filepath}")
            return True
        except Exception as e:
            print(f"保存Q表时出错: {e}")
            return False

    def load_q_table(self, filepath):
        """
        Load the Q-table from *filepath* (with or without the .npy suffix).

        Returns True on success, False on failure.
        """
        try:
            # Accept the same path that was passed to save_q_table, even if
            # np.save stored it with an appended '.npy' suffix.
            if not os.path.exists(filepath) and not filepath.endswith(".npy"):
                filepath = filepath + ".npy"
            self.q_table = np.load(filepath, allow_pickle=True).item()
            print(f"从 {filepath} 加载Q表成功")
            return True
        except Exception as e:
            print(f"加载Q表时出错: {e}")
            return False