import torch
import numpy as np


class QLearning:
    """Tabular Q-learning agent with the Q-table held on CPU or GPU via torch.

    Vector states are bucketed onto ``state_dim`` discrete Q-table rows by
    hashing (see :meth:`state_to_index`), so distinct states may collide.
    """

    def __init__(self, state_dim, action_dim, learning_rate, gamma, epsilon, device):
        """
        Args:
            state_dim: number of discrete state buckets (Q-table rows).
            action_dim: number of actions (Q-table columns).
            learning_rate: step size of the TD update.
            gamma: discount factor.
            epsilon: exploration probability for the epsilon-greedy policy.
            device: torch device ("cpu", "cuda", ...) the Q-table lives on.
        """
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.learning_rate = learning_rate
        self.gamma = gamma  # discount factor
        self.epsilon = epsilon  # epsilon-greedy exploration rate
        self.device = device

        # Q-table of shape (state_dim, action_dim), on the requested device.
        self.q_table = torch.zeros((state_dim, action_dim), device=self.device)

    def state_to_index(self, state):
        """Map a state vector to a discrete row index via hashing.

        NOTE(review): collisions alias distinct states onto one row. Hashing
        is deterministic for numeric components; str components would vary
        with PYTHONHASHSEED — confirm states are numeric.
        """
        return hash(tuple(state)) % self.state_dim

    def _greedy_action(self, state_idx, valid_indices):
        """Return the legal action with the highest Q-value in this state.

        Vectorized: one fancy-indexing lookup instead of a Python loop over
        per-action Q-values.
        """
        q_values = self.q_table[state_idx, valid_indices]
        best_local_idx = int(torch.argmax(q_values).item())
        # Map the position within valid_indices back to the global action index.
        return valid_indices[best_local_idx]

    def select_action(self, state, valid_indices, training=True):
        """Epsilon-greedy action selection restricted to legal actions.

        Args:
            state: raw state (hashed to a Q-table row).
            valid_indices: global indices of the legal actions; the trailing
                entry is the "null" action and is excluded whenever another
                legal action exists. The caller's list is NOT modified.
            training: if True, explore with probability epsilon; otherwise
                act fully greedily.

        Returns:
            Global index of the chosen action.
        """
        state_idx = self.state_to_index(state)

        # Exclude the trailing "null" action, but on a local copy — the
        # original popped the caller's list in place, so repeated calls with
        # the same list kept shrinking it.
        if len(valid_indices) > 1:
            valid_indices = valid_indices[:-1]

        if training and np.random.rand() < self.epsilon:
            return np.random.choice(valid_indices)  # explore
        return self._greedy_action(state_idx, valid_indices)  # exploit

    def take_action(self, state):
        """Epsilon-greedy selection over ALL actions (no legality mask)."""
        state_idx = self.state_to_index(state)
        if np.random.random() < self.epsilon:
            return np.random.randint(self.action_dim)
        return torch.argmax(self.q_table[state_idx]).item()

    def update(self, state, action, reward, next_state, done, next_valid_indices):
        """One-step Q-learning (TD(0)) update of a single Q-table entry.

        Args:
            state: raw state the action was taken in.
            action: global index of the action taken.
            reward: scalar reward received.
            next_state: raw successor state.
            done: episode-termination flag; truthy disables bootstrapping.
            next_valid_indices: legal actions in ``next_state``; an empty
                list means no legal action, so the future value is 0.
        """
        state_idx = self.state_to_index(state)
        next_state_idx = self.state_to_index(next_state)

        # Bootstrap only over the legal actions of the next state.
        if next_valid_indices:
            next_q_value = self.q_table[next_state_idx, next_valid_indices].max()
        else:
            next_q_value = torch.tensor(0.0, device=self.device)

        # TD target: r + gamma * max_a' Q(s', a'), zeroed on terminal states.
        q_target = reward + self.gamma * next_q_value * (1.0 - float(done))

        # Move Q(s, a) toward the target by the learning rate.
        self.q_table[state_idx, action] += self.learning_rate * (
            q_target - self.q_table[state_idx, action]
        )

