﻿import numpy as np


def get_policy(nrow=4, ncol=4):
    """Build a random initial policy grid.

    Each cell holds one action index drawn uniformly from {0, 1, 2, 3}
    (left, down, right, up).

    Args:
        nrow: number of grid rows.
        ncol: number of grid columns.

    Returns:
        An ``(nrow, ncol)`` integer array of random action indices.
    """
    return np.random.randint(low=0, high=4, size=(nrow, ncol))


class Agent:
    """Tabular Q-learning agent with UCB1 action selection and replay.

    The environment is assumed to expose an ``nrow`` x ``ncol`` grid via
    ``env.env.unwrapped`` (Gym-style wrapper — TODO confirm against caller).
    """

    def __init__(self, env, c=2):
        """
        Args:
            env: wrapped environment exposing ``env.env.unwrapped.nrow``/``ncol``
                and ``get_current_position()`` (used by ``choose_action``).
            c: exploration coefficient for the UCB bonus term.
        """
        self.env = env

        self.nrow = env.env.unwrapped.nrow
        self.ncol = env.env.unwrapped.ncol

        # Q table: (number of states) x (number of actions).
        self.num_states = self.nrow * self.ncol
        self.num_actions = 4  # action space: 0=left, 1=down, 2=right, 3=up
        self.Q = np.zeros((self.num_states, self.num_actions))
        # Per state-action visit counts, feeding the UCB exploration bonus.
        self.pos_try_count = np.zeros((self.num_states, self.num_actions))

        # Learning parameters.
        self.alpha = 0.1  # learning rate
        self.gamma = 0.99  # discount factor
        self.epsilon = 0.1  # exploration rate (unused by UCB; kept for compatibility)

        # Greedy policy derived from the (initially all-zero) Q table.
        # NOTE: the previous redundant `get_policy()` assignment was removed;
        # it was unconditionally overwritten here before any read.
        self.policy = np.array(
            [
                [np.argmax(self.Q[i * self.ncol + j]) for j in range(self.ncol)]
                for i in range(self.nrow)
            ]
        )
        self.total_steps = 0
        self.c = c

        # Experience-replay buffer. Bug fix: `remember`/`replay` referenced
        # these attributes, but they were never initialized (AttributeError).
        self.memory = []
        self.batch_size = 32

    def choose_action(self) -> int:
        """Pick an action for the current state using the UCB1 rule."""
        state_idx = self.env.get_current_position()["state_index"]
        counts = self.pos_try_count[state_idx]

        # Make sure every action is tried at least once; otherwise the UCB
        # bonus below would divide by zero.
        if 0 in counts:
            return int(np.argmin(counts))

        # UCB value = exploitation (Q) + exploration bonus.
        ucb_values = self.Q[state_idx] + self.c * np.sqrt(
            np.log(self.total_steps) / counts
        )
        return int(np.argmax(ucb_values))

    def update(self, current_state: int, action: int, reward: float, next_state: int):
        """Apply one Q-learning update for the transition (s, a, r, s')."""
        self.pos_try_count[current_state][action] += 1
        self.total_steps += 1

        # Bootstrapped target: r + gamma * max_a' Q(s', a').
        max_next_q = np.max(self.Q[next_state])
        target_q = reward + self.gamma * max_next_q

        # Exponential moving average toward the target.
        self.Q[current_state, action] = (1 - self.alpha) * self.Q[
            current_state, action
        ] + self.alpha * target_q

        # Keep the tabular policy in sync with the updated Q row.
        row = current_state // self.ncol
        col = current_state % self.ncol
        self.policy[row, col] = np.argmax(self.Q[current_state])

    def remember(self, s, a, r, s_next):
        """Store a transition in the replay buffer."""
        self.memory.append((s, a, r, s_next))

    def replay(self):
        """Sample a minibatch from memory and run Q-learning updates on it."""
        if len(self.memory) < self.batch_size:
            return
        # Bug fix: `np.random.sample(population, k)` draws uniform floats and
        # cannot sample from a sequence (TypeError). Sample indices without
        # replacement instead.
        picks = np.random.choice(len(self.memory), size=self.batch_size, replace=False)
        for idx in picks:
            s, a, r, s_next = self.memory[idx]
            self.update(s, a, r, s_next)