import numpy as np
import pandas as pd
class updateValue():
    """Policy evaluation and greedy policy improvement for a grid-world MDP.

    A flat state index ``s`` decodes to coordinates
    ``x = s % env.env_size[1]`` (column) and ``y = s // env.env_size[1]``
    (row), so ``env_size[1]`` is the grid width and ``env_size[0]`` the
    grid height.  Actions are ``(dx, dy)`` tuples:
    ``(0, 1)`` = down, ``(1, 0)`` = right, ``(0, -1)`` = up,
    ``(-1, 0)`` = left, ``(0, 0)`` = stay.
    """

    def __init__(self, policy_matrix, env, action_dict, gamma):
        # policy_matrix[state][action_index] = probability of that action.
        self.policy_matrix = policy_matrix
        self.env = env
        # action_dict maps an action tuple to its column in policy_matrix.
        self.action_dict = action_dict
        self.gamma = gamma        # discount factor
        self.count = 0            # number of evaluation sweeps performed

    def update_State_value(self, V_old):
        """Perform one sweep of iterative policy evaluation.

        Applies the Bellman expectation equation under the current policy,
        using the previous value estimates ``V_old`` (indexable by state).

        Returns:
            list[float]: the new value for every state.
        """
        self.count += 1
        v_new = []
        for state in range(self.env.num_states):
            value = 0.0
            for action in self.env.action_space:
                action_prob = self.policy_matrix[state][self.action_dict.get(action)]
                next_state, reward = self._get_next_state_and_reward(state, action)
                value += action_prob * (reward + self.gamma * V_old[next_state])
            v_new.append(value)
        return v_new

    def _get_next_state_and_reward(self, state, action):
        """Deterministic transition model: return (next_state_index, reward).

        Stepping off the grid or into a forbidden cell keeps the agent in
        place and yields ``reward_forbidden``; reaching the target yields
        ``reward_target``; any other move yields ``reward_step``.
        """
        # Decode the flat index (row width is env_size[1]).
        x, y = state % self.env.env_size[1], state // self.env.env_size[1]
        state_ax = (x, y)
        new_state = tuple(np.array(state_ax) + np.array(action))
        # Boundary checks.  NOTE(review): the original compared y against
        # env_size[1]-1 and x against env_size[0]-1, which contradicts the
        # decode/encode above on non-square grids; the limits here are made
        # consistent with the flat-index math (width = env_size[1],
        # height = env_size[0]).  Hitting a wall leaves (x, y) unchanged.
        if action == (0, 1) and y + 1 > self.env.env_size[0] - 1:    # down, off bottom
            reward = self.env.reward_forbidden
        elif action == (1, 0) and x + 1 > self.env.env_size[1] - 1:  # right, off right edge
            reward = self.env.reward_forbidden
        elif action == (0, -1) and y - 1 < 0:                        # up, off top
            reward = self.env.reward_forbidden
        elif action == (-1, 0) and x - 1 < 0:                        # left, off left edge
            reward = self.env.reward_forbidden
        elif new_state == self.env.target_state:
            x, y = self.env.target_state
            reward = self.env.reward_target
        elif new_state in self.env.forbidden_states:
            # Forbidden cell: stay put and pay the penalty.
            reward = self.env.reward_forbidden
        else:
            x, y = new_state
            reward = self.env.reward_step
        # Re-encode (x, y) back into a flat state index.
        return (y * self.env.env_size[1] + x, reward)

    def update_policy_matrix(self, V):
        """Greedy policy improvement in place.

        For each state, makes the policy deterministic on the action that
        maximizes ``reward + gamma * V[next_state]`` (first maximizer wins
        on ties, following action_space order).
        """
        num_actions = len(self.env.action_space)
        for state in range(self.env.num_states):
            best_action = None
            best_value = float('-inf')
            for action in self.env.action_space:
                next_state, reward = self._get_next_state_and_reward(state, action)
                q = reward + self.gamma * V[next_state]   # compute once, use twice
                if q > best_value:
                    best_value = q
                    best_action = action
            self.policy_matrix[state] = np.zeros(num_actions)
            self.policy_matrix[state][self.action_dict.get(best_action)] = 1.0