

from grid_world import GridWorld
import numpy as np
from value_iteration import ValueIteration
import pandas as pd

# Example usage: solve a GridWorld with value iteration, then roll out
# the resulting greedy policy and render it.
if __name__ == "__main__":
    env = GridWorld()
    env.reset()

    # Random stochastic policy as the starting point: one row per state;
    # normalize each row into a probability distribution over actions.
    policy_matrix = np.random.rand(env.num_states, len(env.action_space))
    policy_matrix /= policy_matrix.sum(axis=1, keepdims=True)

    # Initial state-value estimates, one per state.
    V = np.zeros((env.num_states,))

    # Map each action (dx, dy) to its column index in the policy matrix.
    action_dict = {(0, -1): 0, (0, 1): 1, (-1, 0): 2, (1, 0): 3, (0, 0): 4}
    # Inverse mapping (column index -> action tuple) replaces the former
    # hard-coded if/elif chain, so the two stay consistent by construction.
    index_to_action = {idx: act for act, idx in action_dict.items()}

    delta = 1e-4  # convergence threshold for value iteration
    gamma = 0.9   # discount factor

    value_i = ValueIteration(V, env, policy_matrix, action_dict, delta, gamma)
    value_i.run()

    st = env.start_state
    action_labels = ['←', '→', '↑', '↓', '·']  # order matches action_dict columns
    df = pd.DataFrame(value_i.policy_matrix, columns=action_labels)

    # Label the row index with the state number.
    df.index.name = 'State'

    # Print the converged policy as a table.
    print(df)

    # Roll out the learned policy from the start state (capped at 1000 steps).
    for t in range(1000):
        env.render()
        # Flatten the (x, y) position into a state index.
        # NOTE(review): this uses st[1] * env.env_size[1] + st[0]; if env_size
        # is (width, height) the row stride should be the width — confirm this
        # matches GridWorld's own state numbering.
        state_position = st[1] * env.env_size[1] + st[0]
        # Greedy action: argmax is robust even when no entry is exactly 1.0
        # (the old `if x == 1` scan silently fell back to action 0 then).
        action_position = int(np.argmax(value_i.policy_matrix[state_position]))
        action = index_to_action[action_position]
        st, reward, done, info = env.step(action)
        print(f"Step: {t}, Action: {action}, State: {st + np.array([1, 1])}, Reward: {reward}, Done: {done}")
        if done:
            break

    env.add_policy(value_i.policy_matrix)

    # # Add state values
    # values = np.random.uniform(0,10,(env.num_states,))
    # env.add_state_values(values)

    # Render the environment
    env.render(animation_interval=2)