import GridWorld_my
import numpy as np


# Initialize the state-value table V(s)
def initStateValue():
    """Return a fresh 4x4 state-value table with every entry set to zero."""
    return np.zeros((4, 4))


def StateTransValue(state):
    """Map a flat state id in [0, 16) to (row, col) indices of the 4x4 grid.

    States are numbered left-to-right starting from the BOTTOM row, so the
    row index is flipped (state 0 -> row 3, state 12 -> row 0).

    Args:
        state: integer state id, 0 <= state < 16.

    Returns:
        (row, col) tuple indexing into the 4x4 state-value table.

    Raises:
        ValueError: if ``state`` is outside [0, 16).  The original code
        silently fell through to an UnboundLocalError here.
    """
    if not 0 <= state < 16:
        raise ValueError(f"state must be in [0, 16), got {state}")
    y, col = divmod(state, 4)  # y = row counted from the bottom
    return (3 - y, col)


def train(state, next_state, reward, state_value, env, gamma=1.0):
    """Perform one Bellman-expectation backup for ``state`` under the
    uniform-random policy (probability 0.25 per action).

    BUG FIX: the original truncated the bootstrapped successor value with
    ``int(...)``, which discards the fractional part and prevents the value
    table from converging to its true (non-integer) fixed point.

    Args:
        state: flat id of the state being backed up.
        next_state: unused; kept for interface compatibility with callers.
        reward: reward applied to every action (assumed uniform here).
        state_value: 4x4 table of V(s), updated in place.
        env: environment whose ``step(a)`` returns the successor of
            ``env.state`` under action ``a``; ``env.state`` is writable.
        gamma: discount factor (default 1.0 preserves original behavior).

    Returns:
        (v_old, v_new): the cell's value before and after the backup.
    """
    row, col = StateTransValue(state)
    v_old = state_value[row][col]

    # Probe each action's successor; rewind env.state after every step so
    # all four actions are tried from the same starting state.
    successors = []
    for action in range(4):
        succ, _, _, _ = env.step(action)
        env.state = state
        successors.append(succ)

    # Uniform policy: V(s) = (1/4) * sum_a [ r + gamma * V(s'_a) ]
    total = 0.0
    for succ in successors:
        s_row, s_col = StateTransValue(succ)
        total += reward + gamma * state_value[s_row][s_col]
    state_value[row][col] = 0.25 * total

    return v_old, state_value[row][col]


if __name__ == "__main__":
    env = GridWorld_my.four_four()
    seta = 0.01  # convergence threshold on the value-table change
    state_value = initStateValue()
    for sweep in range(1000):
        env.reset()
        state = env.state
        max_delta = 0.0  # largest |V_new - V_old| seen this episode
        while True:
            a = env.action_space.sample()
            state_next, reward, isdone, _ = env.step(a)
            # Rewind: train() re-steps from `state` for all four actions.
            env.state = state
            v, v_new = train(state, state_next, reward, state_value, env)
            max_delta = max(max_delta, abs(v_new - v))
            state = state_next
            if isdone:
                break
        # FIX: `seta` was defined but never used and the (v, v_new) pair was
        # discarded — stop early once the table has converged, as intended.
        if max_delta < seta:
            break
    env.render()  # render once at the end instead of every sweep
    print("End of the iteration!")
    print(state_value)
