import numpy as np
import matplotlib.pyplot as plt


def read_map(file_path):
    """Read a cubic 3-D map from a text file.

    The file holds N*N rows of N digit characters each (blank lines are
    ignored); every N consecutive rows form one layer, and the N layers
    stack into an (N, N, N) grid.

    Parameters
    ----------
    file_path : str
        Path to the map text file.

    Returns
    -------
    numpy.ndarray
        Integer array of shape (N, N, N). Cell codes used by the rest of
        this module: 0 free, 1 obstacle, 2 start, 3 goal.
    """
    with open(file_path, 'r') as file:
        lines = [line.strip() for line in file if line.strip()]
    # N layers of N rows each -> N*N non-empty lines; round (not truncate)
    # so float sqrt noise cannot shrink the layer size.
    layer_size = int(round(np.sqrt(len(lines))))
    map_data = []
    current_layer = []
    for line in lines:
        # Lines are already stripped above; split each into digit cells.
        current_layer.append([int(ch) for ch in line])
        if len(current_layer) == layer_size:
            map_data.append(current_layer)
            current_layer = []
    return np.array(map_data)


def plot_3d_map(map_data):
    """Render a 3-D occupancy map as one unit cube per grid cell.

    Cell codes are drawn as: 1 -> solid black (obstacle), 2 -> green
    (start), 3 -> red (goal), anything else -> translucent white
    wireframe. Blocks until the plot window is closed (plt.show()).
    """
    # Style lookup keyed by cell value; unrecognised values fall back to
    # the translucent "empty cell" style.
    styles = {
        1: dict(color='black', alpha=0.5),
        2: dict(color='green', alpha=0.7),
        3: dict(color='red', alpha=0.7),
    }
    empty_style = dict(color='white', edgecolor='black', alpha=0.1)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    x_size, y_size, z_size = map_data.shape
    for x in range(x_size):
        for y in range(y_size):
            for z in range(z_size):
                cell_style = styles.get(int(map_data[x, y, z]), empty_style)
                ax.bar3d(x, y, z, 1, 1, 1, **cell_style)
    ax.set_xlabel('X Axis')
    ax.set_ylabel('Y Axis')
    ax.set_zlabel('Z Axis')
    ax.set_xlim([0, x_size])
    ax.set_ylim([0, y_size])
    ax.set_zlim([0, z_size])
    plt.show()


# The six 3-D moves as (dx, dy, dz) offsets. The list order defines the
# action indices used by the Q-table and transition matrices below.
actions_3d = [
    (0, -1, 0),  # left
    (1, 0, 0),  # forward
    (0, 1, 0),  # right
    (-1, 0, 0),  # backward
    (0, 0, 1),  # float up
    (0, 0, -1)  # sink down
]


def create_environment_3d(map_data):
    """Build tabular transition/reward arrays (P, R) from a 3-D grid map.

    States are flattened cell indices: state = x*(Y*Z) + y*Z + z.
    Cell codes: 1 obstacle, 2 start, 3 goal, anything else free.

    Parameters
    ----------
    map_data : ndarray of shape (X, Y, Z)
        Integer grid as produced by read_map().

    Returns
    -------
    P : ndarray (num_states, num_actions, num_states)
        Transition table. NOTE: not a probability matrix — an entry of 2
        marks a move into an obstacle; the training loop in this file
        relies on max(P[s, a, :]) != 1 to terminate an episode.
    R : ndarray (num_states, num_actions)
        Immediate rewards: +1 entering the goal, -2 entering an obstacle,
        -1 stepping off the grid, 0 otherwise. All actions FROM the goal
        state are overwritten to +1 at the end.
    start_state : int or None
        Flattened index of the cell coded 2 (None if no such cell).
    goal_state : int or None
        Flattened index of the cell coded 3 (None if no such cell).
    grid_size : tuple of int
        The (X, Y, Z) shape of the map.
    """
    grid_size_x, grid_size_y, grid_size_z = map_data.shape
    num_states = grid_size_x * grid_size_y * grid_size_z
    num_actions = len(actions_3d)
    P = np.zeros((num_states, num_actions, num_states))
    R = np.full((num_states, num_actions), 0)
    start_state = None
    goal_state = None

    for x in range(grid_size_x):
        for y in range(grid_size_y):
            for z in range(grid_size_z):
                # Flattened index of the current cell.
                state = x * (grid_size_y * grid_size_z) + y * grid_size_z + z
                for action_idx, (dx, dy, dz) in enumerate(actions_3d):
                    nx, ny, nz = x + dx, y + dy, z + dz
                    if 0 <= nx < grid_size_x and 0 <= ny < grid_size_y and 0 <= nz < grid_size_z:
                        next_state = nx * (grid_size_y * grid_size_z) + ny * grid_size_z + nz
                        if map_data[nx, ny, nz] != 1:
                            # Reward only when the move lands on the goal.
                            if map_data[nx, ny, nz] == 3:
                                R[state, action_idx] = 1
                            P[state, action_idx, next_state] = 1
                        else:
                            # Sentinel value 2: obstacle hit. The Q-learning
                            # loop detects this via max(P[s, a, :]) != 1 and
                            # ends the episode.
                            P[state, action_idx, next_state] = 2
                            R[state, action_idx] = -2
                    else:
                        # Off-grid move: stay in place with a small penalty.
                        R[state, action_idx] = -1
                        P[state, action_idx, state] = 1
                if map_data[x, y, z] == 2:
                    start_state = state
                elif map_data[x, y, z] == 3:
                    goal_state = state
                    # Deliberately overwrites the per-action rewards set in
                    # the loop above for the goal state itself.
                    R[state, :] = 1
    return P, R, start_state, goal_state, (grid_size_x, grid_size_y, grid_size_z)


if __name__ == '__main__':
    # Load the 4x4x4 map, visualize it, and build the tabular MDP arrays.
    map_data = read_map('map4*4*4.txt')
    plot_3d_map(map_data)
    P, R, start_state, goal_state, grid_size = create_environment_3d(map_data)
    print(R)

    # Tabular Q-learning over the flattened state space; 6 actions
    # (matching actions_3d).
    num_states = grid_size[0] * grid_size[1] * grid_size[2]
    Q = np.zeros([num_states, 6])
    alpha = 0.1       # learning rate
    gamma = 0.95      # discount factor
    num_episodes = 1000
    rs = np.zeros([num_episodes])

    for i in range(num_episodes):
        r_sum_i = 0
        t = 0
        done = False
        s = start_state
        while not done:
            # Noisy-greedy action selection: the Gaussian noise scale
            # decays with the episode index, annealing exploration.
            a = np.argmax(Q[s, :] + np.random.randn(1, 6) * (1. / (i / 10 + 1)))
            s1 = np.argmax(P[s, a, :])
            # P entries other than 1 mark obstacle hits (see
            # create_environment_3d); those and the goal end the episode.
            if np.max(P[s, a, :]) != 1 or s1 == goal_state:
                done = True
            r = R[s, a]
            print('    step: %d, action: %d, next_status: %d, r: %d, done: %s' % (t, a, s1, r, done))
            # Standard Q-learning update rule.
            Q[s, a] = (1 - alpha) * Q[s, a] + alpha * (r + gamma * np.max(Q[s1, :]))
            r_sum_i += r * gamma ** t
            s = s1
            t += 1
        rs[i] = r_sum_i
        print('epoch: %d,  reward: %f' % (i, r_sum_i))
    print(Q)

    # 50-episode moving average of the discounted returns.
    r_cumsum = np.cumsum(np.insert(rs, 0, 0))
    r_cumsum = (r_cumsum[50:] - r_cumsum[:-50]) / 50
    plt.plot(r_cumsum)
    plt.show()

    # Greedy rollout of the learned policy. Cap the number of steps so a
    # non-converged Q-table (which can cycle) cannot loop forever.
    s = start_state
    path = []
    total_reward = 0.0
    max_steps = num_states
    while s != goal_state and len(path) < max_steps:
        a = np.argmax(Q[s, :])
        path.append(int(s))
        s1 = np.argmax(P[s, a, :])
        total_reward += R[s, a]
        s = s1
    print(f"Best Path: {path}")
    print(f"Total Reward: {total_reward}")
    input("Press Enter to exit...")
