import sys
from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import matplotlib.pyplot as plt

def progress_bar(current, total, barLength = 100):
    """Draw an in-place console progress bar.

    Renders 'Progress: [--->   ] NN %' on a single line, overwriting the
    previous render via a carriage return, and flushes stdout so the bar
    updates immediately.
    """
    percent = float(current) * 100 / total
    filled = int(percent / 100 * barLength - 1)
    arrow = ('-' * filled) + '>'
    spaces = ' ' * (barLength - len(arrow))
    print('Progress: [%s%s] %d %%' % (arrow, spaces, percent), end='\r')
    sys.stdout.flush()

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """Jitted forward pass: delegate to the model's apply method.

    The model object itself is a static argument (argnum 3) so jit can
    trace through its Python-level apply call.
    """
    output = model.apply(variables, state, x)
    return output

@jit
def get_action(y):
    """Greedy action selection: index of the largest entry of y."""
    best = jnp.argmax(y)
    return best
# Batched variant: argmax applied row-wise over a batch of outputs.
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
def load_task(pth = "./logs/task.json"):
    """Load a saved task (landscape, start state, goal) from a JSON file.

    The file must contain the keys "data", "state" and "goal". The three
    values are echoed to stdout and returned as a tuple.
    """
    with open(pth, "r") as f:
        data = json.load(f)
    landscape = data["data"]
    state = data["state"]
    goal = data["goal"]
    print("state: ", state)
    print("goal: ", goal)
    print("landscape: ", landscape)
    return landscape, state, goal

def qc_single_goal_record(record):
    """Quality-check a per-step goal-reached record for a single goal.

    Scans a boolean sequence of "goal reached" flags and measures trial
    lengths (steps between consecutive goal hits). An exponentially
    weighted moving average of the absolute change in trial length serves
    as a variance proxy.

    Returns True as soon as (a) the variance proxy has dropped to at most
    `var_threshold` and (b) the current trial is at least
    `optimization_scale` times faster than the first trial; otherwise
    returns False. An empty or all-False record yields False.
    """
    # Fixed: the original assigned these twice; only the second pair of
    # values (5 and 2) was ever used, so the dead first pair is removed.
    var_threshold = 5        # max allowed EWMA of |trial-length change|
    optimization_scale = 2   # required speed-up vs. the first trial

    trial_len = 0
    trial_len_old = 0
    step_ = 0
    trial_len_var = 0
    first_trial_len = -1     # -1 until the first goal hit is observed
    # Iterate the record directly: works for numpy/JAX arrays and lists.
    for hit in record:
        step_ += 1
        if hit:
            if first_trial_len == -1:
                first_trial_len = step_
            trial_len = step_
            step_ = 0
            # EWMA of the absolute change in trial length (variance proxy)
            trial_len_var = 0.4 * abs(trial_len - trial_len_old) + 0.6 * trial_len_var
            trial_len_old = trial_len
            if trial_len_var <= var_threshold and first_trial_len / trial_len >= optimization_scale:
                return True
    return False

def main():
    """Run a biased-random-walk diagnostic in a 12x12 grid environment.

    Parses CLI arguments into a ReplayConfig, loads model weights, builds
    a landscape (random maze or saved task), then replaces it with an
    empty bordered room and runs 10000 steps in which the action
    distribution is biased toward the agent's smoothed recent velocity.
    Finally renders the board and plots the averaged action direction
    over time with matplotlib.
    """
    rpl_config = ReplayConfig()

    # CLI flags default to the values already stored on ReplayConfig.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)
    parser.add_argument("--t", type=int, default=1)

    args = parser.parse_args()

    # Copy the parsed values back onto the config object.
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration
    t_value = args.t  # NOTE(review): parsed but never used below

    cv2.namedWindow("img", cv2.WINDOW_GUI_NORMAL)

    # JAX PRNG key seeded from numpy's RNG.
    # NOTE(review): k1 is only used by the commented-out sampling code below.
    k1 = jax.random.PRNGKey(npr.randint(0, 1000000))

    """ load model
    """
    # NOTE(review): weights are loaded but never used in this script.
    params = load_weights(rpl_config.model_pth)

    """ create landscape
    """
    # Use a random maze unless a saved task file exists at task_pth.
    random_task = True
    # check if file on rpl_config.task_pth exists
    if os.path.isfile(rpl_config.task_pth):
        random_task = False

    if random_task:
        landscape = generate_maze_pool(num_mazes=1, width=10, height=10)
        landscape = padding_landscapes(landscape, width=12, height=12)
    else:
        landscape, state, goal = load_task(pth = rpl_config.task_pth)
        landscape = [landscape]

    print("landscape :")
    print(landscape)

    '''empty space test
    '''
    # Build a 12x12 open room: 1 = free cell, 0 = wall along the border.
    blank_space = [1 for _ in range(12*12)]
    # make borders
    for i in range(12):
        blank_space[i] = 0
        blank_space[11*12 + i] = 0
        blank_space[i*12] = 0
        blank_space[i*12 + 11] = 0
    ''' create a batch of empty landscapes
    '''
    # NOTE(review): this overwrites every loaded/generated landscape with the
    # empty room, so the maze/task geometry created above is discarded.
    for i in range(len(landscape)):
        landscape[i] = blank_space

    """ create grid env
    """
    start_time = time.time()
    GE = GridEnv(landscapes = landscape, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    if not random_task:
        # Restore the saved start state and goal into env 0, then refresh
        # the derived goal-reached flags and observations to match.
        # set states of GE
        GE.batched_states = GE.batched_states.at[0, 0].set(state[0])
        GE.batched_states = GE.batched_states.at[0, 1].set(state[1])
        # set goals of GE
        GE.batched_goals = GE.batched_goals.at[0, 0].set(goal[0])
        GE.batched_goals = GE.batched_goals.at[0, 1].set(goal[1])
        GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
        GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
        GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
        GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)

    concat_obs = GE.concat_obs

    print("shape of concat_obs: ", concat_obs.shape)


    step_count = 0
    render_id = 0  # index of the env instance to track and render
    last_position = np.array(GE.batched_states[render_id])
    vel = []        # per-step displacement vectors (large jumps filtered out)
    vel_means = []  # normalized running-mean velocity, one entry per step

    
    # Near-uniform initial distribution over the 4 movement actions.
    probs = [0.1,0.1,0.1,0.1]
    actions = np.zeros(4)
    action = 0

    for t in range(10000):
        progress_bar(t, 10000)

        step_count += 1
        # random_y1 = jax.random.uniform(k1, shape=(1, 4))
        # k1, k2 = jax.random.split(k1)
        # batched_actions = get_action_vmap(random_y1)
        # # reshape actions to match the shape of random_y1
        # actions = np.reshape(actions, random_y1.shape)
        # batched_actions = get_action_vmap(actions)
        batched_goal_reached, concat_obs = GE.step(jnp.array([action]))

        current_position = np.array(GE.batched_states[render_id])

        
        # Ignore jumps larger than one cell (presumably resets/teleports —
        # TODO confirm against GridEnv.step).
        if np.linalg.norm(current_position - last_position) <= 1.5:
            vel.append(current_position - last_position)
        
        last_position = current_position

        # Mean displacement over (up to) the last 3000 recorded steps,
        # normalized to a unit direction vector.
        vel_mean = np.mean(np.array(vel)[-3000:], axis=0)
        vel_mean_norm = vel_mean/np.linalg.norm(vel_mean)
        vel_means.append(vel_mean_norm)

        # print("shape of vel_mean_norm: ", vel_mean_norm.shape)

        # self.act = jnp.array([[0, 1], [0, -1], [1, 0], [-1, 0], [0, 0]])
        # Bias the action distribution toward the recent mean direction;
        # per the action table above: 0/1 move along +y/-y, 2/3 along +x/-x.
        if vel_mean_norm[1] > 0:
            probs[0] = vel_mean_norm[1]
            probs[1] = 0
        elif vel_mean_norm[1] < 0:
            probs[1] = -vel_mean_norm[1]
            probs[0] = 0
        if vel_mean_norm[0] > 0:
            probs[2] = vel_mean_norm[0]
            probs[3] = 0
        elif vel_mean_norm[0] < 0:
            probs[3] = -vel_mean_norm[0]
            probs[2] = 0
        
        # Small floor keeps every action reachable before normalization.
        probs[0]+=0.01
        probs[1]+=0.01
        probs[2]+=0.01
        probs[3]+=0.01
        # compute the sum of probs
        probs_sum = np.sum(probs)
        # normalize probs into a distribution
        probs = probs / probs_sum
        # sample an action according to the probs distribution
        action = np.random.choice(4, p=probs)
        # # output the sampled result
        # actions = np.zeros(4)
        # # actions[action] = 1

    board_img = GE.render2(render_id)
    cv2.imshow("img", board_img)
    cv2.waitKey(1)

    # convert vel_means to a numpy array
    vel_means = np.array(vel_means)

    # create two subplots: one for the x coordinate over time,
    # one for the y coordinate over time
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 6))

    # plot the x coordinate over time
    ax1.plot(-1*vel_means[:, 0])
    ax1.set_ylabel('average_action_X')

    # plot the y coordinate over time
    ax2.plot(vel_means[:, 1])
    ax2.set_ylabel('average_action_Y')

    # set the x-axis label
    plt.xlabel('time')

    # show the plot
    plt.show()
    

if __name__ == "__main__":
    main()