# -*- coding: utf-8 -*-

import os
import copy
import pickle

import numpy as np
import gymnasium as gym
import ray

from ray.rllib.core.rl_module import RLModule

# from pykdsim.kdsim_env import KdSimEnv, builder_distribute_env
# from pykdsim.kdsim_env_op import KdSimEnv, builder_distribute_env

# gym.register(
#     id='KdSim-v0',
#     entry_point=KdSimEnv,
# )


# to gif
import matplotlib.pyplot as plt 
from matplotlib import animation 

def display_frames_as_gif(frames, id, path):
    """Save a sequence of RGB frames as an animated GIF named ``<id>.gif`` in *path*.

    Args:
        frames: non-empty sequence of image arrays (as returned by
            ``env.render()`` in ``rgb_array`` mode).
        id: basename of the output file (``{id}.gif``); typically the episode index.
        path: existing directory the GIF is written into.

    Raises:
        ValueError: if *frames* is empty (there is nothing to animate).
    """
    if not frames:
        raise ValueError("display_frames_as_gif: 'frames' is empty, nothing to save")

    patch = plt.imshow(frames[0])
    plt.axis('off')

    def animate(i):
        # FuncAnimation calls this with the frame index; we only swap the image data.
        patch.set_data(frames[i])

    # NOTE: `interval` only matters for interactive playback; the saved GIF's
    # frame rate is governed by `fps` passed to `anim.save` below.
    anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval=5)
    anim.save(os.path.join(path, f'{id}.gif'), writer='pillow', fps=30)
    plt.close()


def _reward_stats_line(score_list):
    """Return (mean, var, formatted summary line) for the episode scores so far."""
    reward_mean, reward_var = np.mean(score_list), np.var(score_list)
    reward_median = np.median(score_list)
    reward_min, reward_max = np.min(score_list), np.max(score_list)
    line = (f'mean/median reward {reward_mean:0.2f}/{reward_median:0.2f}, '
            f'var {reward_var:0.2f}, max/min reward {reward_max:0.2f}/{reward_min:0.2f}')
    return reward_mean, reward_var, line


def eval_policy(policy, env_name, seed, eval_episodes=100, return_mode='666',
                render=False, save_demo=False, save_gif=False, path=None, log=True):
    """Roll out `policy` in `env_name` for up to `eval_episodes` episodes and report rewards.

    Args:
        policy: object exposing ``compute_single_action(state) -> action``.
        env_name: gym environment id. Names containing 'KdSim' use the custom
            KdSim env -- NOTE(review): the KdSim imports at the top of this file
            are commented out, so that branch currently raises NameError; confirm
            whether it is still meant to be supported.
        seed: unused here; kept for interface compatibility with callers.
        eval_episodes: maximum number of evaluation episodes.
        return_mode: 'mean' -> mean reward; 'all' -> (score_list, mean, var);
            anything else -> the raw score list.
        render: render to screen ('human' mode) while evaluating.
        save_demo: accumulate (s, a, r, s', done, info) transitions; once >= 1000
            are collected they are pickled under `path` and evaluation stops early.
        save_gif: record 'rgb_array' frames and save one GIF per episode under
            ``path/gif``.
        path: output directory for demos/GIFs (required when saving either).
        log: print per-episode statistics while rendering/saving.

    Returns:
        Depends on `return_mode` (see above).
    """
    render_mode = None
    if render:
        render_mode = 'human'
    elif save_gif:
        render_mode = 'rgb_array'

    # Only the KdSim env is (optionally) hosted as a ray actor; plain gym envs
    # are always driven in-process.
    use_ray = True

    if 'KdSim' in env_name:
        if use_ray:
            eval_env_cls = builder_distribute_env(KdSimEnv).as_remote()
            eval_env = eval_env_cls.remote(max_episode_steps=100, render_mode=render_mode, speed=5)
        else:
            eval_env = KdSimEnv(max_episode_steps=100, render_mode=render_mode, speed=5)
    else:
        eval_env = gym.make(env_name, render_mode=render_mode)
        # BUGFIX: a gym.make env is a local object, not a ray actor -- calling
        # `.reset.remote()` / `.step.remote()` on it would raise AttributeError.
        # Force the in-process code path for every non-KdSim environment.
        use_ray = False

    score_list = []
    all_demo_ls = []
    if save_demo:
        save_demo_path = path
        os.makedirs(save_demo_path, exist_ok=True)
    if save_gif:
        save_gif_path = os.path.join(path, "gif")
        os.makedirs(save_gif_path, exist_ok=True)

    tr_cnt = 0  # completed trajectories (episodes)
    for i in range(eval_episodes):
        st_cnt = 0   # steps taken this episode
        ep_r = 0.    # cumulative reward this episode
        demo_ls = []
        frames = []
        if use_ray:
            (state, info), done = ray.get(eval_env.reset.remote()), False
        else:
            (state, info), done = eval_env.reset(), False
        success = False
        while not done:
            if render:
                if use_ray:
                    ray.get(eval_env.render.remote())
                else:
                    eval_env.render()
            elif save_gif:
                if use_ray:
                    frames.append(ray.get(eval_env.render.remote()))
                else:
                    frames.append(eval_env.render())
            # Keep the pre-step state for the (s, a, r, s', done, info) demo tuple.
            last_state = copy.deepcopy(state)
            action = policy.compute_single_action(state)
            if use_ray:
                state, reward, terminated, truncated, info = ray.get(eval_env.step.remote(action))
            else:
                state, reward, terminated, truncated, info = eval_env.step(action)
            done = terminated or truncated
            # 'arrived' is only set by the KdSim env; plain gym envs skip this.
            if info.get('arrived'):
                success = True
            demo_ls.append((last_state, action, reward, state, done, info))
            ep_r += reward
            st_cnt += 1
        tr_cnt += 1
        score_list.append(ep_r)
        if (render or save_gif or save_demo) and log:
            print('ep_r:', ep_r)
            print('st_cnt:', st_cnt)
            print(_reward_stats_line(score_list)[2])
        good = True  # NOTE(review): success filter disabled -- every episode is kept
        if save_gif and good:
            display_frames_as_gif(frames, i, save_gif_path)
        if save_demo and good:
            all_demo_ls += demo_ls
            if len(all_demo_ls) >= 1000:
                reward_mean, _, stats_line = _reward_stats_line(score_list)
                if log:
                    print(stats_line)
                _path = f"{str(env_name).lower().split('-')[0]}_demo_r{int(np.mean(score_list))}_n{len(all_demo_ls)}_t{tr_cnt}.pkl"
                with open(os.path.join(save_demo_path, _path), "wb") as f:
                    pickle.dump(all_demo_ls, f)
                break

    reward_mean, reward_var, stats_line = _reward_stats_line(score_list)
    print("---------------------------------------")
    print(f"Evaluation over {eval_episodes} episodes")
    print(stats_line)
    print("---------------------------------------")

    # Release the in-process env's resources; a ray actor is reclaimed when its
    # handle goes out of scope, so only the local env needs an explicit close.
    if not use_ray:
        eval_env.close()

    if return_mode == 'mean':
        return reward_mean
    elif return_mode == 'all':
        return score_list, reward_mean, reward_var
    else:
        return score_list


# if __name__ == "__main__":
#     main()
