import os
import yaml
import sys
import itertools
import copy
import numpy as np
from tqdm import tqdm
from collections import OrderedDict
from stable_baselines3.common.vec_env import DummyVecEnv

from logger import Manager

def next_observation(model,prev_observations,actions,observations, dones):
    """Post-process the observations returned by ``env.step``.

    For recurrent policies (detected by the presence of a ``hidden_h`` key),
    re-run the policy's RNN encoder with the executed action and the previous
    hidden state, then write the resulting ``causal`` encoding and the new
    hidden states back into ``observations``.  Hidden states and the action
    are zeroed where ``dones`` is set so a finished episode does not leak
    state into the next one.  Non-recurrent observations pass through
    untouched.

    Args:
        model: policy owner; only ``model.policy.scale_action`` and
            ``model.policy.rnn_encoder_predict`` are used (recurrent path).
        prev_observations: observation dict from the previous step, source of
            the previous ``hidden_h`` / ``hidden_c``.
        actions: raw actions executed this step.
        observations: observation dict returned by the env for this step
            (mutated in place on the recurrent path).
        dones: per-env done flags for this step.

    Returns:
        The (possibly mutated) ``observations`` dict.
    """
    if 'hidden_h' not in observations:
        # Non-recurrent policy: nothing to propagate.
        return observations

    # 1 where the episode continues, 0 where it just ended; used to reset
    # the recurrent state at episode boundaries.
    done_mask = 1 - np.stack((dones,), axis=-1)

    # Assemble the encoder input: scaled action + current obs + previous
    # hidden states.
    rnn_input = OrderedDict()
    scaled_actions = model.policy.scale_action(actions)
    rnn_input['action'] = (scaled_actions * done_mask).astype(np.float32)
    for key in observations:
        rnn_input[key] = observations[key].astype(np.float32)
    rnn_input['hidden_h'] = prev_observations['hidden_h']
    rnn_input['hidden_c'] = prev_observations['hidden_c']

    causal, hidden_h, hidden_c = model.policy.rnn_encoder_predict(rnn_input)
    observations['causal'] = causal.astype(np.float32)
    observations['hidden_h'] = hidden_h * done_mask.astype(np.float32)
    observations['hidden_c'] = hidden_c * done_mask.astype(np.float32)

    return observations

def test_model(model, manager:Manager, hook, time_steps=-1):
    """Evaluate ``model`` on every environment provided by ``hook``.

    For each test env, episodes are rolled out (deterministic actions) until
    ``test_eps_num_per_env`` completed episodes are recorded.  Per-step states
    are collected via ``hook.get_state`` and handed to ``hook.end_eps`` with a
    trailing 'success'/'fail' sentinel.  For recurrent policies, the final
    ``causal`` encoding of episodes on training envs is accumulated and
    plotted as a scatter (t-SNE) at the end.

    Args:
        model: trained policy exposing ``predict`` (stable-baselines3 style).
        manager: experiment Manager used for video recording and plotting.
        hook: evaluation hook providing envs, bookkeeping and callbacks.
        time_steps: training step count forwarded to ``hook.end_hook``
            (-1 when unknown).
    """

    # #############hook init#############
    hook.start_test(manager.model_parameters['train_envs'])
    # #############hook init#############
    # Accumulators for the scatter plot: encodings, train-env index labels,
    # and class names (one entry per distinct train env encountered).
    tsne_x,tsne_y,tsne_c = [],[],[]
    train_env_i = -1
    for _, _env_info in tqdm(enumerate(hook.test_envs)):
        # test env (single-env DummyVecEnv)
        env = hook.make_env(manager, _env_info)
        test_env = DummyVecEnv([env])

        # set train env: advance the label index only when this test env is
        # also a training env, so tsne_y stays aligned with train envs.
        if _env_info in hook.train_envs:
            train_env_i += 1

        # ###########hook env start###########
        hook.start_env(_env_info)
        # ###########hook env start###########

        if manager.model_parameters['save_video']:
            manager.enable_video()
        else:
            manager.disable_video()
        
        # Keep rolling out episodes until enough completed ones are recorded
        # for this env (incomplete/too-short episodes below don't count).
        while len(hook.test_infos[hook.encoder_env_info(_env_info)]['eps_states']) < manager.model_parameters['test_eps_num_per_env']:
            observations = test_env.reset()
            states = None
            episode_starts = np.ones((test_env.num_envs,), dtype=bool)
            _eps_states = []
            manager.reset_video()
            for eps_i in range(hook.max_step_num):
                manager.record_video(test_env)
                actions, states = model.predict(
                    observations,
                    state=states,
                    episode_start=episode_starts,
                    deterministic=True,
                )
                # Snapshot obs before stepping: next_observation needs the
                # previous hidden state for recurrent policies.
                prev_observations = copy.deepcopy(observations)
                observations, rewards, dones, infos = test_env.step(actions)
                observations = next_observation(model,prev_observations,actions,observations, dones)
                
                # NOTE(review): truthiness of `dones` assumes num_envs == 1
                # (a multi-element array would raise) — confirm.
                if not dones:
                    _eps_states.append(hook.get_state(test_env))

                # Episodes ending within the first few steps are discarded:
                # no 'success'/'fail' sentinel is appended, so the check
                # after this loop retries the episode.
                if dones and eps_i <= 5:
                    break

                elif dones:
                    # Collect the final causal encoding for train envs
                    # (capped at 30 recorded episodes per env class).
                    if 'hidden_h' in observations and _env_info in hook.train_envs and len(hook.test_infos[hook.encoder_env_info(_env_info)]['eps_states'])<=30:
                        # print("Test:", len(hook.test_infos[hook.encoder_env_info(_env_info)]['eps_states']))
                        tsne_x.append(observations['causal'])
                        tsne_y.append(train_env_i)
                        class_name = hook.encoder_env_info(_env_info)
                        if class_name not in tsne_c:
                            tsne_c.append(class_name)

                    # Terminal outcome sentinel consumed by hook.end_eps.
                    if infos[0]['is_success']:
                        _eps_states.append('success')
                    else:
                        _eps_states.append('fail')
                    break
            
            # Episode never terminated (hit max_step_num) or was discarded
            # as too short — don't record it.
            if len(_eps_states)==0 or (_eps_states[-1] !='success' and _eps_states[-1] !='fail'):
                continue

            # Save at most one video per env: recording is disabled after the
            # first successful episode is written.
            # NOTE(review): filename assumes _env_info is indexable with at
            # least two elements — confirm against hook.make_env.
            if _eps_states[-1] == 'success':
                manager.save_video(f'f{_env_info[0]},m{_env_info[1]}.mp4')
                manager.disable_video()

            # ###########hook eps end###########
            hook.end_eps(_env_info, _eps_states)
            # ###########hook eps end###########

        # ###########hook env end###########
        hook.end_env(_env_info, model.logger)
        # ###########hook env end###########

        sys.stdout.flush()
        test_env.close()

    if len(tsne_x) > 0:
        # Plot the scatter (t-SNE) of collected causal encodings.
        manager.plot_scatter(np.concatenate(tsne_x,axis=0),np.array(tsne_y),tsne_c)
    # ###########hook end###########
    hook.end_hook(manager, time_steps)
    # ###########hook end###########
