import gym
import sys

# environment
sys.path.append('../highway-env')
import highway_env

# models and computation
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from collections import namedtuple

# Visualization
import matplotlib
import matplotlib.pyplot as plt
from tqdm import tnrange
from IPython import display as ipythondisplay
from pyvirtualdisplay import Display
from gym.wrappers import Monitor
import base64

# IO
from pathlib import Path


# # define a simple helper function for visualization of episodes
# display = Display(visible=0, size=(1400, 900))
# display.start()


# def show_video():
#     html = []
#     for mp4 in Path("video").glob("*.mp4"):
#         video_b64 = base64.b64encode(mp4.read_bytes())
#         html.append('''<video alt="{}" autoplay 
#                       loop controls style="height: 400px;">
#                       <source src="data:video/mp4;base64,{}" type="video/mp4" />
#                  </video>'''.format(mp4, video_b64.decode('ascii')))
#     ipythondisplay.display(ipythondisplay.HTML(data="<br>".join(html)))


# Make the environment, and run one episode with random actions.
# The Monitor wrapper records every episode as an .mp4 under ./video.

def show_video():
    """Display every recorded .mp4 in ./video as an inline HTML video (notebook helper).

    Restored here because the original definition was commented out above,
    which made the call below raise NameError at runtime.
    """
    html = []
    for mp4 in Path("video").glob("*.mp4"):
        video_b64 = base64.b64encode(mp4.read_bytes())
        html.append('''<video alt="{}" autoplay 
                      loop controls style="height: 400px;">
                      <source src="data:video/mp4;base64,{}" type="video/mp4" />
                 </video>'''.format(mp4, video_b64.decode('ascii')))
    ipythondisplay.display(ipythondisplay.HTML(data="<br>".join(html)))


env = gym.make("highway-v0")
# force=True clears previous recordings; record every episode, not just milestones.
env = Monitor(env, './video', force=True, video_callable=lambda episode: True)
env.reset()
done = False
while not done:
    action = env.action_space.sample()
    obs, reward, done, info = env.step(action)
    print("action  ", action)
env.close()
show_video()

# NOTE(review): the GoalEnv description that was here applies to goal-conditioned
# environments such as parking-v0 (dict observation with "observation",
# "desired_goal" and "achieved_goal" keys); highway-v0 returns a plain array
# observation — confirm which environment this script is meant to demonstrate.
print("Observation format:", obs)

print("working     ")


# # Experience collection
# # First, we randomly interact with the environment to produce a batch of experiences
# Transition = namedtuple('Transition', ['state', 'action', 'next_state'])

# def collect_interaction_data(env, size=1000, action_repeat=2):
#     data, done = [], True
#     for _ in tnrange(size, desc="Collect interaction data"):
#         action = env.action_space.sample()
#         for _ in range(action_repeat):
#             previous_obs = env.reset() if done else obs
#             obs, reward, done, info = env.step(action)
#             data.append(Transition(torch.Tensor(previous_obs["observation"]),
#                                    torch.Tensor(action),
#                                    torch.Tensor(obs["observation"])))
#     return data

# data = collect_interaction_data(env)
# print("Sample transition:", data[0])