import numpy as np
import gym
import time
from nes_py_v1.wrappers.joypad_space import JoypadSpace
from gym.spaces import Box
from gym import Wrapper
from nes_py_v1.mario_env import SuperMarioBrosEnv
import cv2

# Discrete action set exposed to the agent. Each entry is the list of NES
# controller buttons held simultaneously for one emulator step; JoypadSpace
# maps the action index onto this table.
ACTIONS = [
	['NOOP'], # 0: press nothing
	['left'], # 1: left d-pad
	['left', 'A'], # 2: left + A
	['right'], # 3: right d-pad
	['right', 'A'], # 4: right + A
	['A'], # 5: A only
]

# Size of the discrete action space (used by callers to build policy heads).
action_num = len(ACTIONS)

def create_train_env(target, process_frame=True, skip=True, skip_cnt=4):
	"""Build a wrapped Super Mario Bros environment for training.

	The base emulator env is always wrapped with JoypadSpace (discrete
	actions) and CustomReward (reward shaping). On top of that:
	  - process_frame and skip  -> CustomProcessFrame + CustomSkipFrame
	  - process_frame only      -> CustomProcessFrame + CustomMultiFrame
	  - skip only               -> CustomSimpleSkip(skip_cnt)

	:param target: level selector forwarded to SuperMarioBrosEnv
	:param process_frame: grayscale/resize frames to 84x84 when True
	:param skip: enable action repetition / frame skipping when True
	:param skip_cnt: repeat count for CustomSimpleSkip (raw-frame path only)
	:return: the fully wrapped gym environment
	"""
	env = CustomReward(JoypadSpace(SuperMarioBrosEnv(target=target), ACTIONS))
	if process_frame:
		env = CustomProcessFrame(env, 84, 84)
		env = CustomSkipFrame(env) if skip else CustomMultiFrame(env)
	elif skip:
		env = CustomSimpleSkip(env, skip_cnt)
	return env

class CustomReward(Wrapper):
	"""Reward-shaping wrapper for the Mario environment.

	The env's native reward is discarded and replaced with a shaped signal
	built from the deltas of the emulator's progress counters (x position,
	score, lives), plus a small per-step cost, a tiny penalty for action 1,
	and a large terminal bonus/penalty depending on flag capture. The final
	reward is scaled down by 1000.
	"""

	def __init__(self, env=None):
		super(CustomReward, self).__init__(env)
		self.observation_space = Box(low=0, high=255, shape=(84, 84, 1))
		self._sync_counters()

	def _sync_counters(self):
		# Cache the emulator's current progress counters for delta rewards.
		self.curr_score = self.env.get_score()
		self.current_x = self.env.get_x_position()
		self.current_time = self.env.get_time()
		self.current_life = self.env.get_life()

	def step(self, action):
		# Refresh the cached counters right before stepping.
		self._sync_counters()

		state, reward, done, info = self.env.step(action)

		# Shaped reward replaces the env's native one entirely.
		shaped = 10 * (info['x_pos'] - self.current_x)
		shaped += info["score"] - self.curr_score
		shaped += 1000 * (info['life'] - self.current_life)

		if action == 1:
			# Slight discouragement for walking left.
			shaped -= 0.0001

		# Constant per-step living cost.
		shaped -= 0.01

		if done:
			# Huge terminal bonus for reaching the flag, huge penalty otherwise.
			shaped += 1000000 if info["flag_get"] else -1000000

		# Carry the post-step counters forward for the next delta.
		self.curr_score = info["score"]
		self.current_x = info["x_pos"]
		self.current_time = info["time"]
		self.current_life = info["life"]
		return state, shaped / 1000., done, info

	def reset(self):
		state = self.env.reset()
		self._sync_counters()
		return state

class CustomProcessFrame(Wrapper):
	"""Converts raw RGB frames to normalized grayscale (height, width, 1) arrays.

	:param env: wrapped environment emitting RGB frames
	:param width: target frame width in pixels
	:param height: target frame height in pixels
	"""
	def __init__(self, env, width, height):
		super(CustomProcessFrame, self).__init__(env)
		self.width = width
		self.height = height

	def process_frame(self, frame):
		"""Grayscale, resize to (width, height) and scale pixels into [0, 1].

		Returns a float array of shape (height, width, 1); a None frame
		yields an all-zero placeholder of the same shape.
		"""
		if frame is not None:
			frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
			# cv2.resize takes dsize=(width, height) and returns (height, width).
			frame = cv2.resize(frame, (self.width, self.height))[:, :, None] / 255.
			return frame
		else:
			# Bug fix: axes were (width, height, 1), which disagrees with the
			# (height, width, 1) shape cv2.resize produces for non-square sizes.
			return np.zeros((self.height, self.width, 1))

	def step(self, action):
		state, reward, done, info = self.env.step(action)
		# Bug fix: was a bare `process_frame(state)` call (NameError at runtime).
		state = self.process_frame(state)
		# NOTE(review): reward is divided by 1000 here on top of CustomReward's
		# own /1000. scaling — confirm the double scaling is intended.
		return state, reward / 1000., done, info

	def reset(self):
		# Bug fix: was a bare `process_frame(...)` call (NameError at runtime).
		return self.process_frame(self.env.reset())
		
class CustomSimpleSkip(Wrapper):
	"""Repeats each chosen action `skip` times and sums the rewards.

	If the episode terminates mid-repeat, the env is auto-reset and the
	pre-reset transition is returned to the caller.
	"""

	def __init__(self, env, skip=4):
		super(CustomSimpleSkip, self).__init__(env)
		self.skip = skip

	def step(self, action):
		accumulated = 0
		remaining = self.skip
		while remaining > 0:
			remaining -= 1
			state, reward, done, info = self.env.step(action)
			accumulated += reward
			if done:
				# Auto-reset on episode end; hand back the last pre-reset step.
				self.reset()
				break
		return state, accumulated, done, info
		
class CustomSkipFrame(Wrapper):
	"""Repeats each action `skip` times and stacks the resulting 84x84
	frames along the channel axis into an (84, 84, skip) observation.

	If the episode ends mid-repeat, the env is auto-reset and the freshly
	reset frame stack is returned together with the accumulated reward.
	"""

	def __init__(self, env, skip=4):
		super(CustomSkipFrame, self).__init__(env)
		self.observation_space = Box(low=0, high=255, shape=(84, 84, skip))
		self.skip = skip
		self.states = np.zeros((84, 84, skip), dtype=np.float32)

	def step(self, action):
		reward_sum = 0
		frames = []
		for _ in range(self.skip):
			state, reward, done, info = self.env.step(action)
			reward_sum += reward
			frames.append(state)
			if done:
				# Episode ended mid-skip: reset and return the reset stack.
				self.reset()
				return self.states.astype(np.float32), reward_sum, done, info
		self.states = np.concatenate(frames, axis=2)
		return self.states.astype(np.float32), reward_sum, done, info

	def reset(self):
		# Fill the whole stack with copies of the first frame of the episode.
		first = self.env.reset()
		self.states = np.concatenate([first] * self.skip, axis=2)
		return self.states.astype(np.float32)
		
class CustomMultiFrame(Wrapper):
	"""Maintains a sliding window of the last `skip` 84x84 frames as an
	(84, 84, skip) observation, stepping the env exactly once per call
	(no action repetition, unlike CustomSkipFrame).
	"""

	def __init__(self, env, skip=4):
		super(CustomMultiFrame, self).__init__(env)
		self.observation_space = Box(low=0, high=255, shape=(84, 84, skip))
		self.skip = skip
		self.states = np.zeros((84, 84, skip), dtype=np.float32)

	def step(self, action):
		state, reward, done, info = self.env.step(action)
		# Shift the window left by one frame, then write the newest frame
		# into the last channel (the wrapped-around channel is overwritten).
		shifted = np.roll(self.states, -1, axis=2)
		shifted[:, :, -1] = state.reshape(84, 84)
		self.states = shifted
		return self.states.astype(np.float32), reward, done, info

	def reset(self):
		# Seed the whole window with copies of the first frame of the episode.
		initial = self.env.reset()
		self.states = np.concatenate([initial] * self.skip, axis=2)
		return self.states.astype(np.float32)