import numpy as np
import time
from marioEnv import create_train_env, ACTIONS
from nes_py_v1.nes_env import GameStateManager

class Node:
	"""A single node of the A* search tree.

	Holds the emulator snapshot handle, the action that led here from the
	parent, the accumulated reward, and the bookkeeping flags used to prune
	branches whose every continuation ends in death.
	"""
	def __init__(self):
		self.game_state = None       # opaque emulator save-state handle (set by the search)
		self.parent = None           # parent Node, or None for the root
		self.total_reward = 0        # cumulative reward along the path from the root
		self.heuristic_reward = 0    # heuristic bonus used only when ranking open nodes
		self.success = False         # True once this node reached the level flag
		self.death = False           # True when the agent died producing this node
		self.action = -1             # action index taken from the parent (-1 for the root)
		self.depth = 0               # number of tree edges from the root
		self.dead_child_count = 0    # children proven to be dead ends

	def get_action_list(self):
		"""Return the sequence of actions from the root down to this node."""
		actions = []
		node = self
		# Walk parent links up to the root, then reverse into root-first order.
		while node.parent is not None:
			actions.append(node.action)
			node = node.parent
		actions.reverse()
		return actions
		
class NodePool:
	"""Free-list of Node objects so steady-state searching does not allocate."""
	def __init__(self, start_capacity):
		# Pre-allocate the expected working set of nodes up front.
		self._pool = [Node() for _ in range(start_capacity)]

	def get(self):
		"""Return a node reset to its pristine state, allocating one if the pool is empty."""
		if self._pool:
			node = self._pool.pop()
			# Re-run __init__ to reset the node.  The previous hand-written
			# field-by-field reset duplicated Node.__init__ and would silently
			# miss any field later added to Node.
			node.__init__()
			return node
		return Node()

	def release(self, node):
		"""Give a node back to the pool for reuse."""
		self._pool.append(node)

class AStar:
	"""Best-first (A*-style) search over emulator save-states.

	Each iteration picks the most promising node from the open list,
	rewinds a dedicated simulator to that node's snapshot, tries every
	action for ``skip`` frames, and adds surviving children back to the
	open list.  Branches whose every child dies are marked dead and that
	information is propagated up the tree.
	"""
	def __init__(self, simulator, action_num, skip, max_search_node, max_iteration_cnt):
		self.save_mgr = GameStateManager(max_search_node)
		self.node_pool = NodePool(max_search_node)
		self.max_search_node = max_search_node      # capacity of save-state slots
		self.max_iteration_cnt = max_iteration_cnt  # expansion budget per search() call
		self.simulator_env = simulator              # scratch env used for look-ahead
		self.simulator_env.set_name("astar")
		self.action_num = action_num                # size of the discrete action space
		self.skip = skip                            # frames each action is repeated
		self.open_list = set()                      # frontier: nodes still expandable
		self.close_list = set()                     # fully expanded nodes
		self.iter_cnt = 0

	def release(self):
		"""Return every node (and the open nodes' save slots) to their pools."""
		# Only open nodes still own a save slot; expanded nodes released
		# theirs when they moved to the close list.
		for node in self.open_list:
			self.save_mgr.release(node.game_state)
			self.node_pool.release(node)
		for node in self.close_list:
			self.node_pool.release(node)
		self.open_list.clear()
		self.close_list.clear()

	# the best live node in close table
	def get_best_node(self):
		"""Return the highest-reward expanded node that is alive, has no dead
		children, and is not the root; None when no such node exists."""
		best_node = None
		max_reward = -9999999
		for node in self.close_list:
			if not node.death and node.dead_child_count == 0 and node.depth > 0:
				if node.total_reward > max_reward:
					max_reward = node.total_reward
					best_node = node
		# Debug dump of the whole close list.
		for node in self.close_list:
			print(node.get_action_list(), node.dead_child_count, node.total_reward)
		return best_node

	def search(self, env):
		"""Search from ``env``'s current state; return the best action list found."""
		self.simulator_env.reset()
		# Seed the open list with the caller's current emulator state as root.
		root_node = self.node_pool.get()
		root_node.game_state = self.save_mgr.save(env)
		self.open_list.add(root_node)
		# Iterative expansion within the iteration and save-slot budgets.
		self.iter_cnt = 0
		while len(self.open_list) > 0:
			if self.iter_cnt >= self.max_iteration_cnt or len(self.open_list) + self.action_num >= self.max_search_node:
				break
			selected_node = self.select()
			if selected_node is None:
				# No live node left to expand; stop instead of crashing.
				break
			self.expand(selected_node)
			self.iter_cnt += 1
		best_node = self.get_best_node()
		# BUGFIX: get_best_node() can return None (e.g. every branch died);
		# previously this raised AttributeError on best_node.get_action_list().
		if best_node is None:
			print("no live node found")
			self.release()
			return []
		action_list = best_node.get_action_list()
		print("final action")
		print(action_list, best_node.total_reward, best_node.dead_child_count)
		self.release()
		return action_list

	def expand(self, node):
		"""Try every action from ``node`` and add surviving children to the open list."""
		for i in range(self.action_num):
			# Rewind the simulator to the parent's snapshot, then repeat action i.
			self.save_mgr.load(self.simulator_env, node.game_state)
			skip_reward = 0
			done = False
			info = {}
			for _ in range(self.skip):
				_, reward, done, info = self.simulator_env.step(i)
				skip_reward += reward
				if done:
					break
			child_node = self.node_pool.get()
			child_node.parent = node
			child_node.depth = node.depth + 1
			child_node.game_state = self.save_mgr.save(self.simulator_env)
			child_node.total_reward = node.total_reward + skip_reward
			child_node.heuristic_reward = -child_node.depth * 0  # heuristic currently disabled
			child_node.action = i
			if done:
				if info['flag_get']:
					child_node.success = True
				else:
					child_node.death = True
					node.dead_child_count += 1
				self.simulator_env.clear_done()
			if child_node.death:
				# BUGFIX: dead children previously leaked both their pooled Node
				# and the save slot taken above (they were never added to any
				# list, so release() never saw them); recycle both here.
				self.save_mgr.release(child_node.game_state)
				self.node_pool.release(child_node)
			else:
				self.open_list.add(child_node)

		if node.dead_child_count == self.action_num:
			# Every child of this node dies: propagate the dead-end fact upward.
			cur_node = node.parent
			if cur_node is not None:
				cur_node.dead_child_count += 1
				while cur_node.dead_child_count == self.action_num:
					cur_node = cur_node.parent
					if cur_node is None:
						break
					cur_node.dead_child_count += 1

		# Move the expanded node to the close list and free its save slot;
		# the node object itself is recycled when search() finishes.
		self.save_mgr.release(node.game_state)
		self.close_list.add(node)
		self.open_list.remove(node)

	# Pick the most promising open node to expand next.
	def select(self):
		"""Return the open node with the best total + heuristic reward, or None."""
		# BUGFIX: best_node was previously unbound when no live node was
		# found, so the fall-through raised UnboundLocalError instead of
		# returning a sentinel the caller can check.
		best_node = None
		max_reward = -9999999
		for node in self.open_list:
			if node.death:
				print("bug???")  # dead nodes should never be on the open list
				continue
			node_reward = node.total_reward + node.heuristic_reward
			if node_reward > max_reward:
				max_reward = node_reward
				best_node = node
		if max_reward == -9999999:
			print("no best????", self.iter_cnt)
			for node in self.open_list:
				print(node.get_action_list(), node.total_reward)
		return best_node