import tensorflow as tf
from tensorflow import keras
import numpy as np
from marioEnv import ACTIONS, action_num
import random

class Policy:
	"""Convolutional softmax policy: stacked 84x84x4 frames -> a probability
	distribution over the discrete action set ACTIONS."""

	def __init__(self, dropout=False):
		"""Build the conv -> dense -> softmax network.

		Args:
			dropout: when True, insert a Dropout(0.3) layer between the
				flattened conv features and the dense head.
		"""
		self.policy = keras.Sequential()
		self.conv0 = keras.layers.Conv2D(filters=32, kernel_size=(3,3), strides=2, activation='relu', input_shape=(84,84,4))
		self.conv1 = keras.layers.Conv2D(filters=32, kernel_size=(3,3), strides=2, activation='relu')
		self.conv2 = keras.layers.Conv2D(filters=64, kernel_size=(3,3), strides=2, activation='relu')
		self.conv3 = keras.layers.Conv2D(filters=64, kernel_size=(3,3), strides=2, activation='relu')
		self.dense = keras.layers.Dense(1024, activation='relu')
		# Output layer applies softmax, so self.policy(x) yields probabilities.
		self.out_layer = keras.layers.Dense(action_num, activation=tf.nn.softmax)
		self.policy.add(self.conv0)
		self.policy.add(self.conv1)
		self.policy.add(self.conv2)
		self.policy.add(self.conv3)
		self.policy.add(keras.layers.Flatten())
		if dropout:
			self.policy.add(keras.layers.Dropout(0.3))
		self.policy.add(self.dense)
		self.policy.add(self.out_layer)

	def sample_action(self, observation):
		"""Sample an action index from the policy's output distribution.

		Args:
			observation: batched network input (presumably shape
				(1, 84, 84, 4) — TODO confirm against caller).

		Returns:
			(action, info): the sampled action index and a human-readable
			string listing each action's probability and the chosen action.
		"""
		probs = self.policy(observation).numpy().ravel()
		# float32 softmax output can sum to slightly != 1.0, which makes
		# np.random.choice raise ValueError; renormalize defensively.
		probs = probs / probs.sum()
		info = ""
		for idx, name in enumerate(ACTIONS):
			info += f"{name}\t{int(probs[idx] * 100)}%\n"
		# NOTE: tf.random.categorical expects *unnormalized* logits, but our
		# output layer already applies softmax, so we sample with
		# np.random.choice on the probabilities instead.
		action = np.random.choice(len(ACTIONS), p=probs)
		#action = np.argmax(probs)
		info += f"choose action: {ACTIONS[action]}"
		return action, info

	def save(self, path):
		"""Save network weights to `path` (Keras save_weights format)."""
		self.policy.save_weights(path)

	def load(self, path):
		"""Load network weights previously written by save()."""
		self.policy.load_weights(path)

	def fit(self, X, Y, test_rate=0.2):
		"""Supervised (behavior-cloning style) training of the policy.

		Args:
			X: input array; first axis indexes samples.
			Y: one-hot targets matching the softmax output (trained with
				categorical crossentropy).
			test_rate: fraction of samples held out for validation
				(0 <= test_rate < 1).
		"""
		# Random train/validation split by sample index.
		total_data_num = int(X.shape[0])
		indices = list(range(total_data_num))
		train_data_indices = random.sample(indices, int((1 - test_rate) * total_data_num))
		test_data_indices = list(set(indices) - set(train_data_indices))

		X_train = X[train_data_indices]
		Y_train = Y[train_data_indices]
		print(X_train.shape)
		print(Y_train.shape)

		X_test = X[test_data_indices]
		Y_test = Y[test_data_indices]
		print(X_test.shape)
		print(Y_test.shape)
		self.policy.compile(optimizer=keras.optimizers.Adam(1e-4), loss='categorical_crossentropy', metrics=['accuracy'])
		self.policy.fit(X_train, Y_train, epochs=200, batch_size=128, validation_data=(X_test, Y_test))

