import torch
import gym
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from   torch.distributions import Categorical

# Fix the RNG seeds so training runs are reproducible.
np.random.seed(1)
torch.manual_seed(1)

class Net(nn.Module):
	"""Policy network: maps a state vector to unnormalized action scores.

	Architecture: Linear -> tanh -> Linear.  The output is raw logits;
	softmax is applied by the caller.
	"""

	def __init__(self, n_feature, n_hidden, n_output):  # e.g. (4, 10, 2)
		super(Net, self).__init__()
		# Attribute names kept as layer1/layer2 so saved state dicts stay loadable.
		self.layer1 = nn.Linear(n_feature, n_hidden)
		self.layer2 = nn.Linear(n_hidden, n_output)

	def forward(self, x):
		"""Return action logits for the input batch ``x``."""
		hidden = torch.tanh(self.layer1(x))
		return self.layer2(hidden)

class PolicyGradient:
	"""Monte-Carlo policy-gradient (REINFORCE) agent.

	Buffers one full episode of (state, action, reward) transitions, then
	performs a single gradient step weighted by the normalized discounted
	returns (see ``__call__``).
	"""

	def __init__(self, n_actions, n_features, n_hidden=10,
            learning_rate=0.01, reward_decay=0.95):
		self.n_actions  = n_actions         # size of the action space
		self.n_features = n_features        # size of the state space
		self.n_hidden   = n_hidden          # hidden-layer width
		self.lr         = learning_rate     # optimizer learning rate
		self.gamma      = reward_decay      # reward discount factor

		# Episode buffers: observed states, chosen actions, immediate rewards.
		self.ep_obs, self.ep_as, self.ep_rs = [], [], []
		self._device = "cuda" if torch.cuda.is_available() else "cpu"
		self._build_net()
		self.training = True

	def _build_net(self):
		"""Create the policy network and its Adam optimizer."""
		self.net       = Net(self.n_features, self.n_hidden,
                       		self.n_actions).to(self._device)
		self.optimizer = torch.optim.Adam(self.net.parameters(), lr=self.lr)

	def choose_action(self, observation):
		"""Pick an action for a 1-D state vector ``observation``.

		During training the action is sampled from the softmax policy;
		during inference the highest-probability action is taken.
		"""
		observ = torch.Tensor(observation[np.newaxis, :]).to(self._device)
		probs  = F.softmax(self.net(observ), dim=-1)

		if self.training:
			# Sample stochastically from the policy distribution.
			dist   = Categorical(probs[0].cpu().detach())
			action = dist.sample().item()
		else:
			# Greedy: take the most probable action.
			action = int(probs[0].argmax().cpu().detach().numpy())
		return action

	def store_transition(self, s, a, r):
		"""Append one (state, action, reward) transition to the episode buffers."""
		self.ep_obs.append(s)
		self.ep_as.append(a)
		self.ep_rs.append(r)

	def save(self, model_path):
		"""Serialize the whole policy network module to ``model_path``."""
		torch.save(self.net, model_path)

	def __call__(self):
		"""Run one REINFORCE update over the buffered episode.

		Returns the normalized discounted returns and clears the buffers,
		ready for the next episode.
		"""
		discounted_reward = self._discount_and_norm_rewards()
		obs  = torch.Tensor(np.vstack(self.ep_obs)).to(self._device)  # batch of states
		acts = torch.Tensor(np.array(self.ep_as)).to(self._device)    # actions taken
		vt   = torch.Tensor(discounted_reward).to(self._device)       # per-step returns

		# One full episode is collected before each training step.
		ret = self.net(obs)

		# Per-sample cross-entropy = -log pi(a_t | s_t); weighting it by the
		# return vt gives the REINFORCE gradient.  reduction="none" replaces
		# the deprecated/removed `reduce=False` argument.
		# Gradient/loss derivation: https://zhuanlan.zhihu.com/p/563053450
		criterion = F.cross_entropy(ret, acts.long(), reduction="none")
		loss      = torch.mean(criterion * vt)

		self.optimizer.zero_grad()
		loss.backward()
		self.optimizer.step()
		self.ep_obs, self.ep_as, self.ep_rs = [], [], []  # start a fresh episode

		return discounted_reward

	def _discount_and_norm_rewards(self):
		"""Compute normalized discounted returns for the buffered rewards.

		discounted[t] = r[t] + gamma*r[t+1] + gamma^2*r[t+2] + ...
		i.e. each action is credited with all later rewards, decayed by gamma.
		"""
		# Explicit float dtype: np.zeros_like on an int reward list would
		# silently truncate the discounted values to integers.
		discounted_ep_rs = np.zeros(len(self.ep_rs), dtype=np.float64)
		running_add = 0.0
		for t in reversed(range(len(self.ep_rs))):  # iterate [len-1, 0]
			running_add = running_add * self.gamma + self.ep_rs[t]
			discounted_ep_rs[t] = running_add

		# Normalize to zero mean / unit variance; skip the division when the
		# std is zero (e.g. a single-step episode) to avoid NaNs.
		discounted_ep_rs -= np.mean(discounted_ep_rs)
		std = np.std(discounted_ep_rs)
		if std > 0:
			discounted_ep_rs /= std
		return discounted_ep_rs

# Build the CartPole environment; `.unwrapped` strips the outer wrappers
# (including the time limit) so the episode only ends when `done` is set.
env = gym.make('CartPole-v1')
env = env.unwrapped

# print(env.action_space)
# print(env.observation_space)
# print(env.observation_space.high)
# print(env.observation_space.low)

# Agent sized from the environment: action count and state dimensionality.
model = PolicyGradient(n_actions=env.action_space.n, 
                       n_features=env.observation_space.shape[0],
                       learning_rate=0.01, reward_decay=0.95)

def model_train(model, model_path):
	"""Train ``model`` on the global ``env`` with REINFORCE.

	Runs up to 3000 episodes, performing one policy-gradient update per
	finished episode; saves the network to ``model_path`` and stops once
	an episode's total reward reaches 30000.
	"""
	for i in range(3000):
		observ = env.reset()[0]
		while True:
			action = model.choose_action(observ)
			next_observ, reward, done, truncted, info = env.step(action)
			# Store the state the action was chosen FROM: (s_t, a_t, r_t).
			# The original overwrote `observ` with the successor state before
			# storing, pairing a_t with s_{t+1} and corrupting the trajectory.
			model.store_transition(observ, action, reward)
			observ = next_observ

			if done:     # episode finished -> train on the collected rollout
				ep_rs_sum = sum(model.ep_rs)
				if (i + 1) % 10 == 0:
					print(f"step = {i}, total reward = {ep_rs_sum}")

				if ep_rs_sum >= 30000:
					model.save(model_path)
					print(f"step = {i}, total reward = {ep_rs_sum}")
					return
				model()  # one REINFORCE update; also clears the buffers
				break


def play(model):
    """Run one episode on the global ``env`` using ``model``'s policy.

    Records every transition on the model, logs progress every 1000
    steps, and prints the step count and total reward when the episode
    terminates.
    """
    observ = env.reset()[0]
    step, done = 0, False
    while not done:
        action = model.choose_action(observ)
        observ, reward, done, truncted, info = env.step(action)
        model.store_transition(observ, action, reward)
        step += 1
        if step % 1000 == 0:
            print(f"Game continue after {step} actions!")
    ep_rs_sum = sum(model.ep_rs)
    print(f"Game over after {step} actions, total reward = {ep_rs_sum}")
        
        
if __name__ == "__main__":
    model_path = "cart_pole.pth"
    # model_train(model, model_path)
    
    net            = torch.load(model_path)
    model.net      = net
    model.training = False
    play(model) 