import torch
import torch.nn as nn

import gym
from torch.distributions import Categorical


def mlp(sizes, activation, output_activation=nn.Identity):
    """Build a feed-forward network as an ``nn.Sequential``.

    Args:
        sizes: layer widths, e.g. ``[obs_dim, 16, 16, n_acts]``.
        activation: activation module class used after every hidden layer.
        output_activation: module class applied after the final layer
            (defaults to ``nn.Identity``, i.e. raw logits).

    Returns:
        ``nn.Sequential`` alternating ``nn.Linear`` and activation modules.
    """
    last_hidden = len(sizes) - 2
    modules = []
    for idx, (n_in, n_out) in enumerate(zip(sizes[:-1], sizes[1:])):
        modules.append(nn.Linear(n_in, n_out))
        # Hidden layers get `activation`; the output layer gets
        # `output_activation` instead.
        act_cls = activation if idx < last_hidden else output_activation
        modules.append(act_cls())
    return nn.Sequential(*modules)


def get_policy(obs):
    """Return the action distribution Categorical(logits) for `obs`.

    BUG FIX: the original built a brand-new gym env and a freshly
    randomly-initialized network on EVERY call, so two calls never shared
    parameters — the policy could not be sampled consistently or trained.
    The network is now created once, lazily, and cached on the function.

    Args:
        obs: float32 tensor of observation(s), shape (obs_dim,) or
            (batch, obs_dim).

    Returns:
        torch.distributions.Categorical over the env's discrete actions.
    """
    if not hasattr(get_policy, "_logits_net"):
        env = gym.make('CartPole-v0')
        obs_dim = env.observation_space.shape
        n_acts = env.action_space.n
        hidden_sizes = (16, 16)
        get_policy._logits_net = mlp(
            sizes=[obs_dim[0]] + list(hidden_sizes) + [n_acts],
            activation=nn.Tanh,
        )
    logits = get_policy._logits_net(obs)
    return Categorical(logits=logits)


def get_action(obs):
    """Sample one action from the policy for `obs`; returns a Python int."""
    dist = get_policy(obs)
    sampled = dist.sample()
    return sampled.item()


def compute_loss(obs, act, weights):
    """Pseudo-loss whose gradient is the policy-gradient estimate.

    Args:
        obs: batch of observations (float tensor).
        act: batch of taken actions (int tensor).
        weights: per-action weights, here R(tau) of the episode.

    Returns:
        Scalar tensor: -mean(log_prob(act) * weights).
    """
    log_probs = get_policy(obs).log_prob(act)
    weighted = log_probs * weights
    return -weighted.mean()

def vpg():
    """Run one epoch of the simplest vanilla policy gradient on CartPole.

    Fixes over the original:
      * The policy network is built ONCE (the original rebuilt a fresh,
        randomly-initialized network on every nested get_policy() call,
        which made consistent sampling and learning impossible).
      * `ep_rewards` is cleared when an episode ends (the original only
        reset obs/rew/done, so returns accumulated across episodes).
      * The collected batch is actually used for a gradient step
        (the original defined compute_loss but never called it).
    """
    env = gym.make('CartPole-v0')

    # Build the policy network and optimizer once; all closures below
    # share these same parameters.
    obs_dim = env.observation_space.shape
    n_acts = env.action_space.n
    hidden_sizes = (16, 16)
    logits_net = mlp(sizes=[obs_dim[0]] + list(hidden_sizes) + [n_acts],
                     activation=nn.Tanh)
    optimizer = torch.optim.Adam(logits_net.parameters(), lr=1e-2)

    def get_policy(obs):
        # Action distribution for observation(s) under the shared network.
        return Categorical(logits=logits_net(obs))

    def get_action(obs):
        # Sample one action; return it as a plain Python int for env.step.
        return get_policy(obs).sample().item()

    def compute_loss(obs, act, weights):
        # Pseudo-loss whose gradient equals the policy-gradient estimate.
        logp = get_policy(obs).log_prob(act)
        return -(logp * weights).mean()

    def train_one_epoch():
        batch_size = 10
        # batch data
        batch_obs = []
        batch_acts = []
        batch_weights = []  # for R(tau) weighting in policy gradient
        batch_rets = []
        batch_lens = []

        obs = env.reset()
        ep_rewards = []

        while True:
            batch_obs.append(obs.copy())
            act = get_action(torch.as_tensor(obs, dtype=torch.float32))
            obs, rew, done, _ = env.step(act)

            batch_acts.append(act)
            ep_rewards.append(rew)

            if done:
                # Record the finished trajectory's return and length.
                ep_ret, ep_len = sum(ep_rewards), len(ep_rewards)
                batch_rets.append(ep_ret)
                batch_lens.append(ep_len)

                # Every action in the episode is weighted by the full
                # episode return R(tau).
                batch_weights += [ep_ret] * ep_len

                # Reset for the next episode — including ep_rewards,
                # which the original forgot to clear.
                obs, ep_rewards = env.reset(), []
                if len(batch_obs) > batch_size:
                    break

        # One policy-gradient update on the collected batch.
        optimizer.zero_grad()
        loss = compute_loss(
            obs=torch.as_tensor(batch_obs, dtype=torch.float32),
            act=torch.as_tensor(batch_acts, dtype=torch.int32),
            weights=torch.as_tensor(batch_weights, dtype=torch.float32),
        )
        loss.backward()
        optimizer.step()
        return loss.item(), batch_rets, batch_lens

    train_one_epoch()
