'''
Sample collection is adapted to handle the multi-playground case
(10 parallel playgrounds per scene, each buffered separately).
'''
import math
import os
import time
from collections import namedtuple
import logging
import numpy as np
from numpy.core.numeric import True_
import torch
import torch.nn as nn
import torch.optim as opt
from torch import Tensor

from env_wrapper import Multi_scene, Single_scene

# One environment step as stored in Memory; Memory.sample() transposes a list
# of these into a single Transition of per-field tuples.
Transition = namedtuple(
    'Transition',
    ['state', 'value', 'action', 'logproba', 'mask', 'next_state', 'reward'],
)
# Small constant guarding against division by zero (advantage normalization).
EPS = 1e-10


class RunningStat(object):
    """Online mean/variance tracker using Welford's algorithm.

    Internal state (attribute names kept for pickle compatibility):
    `_n` sample count, `_M` running mean, `_S` running sum of squared
    deviations from the mean.
    """

    def __init__(self, shape):
        self._n = 0
        self._M = np.zeros(shape)
        self._S = np.zeros(shape)

    def push(self, x):
        """Fold one sample (same shape as the tracker) into the statistics."""
        sample = np.asarray(x)
        assert sample.shape == self._M.shape
        self._n += 1
        if self._n == 1:
            # first sample initializes the mean; S stays zero
            self._M[...] = sample
            return
        prev_mean = self._M.copy()
        self._M[...] = prev_mean + (sample - prev_mean) / self._n
        self._S[...] = self._S + (sample - prev_mean) * (sample - self._M)

    @property
    def n(self):
        """Number of samples pushed so far."""
        return self._n

    @property
    def mean(self):
        """Running mean, same shape as the tracker."""
        return self._M

    @property
    def var(self):
        """Unbiased sample variance; falls back to mean**2 until n > 1."""
        if self._n > 1:
            return self._S / (self._n - 1)
        return np.square(self._M)

    @property
    def std(self):
        """Elementwise standard deviation derived from `var`."""
        return np.sqrt(self.var)

    @property
    def shape(self):
        """Shape of the tracked statistics."""
        return self._M.shape


class ZFilter:
    """
    y = (x-mean)/std
    using running estimates of mean,std

    Optionally clips the normalized output to [-clip, clip].
    """

    def __init__(self, shape, demean=True, destd=True, clip=10.0):
        self.demean = demean
        self.destd = destd
        self.clip = clip
        self.rs = RunningStat(shape)

    def __call__(self, x, update=True):
        """Normalize `x`; when `update` is True, `x` is first folded into the stats."""
        if update:
            self.rs.push(x)
        out = x
        if self.demean:
            out = out - self.rs.mean
        if self.destd:
            # epsilon keeps the division finite when std is zero
            out = out / (self.rs.std + 1e-8)
        if self.clip:
            out = np.clip(out, -self.clip, self.clip)
        return out

    def output_shape(self, input_space):
        """Shape passthrough: normalization does not change the observation shape."""
        return input_space.shape


class ActorCritic(nn.Module):
    """Actor-critic network for continuous control.

    The actor maps a state to a tanh-squashed action mean plus a learned,
    state-independent log standard deviation; the critic maps the same state
    to a scalar value estimate.
    """

    def __init__(self, num_inputs, num_outputs, hidden_size=512, layer_norm=True):
        """
        :param num_inputs: size of one observation vector
        :param num_outputs: size of the action vector
        :param hidden_size: width of the two hidden layers of each head
        :param layer_norm: apply orthogonal weight initialization (see note below)
        """
        super(ActorCritic, self).__init__()

        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        self.hidden_size = hidden_size
        # Observation normalizer; shape (10, num_inputs) matches the stacked
        # observation of the 10 parallel playgrounds in the env wrapper.
        self.running_state = ZFilter((10, num_inputs), clip=5.0)

        self.actor_fc1 = nn.Linear(num_inputs, self.hidden_size)
        self.actor_fc2 = nn.Linear(self.hidden_size, self.hidden_size)
        self.actor_fc3 = nn.Linear(self.hidden_size, self.num_outputs)
        # log std of the Gaussian policy; learned but independent of the state
        self.actor_logstd = nn.Parameter(torch.zeros(1, num_outputs))

        self.critic_fc1 = nn.Linear(num_inputs, self.hidden_size)
        self.critic_fc2 = nn.Linear(self.hidden_size, self.hidden_size)
        self.critic_fc3 = nn.Linear(self.hidden_size, 1)

        if layer_norm:
            # NOTE: despite the name, `layer_norm` performs orthogonal weight
            # initialization, not torch.nn.LayerNorm. The 0.01 gain keeps the
            # initial policy output small.
            self.layer_norm(self.actor_fc1, std=1.0)
            self.layer_norm(self.actor_fc2, std=1.0)
            self.layer_norm(self.actor_fc3, std=0.01)

            self.layer_norm(self.critic_fc1, std=1.0)
            self.layer_norm(self.critic_fc2, std=1.0)
            self.layer_norm(self.critic_fc3, std=1.0)

    @staticmethod
    def layer_norm(layer, std=1.0, bias_const=0.0):
        """Orthogonally initialize `layer.weight` with gain `std`; set bias to a constant."""
        torch.nn.init.orthogonal_(layer.weight, std)
        torch.nn.init.constant_(layer.bias, bias_const)

    def forward(self, states):
        """
        run policy network (actor) as well as value network (critic)
        :param states: a Tensor2 represents states
        :return: 3 Tensor2 (action mean, action log-std, state value)
        """
        action_mean, action_logstd = self._forward_actor(states)
        critic_value = self._forward_critic(states)
        # squash the mean into [-1, 1] because the environment expects
        # actions in that range
        action_mean = torch.tanh(action_mean)
        return action_mean, action_logstd, critic_value

    def _forward_actor(self, states):
        """Actor head: returns (raw action mean, log-std broadcast to the mean's shape)."""
        x = torch.tanh(self.actor_fc1(states))
        x = torch.tanh(self.actor_fc2(x))
        action_mean = self.actor_fc3(x)
        action_logstd = self.actor_logstd.expand_as(action_mean)
        return action_mean, action_logstd

    def _forward_critic(self, states):
        """Critic head: returns the scalar value estimate per state."""
        x = torch.tanh(self.critic_fc1(states))
        x = torch.tanh(self.critic_fc2(x))
        critic_value = self.critic_fc3(x)
        return critic_value

    def select_action(self, action_mean, action_logstd, return_logproba=True):
        """
        given mean and std, sample an action from normal(mean, std)
        also returns probability of the chosen action

        :return: (action, logproba); logproba is None when return_logproba is
            False. (Bug fix: the original raised NameError in that case
            because `logproba` was never bound.)
        """
        action_std = torch.exp(action_logstd)
        action = torch.normal(action_mean, action_std)
        logproba = None
        if return_logproba:
            logproba = self._normal_logproba(
                action, action_mean, action_logstd, action_std)
        return action, logproba

    @staticmethod
    def _normal_logproba(x, mean, logstd, std=None):
        """Log-density of `x` under Normal(mean, exp(logstd)), summed over dim 1."""
        if std is None:
            std = torch.exp(logstd)

        std_sq = std.pow(2)
        logproba = - 0.5 * math.log(2 * math.pi) - \
            logstd - (x - mean).pow(2) / (2 * std_sq)
        return logproba.sum(1)

    def get_logproba(self, states, actions):
        """
        return probability of chosen the given actions under corresponding states of current network
        :param states: Tensor
        :param actions: Tensor
        """
        action_mean, action_logstd = self._forward_actor(states)
        logproba = self._normal_logproba(actions, action_mean, action_logstd)
        return logproba


def tanh(x):
    """Numerically stable hyperbolic tangent.

    The original computed (e^x - e^-x) / (e^x + e^-x) directly, which
    overflows to nan for |x| greater than roughly 710; np.tanh saturates
    cleanly to +/-1 instead.

    :param x: scalar or ndarray
    :return: tanh(x), same shape as the input
    """
    return np.tanh(x)


def sigmoid(x):
    """Numerically stable logistic sigmoid.

    The original 1 / (1 + exp(-x)) overflows for large negative x; here the
    exponential argument is always non-positive, so no overflow occurs:
        x >= 0:  1 / (1 + e^-x)
        x <  0:  e^x / (1 + e^x)

    :param x: scalar or ndarray
    :return: sigmoid(x); a scalar input yields a scalar, like the original
    """
    arr = np.asarray(x)
    z = np.exp(-np.abs(arr))
    out = np.where(arr >= 0, 1.0 / (1.0 + z), z / (1.0 + z))
    # `[()]` unwraps a 0-d array back to a scalar for scalar inputs and is a
    # no-op for real arrays
    return out[()]


class Memory(object):
    """Append-only rollout buffer holding Transition tuples."""

    def __init__(self):
        self.memory = []

    def push(self, *args):
        """Build a Transition from the given fields and append it."""
        transition = Transition(*args)
        self.memory.append(transition)

    def sample(self):
        """Return every stored step, transposed into one Transition of per-field tuples."""
        columns = zip(*self.memory)
        return Transition(*columns)

    def __len__(self):
        """Number of stored transitions."""
        return len(self.memory)


class args(object):
    # Hyper-parameter namespace for PPO training; used as a plain attribute
    # container (passed to ppo() as the class itself, never instantiated).
    seed = 17                    # torch RNG seed
    num_episode = 1000           # number of outer training iterations
    batch_size = 10240           # minimum environment steps collected per iteration
    max_step_per_round = 200     # step cap for a single rollout
    gamma = 0.995                # discount factor
    lamda = 0.95                 # GAE lambda
    log_num_episode = 1          # log every N iterations
    num_epoch = 10               # optimization epochs over each collected batch
    minibatch_size = 1024        # SGD minibatch size
    clip = 0.2                   # initial PPO clip range
    loss_coeff_value = 0.5       # weight of the value loss
    loss_coeff_entropy = 0.01    # weight of the entropy term
    lr = 0.0003                  # initial Adam learning rate
    # tricks
    schedule_adam = 'linear'     # linearly anneal the learning rate to 0
    schedule_clip = 'linear'     # linearly anneal the clip range to 0
    layer_norm = True            # orthogonal init of network layers (see ActorCritic)
    state_norm = True            # normalize observations with the running ZFilter
    advantage_norm = True        # standardize advantages per batch
    lossvalue_norm = True        # scale the value loss by the return spread




def ppo(args):
    """Train a PPO agent on the multi-playground environment.

    Each episode gathers at least ``args.batch_size`` steps across 10
    parallel playgrounds, estimates advantages with GAE, then optimizes the
    clipped PPO surrogate for several minibatch epochs.

    :param args: hyper-parameter namespace (see the ``args`` class)
    :return: list of per-episode dicts: episode index, cumulative steps,
        mean episode reward, mean episode length
    """
    logging.basicConfig(
        level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logger = logging.getLogger(__name__)
    current_time = time.strftime("%Y-%m-%d__%H-%M-%S", time.localtime())
    print("开始训练，时间:{}".format(current_time),flush=True)
    print(args,flush=True)

    # env = Single_scene("args")
    env = Multi_scene("args")

    num_inputs = env.obs_shape
    num_actions = env.action_shape

    torch.manual_seed(args.seed)
    network = ActorCritic(num_inputs, num_actions, layer_norm=args.layer_norm)
    optimizer = opt.Adam(network.parameters(), lr=args.lr)
    # running_state = ZFilter((num_inputs,), clip=5.0)

    # keeps the mean reward of every episode
    reward_record = []
    global_steps = 0
    lr_now = args.lr
    clip_now = args.clip

    for i_episode in range(args.num_episode):

        # one rollout buffer per playground so trajectories are not interleaved
        memory_list = [Memory() for _ in range(10)]

        num_steps = 0
        reward_list = []
        len_list = []
        while num_steps < args.batch_size:
            state = env.reset()
            # 0 = still collecting data for this playground, 1 = finished
            calculate_done = [0]*10

            if args.state_norm:
                state = network.running_state(state)
            # this reward keeps the average over the 10 playgrounds
            reward_sum = 0
            for t in range(args.max_step_per_round):
                action_mean, action_logstd, value = network(
                    Tensor(state))
                action, logproba = network.select_action(
                    action_mean, action_logstd)
                action = action.data.numpy()
                logproba = logproba.data.numpy()
                next_state, reward, done, _ = env.step(action)
                if args.state_norm:
                    next_state = network.running_state(next_state)

                # English summary of the (Chinese) block note below:
                # 1. the scene runs 10 playgrounds; an agent's transitions are
                #    buffered only while its playground has not finished,
                # 2. `calculate_done` lags `done` by one iteration so that the
                #    terminal transition itself is still recorded,
                # 3. the step reward is the mean over currently-alive agents.
                '''
                下面代码的简单说明
                1. 目前场景里面是10个playground，这里做了一个简单的处理：根据每个智能体是否done，把对应的数据放进buffer里面，
                done是一个10个元素的list，0表示对应智能体没有结束episode，
                2. 为什么要用calculate_done判断：我们需要保存每个智能体结束时的奖励信息，如果用done[idx_done] == 0统计，
                最后结束状态下的信息无法保存。使用calculate_done可以解决这个问题，当环境done的时候，让这个值变为0，
                这样能够保存done时刻下智能体的信息（因为此时calculate_done还没有变为1，要等数据读进去才变成1）
                3. 奖励的记录，奖励 = sum（目前存活智能体的奖励）/ (存货智能体的数目)，即当前时刻，所有playground智能体的奖励均值
                '''
                playground_reward = []
                for idx in range(len(done)):
                    # put data of agents that have not terminated into the buffer
                    # tag: note that 0 here means "not terminated"
                    if calculate_done[idx] == 0:
                        # if done[idx] == 0:
                        mask = 0 if done[idx] else 1
                        playground_reward.append(reward[idx])
                        memory_list[idx].push(state[idx], value[idx], action[idx], logproba[idx],
                                    mask, next_state[idx], reward[idx])
                    if done[idx] == 1 and calculate_done[idx] == 0 :
                        calculate_done[idx] = 1



                # mean reward of the playgrounds still alive at this step
                if playground_reward == []:
                    reward_sum += 0
                else:
                    reward_sum += np.mean(playground_reward)

                # stop once every playground reports done
                if sum(done) == len(done):
                    break

                state = next_state


            # NOTE(review): step accounting assumes all 10 playgrounds ran the
            # full t steps; playgrounds that finished early are over-counted.
            num_steps += (t*10 + 1)
            global_steps += (t*10 + 1)
            reward_list.append(reward_sum)
            len_list.append(t*10 + 1)

        reward_record.append({
            'episode': i_episode,
            'steps': global_steps,
            'meanepreward': np.mean(reward_list),
            'meaneplen': np.mean(len_list)})



        # merge the 10 per-playground buffers into one flat training batch
        memory = Memory()
        for i in range(len(memory_list)):
            memory.memory.extend(memory_list[i].memory)

        batch = memory.sample()
        batch_size = len(memory)

        rewards = Tensor(batch.reward)
        values = Tensor(batch.value)
        masks = Tensor(batch.mask)
        actions = Tensor(batch.action)
        states = Tensor(batch.state)
        oldlogproba = Tensor(batch.logproba)

        # Tensor(int) allocates uninitialized 1-D buffers; every slot is
        # filled by the backward scan below
        returns = Tensor(batch_size)
        deltas = Tensor(batch_size)
        advantages = Tensor(batch_size)

        prev_return = 0
        prev_value = 0
        prev_advantage = 0

        # backward scan: discounted returns, TD residuals, and GAE
        # advantages; mask == 0 at episode boundaries resets the recursion
        # so values do not leak across trajectories
        for i in reversed(range(batch_size)):
            returns[i] = rewards[i] + args.gamma * prev_return * masks[i]
            deltas[i] = rewards[i] + args.gamma * \
                prev_value * masks[i] - values[i]
            # ref: https://arxiv.org/pdf/1506.02438.pdf (generalization advantage estimate)
            advantages[i] = deltas[i] + args.gamma * \
                args.lamda * prev_advantage * masks[i]

            prev_return = returns[i]
            prev_value = values[i]
            prev_advantage = advantages[i]
        if args.advantage_norm:
            # standardize advantages; EPS guards the division
            advantages = (advantages - advantages.mean()) / \
                (advantages.std() + EPS)

        for i_epoch in range(int(args.num_epoch * batch_size / args.minibatch_size)):
            # sample from current batch
            minibatch_ind = np.random.choice(
                batch_size, args.minibatch_size, replace=False)
            minibatch_states = states[minibatch_ind]
            minibatch_actions = actions[minibatch_ind]
            minibatch_oldlogproba = oldlogproba[minibatch_ind]
            minibatch_newlogproba = network.get_logproba(
                minibatch_states, minibatch_actions)
            minibatch_advantages = advantages[minibatch_ind]
            minibatch_returns = returns[minibatch_ind]
            minibatch_newvalues = network._forward_critic(
                minibatch_states).flatten()

            # clipped PPO surrogate objective
            ratio = torch.exp(minibatch_newlogproba - minibatch_oldlogproba)
            surr1 = ratio * minibatch_advantages
            surr2 = ratio.clamp(1 - clip_now, 1 + clip_now) * \
                minibatch_advantages
            loss_surr = - torch.mean(torch.min(surr1, surr2))

            # value loss, optionally scaled by the return spread to keep its
            # magnitude comparable across tasks
            if args.lossvalue_norm:
                minibatch_return_6std = 6 * minibatch_returns.std()
                loss_value = torch.mean(
                    (minibatch_newvalues - minibatch_returns).pow(2)) / minibatch_return_6std
            else:
                loss_value = torch.mean(
                    (minibatch_newvalues - minibatch_returns).pow(2))

            # E[p * log p] estimate (negative entropy); added with a positive
            # coefficient, so minimizing the total loss raises entropy
            loss_entropy = torch.mean(
                torch.exp(minibatch_newlogproba) * minibatch_newlogproba)

            total_loss = loss_surr + args.loss_coeff_value * \
                loss_value + args.loss_coeff_entropy * loss_entropy
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()

        # linearly anneal the clip range and learning rate toward zero
        if args.schedule_clip == 'linear':
            ep_ratio = 1 - (i_episode / args.num_episode)
            clip_now = args.clip * ep_ratio

        if args.schedule_adam == 'linear':
            ep_ratio = 1 - (i_episode / args.num_episode)
            lr_now = args.lr * ep_ratio
            # set learning rate
            # ref: https://stackoverflow.com/questions/48324152/
            for g in optimizer.param_groups:
                g['lr'] = lr_now

        if i_episode % args.log_num_episode == 0:

            logger.info('Finished episode: {} Reward: {:.4f} total_loss = {:.4f} = {:.4f} + {} * {:.4f} + {} * {:.4f}'
                        .format(i_episode, reward_record[-1]['meanepreward'], total_loss.data, loss_surr.data, args.loss_coeff_value,
                                loss_value.data, args.loss_coeff_entropy, loss_entropy.data))


        # checkpoint the full module (pickles the running-state filter too)
        if i_episode % 100 == 0:

            torch.save(network, "{}_episode--meanstd".format(i_episode))

    return reward_record


if __name__ == "__main__":
    # Script entry point: print the PID so training can be monitored/killed,
    # then run PPO with the default hyper-parameters.
    print("开始")
    # (removed duplicate `import os` — os is imported at the top of the file)
    print(os.getpid(), flush=True)
    reward_record = ppo(args)
    print(reward_record)
