#!/usr/bin/env python3
'''
本章算法和03章差别不大，主要在于有多个环境，加快变异的过程
'''
import sys
import gymnasium as gym
import roboschool
import collections
import copy
import time
import numpy as np

import torch
import torch.nn as nn
import torch.multiprocessing as mp

from tensorboardX import SummaryWriter


NOISE_STD = 0.01  # std-dev of the Gaussian noise added to weights per mutation
POPULATION_SIZE = 2000 # number of candidate networks evaluated per generation
PARENTS_COUNT = 10 # top-ranked individuals used as parents for the next generation
WORKERS_COUNT = 6 # number of worker processes
SEEDS_PER_WORKER = POPULATION_SIZE // WORKERS_COUNT # candidates each worker evaluates per generation
MAX_SEED = 2**32 - 1  # seeds are drawn uniformly from [0, MAX_SEED)


class Net(nn.Module):
    """Policy network: three tanh-activated linear layers, obs -> action.

    The final Tanh keeps every action component in [-1, 1].
    """

    def __init__(self, obs_size, act_size, hid_size=64):
        super(Net, self).__init__()

        # Layer widths, input to output; consecutive pairs define the
        # Linear layers (created in the same order as before, so weight
        # initialisation from a given torch seed is unchanged).
        dims = [obs_size, hid_size, hid_size, act_size]
        layers = []
        for n_in, n_out in zip(dims[:-1], dims[1:]):
            layers.append(nn.Linear(n_in, n_out))
            layers.append(nn.Tanh())
        self.mu = nn.Sequential(*layers)

    def forward(self, x):
        return self.mu(x)


def evaluate(env, net):
    '''
    Run one full episode with `net` acting deterministically in `env`.

    The file imports `gymnasium as gym`, whose `reset()` returns
    (obs, info) and whose `step()` returns a 5-tuple with separate
    `terminated`/`truncated` flags; the original code used the classic
    Gym API and would crash on a gymnasium env. Both APIs are handled
    here so old-style envs (e.g. roboschool ones) keep working.

    Returns:
        (reward, steps): total episode reward and episode length.
    '''
    reset_result = env.reset()
    # Gymnasium: (obs, info); classic Gym: just obs.
    obs = reset_result[0] if isinstance(reset_result, tuple) else reset_result
    reward = 0.0
    steps = 0
    while True:
        # Add a batch dimension; avoids the deprecated FloatTensor([obs]) wrap.
        obs_v = torch.as_tensor(np.asarray(obs, dtype=np.float32)).unsqueeze(0)
        action_v = net(obs_v)
        step_result = env.step(action_v.data.numpy()[0])
        if len(step_result) == 5:
            # Gymnasium: the episode ends on termination OR truncation.
            obs, r, terminated, truncated, _ = step_result
            done = terminated or truncated
        else:
            # Classic Gym 4-tuple: (obs, reward, done, info).
            obs, r, done, _ = step_result
        reward += r
        steps += 1
        if done:
            break
    return reward, steps


def mutate_net(net, seed, copy_net=True):
    """Apply one Gaussian-noise mutation to every parameter of `net`.

    Args:
        net: source network.
        seed: integer seed that fully determines the noise, so the same
            mutation can be replayed deterministically in any process.
        copy_net: when True, mutate a deep copy and leave `net` untouched;
            when False, mutate `net` in place.

    Returns:
        The mutated network (the copy, or `net` itself when copy_net=False).
    """
    new_net = copy.deepcopy(net) if copy_net else net
    # Use a private RandomState instead of np.random.seed(): for a given seed
    # it produces the exact same legacy MT19937 stream, but no longer clobbers
    # the process-global NumPy RNG as a side effect.
    rng = np.random.RandomState(seed)
    for p in new_net.parameters():
        # Noise tensor with the same shape as the parameter.
        noise_t = torch.tensor(rng.normal(size=p.data.size()).astype(np.float32))
        p.data += NOISE_STD * noise_t
    return new_net


def build_net(env, seeds):
    '''
    Deterministically reconstruct a network from its seed history.

    The first seed fixes the fresh network's initial weights (via
    torch.manual_seed before construction); each following seed replays
    one mutation step on top, in order.
    '''
    init_seed, *mutation_seeds = seeds
    # Seeding torch first makes the new Net's weights a pure function
    # of init_seed.
    torch.manual_seed(init_seed)
    net = Net(env.observation_space.shape[0], env.action_space.shape[0])
    # Replay every recorded mutation in place -- no copies needed here.
    for mutation_seed in mutation_seeds:
        net = mutate_net(net, mutation_seed, copy_net=False)
    return net


# Result record a worker sends back to the master:
#   seeds  - tuple of seeds identifying the evaluated network
#   reward - total episode reward
#   steps  - episode length in environment steps
OutputItem = collections.namedtuple('OutputItem', field_names=['seeds', 'reward', 'steps'])


def worker_func(input_queue, output_queue):
    '''
    Worker-process loop: each worker owns a private environment instance.

    Receives lists of seed tuples from the master via input_queue, rebuilds
    (or incrementally mutates) the corresponding networks, evaluates each one
    for a single episode, and pushes OutputItem(seeds, reward, steps) results
    to output_queue.  A `None` message shuts the worker down.
    '''
    env = gym.make("RoboschoolHalfCheetah-v1")
    # NOTE(review): seed tuples grow by one element every generation, so cache
    # keys (and the rebuild cost on a cache miss) grow without bound over time.
    cache = {}

    while True:
        # Seed tuples of the candidates to evaluate this generation.
        parents = input_queue.get()
        if parents is None:
            break
        new_cache = {}
        for net_seeds in parents:
            if len(net_seeds) > 1:
                # More than one seed: this candidate is a mutated child.
                # Look up its parent network (all seeds but the last) in the
                # cache built during the previous generation.
                net = cache.get(net_seeds[:-1])
                if net is not None:
                    # Cache hit: only the newest seed must be applied as a
                    # single mutation step (build_net would replay them all).
                    net = mutate_net(net, net_seeds[-1])
                else:
                    # Cache miss: replay the full seed history from scratch.
                    net = build_net(env, net_seeds)
            else:
                # A single seed identifies a freshly initialised network.
                net = build_net(env, net_seeds)
            # Key the network by its full seed tuple for the next generation.
            new_cache[net_seeds] = net
            reward, steps = evaluate(env, net)
            output_queue.put(OutputItem(seeds=net_seeds, reward=reward, steps=steps))
        cache = new_cache


if __name__ == "__main__":
    mp.set_start_method('spawn')
    writer = SummaryWriter(comment="-cheetah-ga")

    # 子进程输入队列，存放的是给子进程的变异种子
    input_queues = []
    # output_queue 子进程返回给主进程的参数队列
    # 内容：（所使用的种子list，奖励，步数）
    output_queue = mp.Queue(maxsize=WORKERS_COUNT)
    workers = []
    for _ in range(WORKERS_COUNT):
        input_queue = mp.Queue(maxsize=1)
        input_queues.append(input_queue)
        w = mp.Process(target=worker_func, args=(input_queue, output_queue))
        w.start()
        # 根据每个子进程的的输入种子的大小SEED_PER_WORKER，得到每个子进程的种子列表
        # seeds存放的东西是[(seed1，，，),(seed2，，，),...]
        seeds = [(np.random.randint(MAX_SEED),) for _ in range(SEEDS_PER_WORKER)]
        # 将得到的种子列表放入输入队列
        input_queue.put(seeds)

    gen_idx = 0
    elite = None
    while True:
        t_start = time.time()
        batch_steps = 0 # 统计所有子进程的步数
        population = [] # 存放的内容是[(seed1, reward1),(seed2, reward2),...]
        while len(population) < SEEDS_PER_WORKER * WORKERS_COUNT:
            # 收集到的种群数量小于目标大小
            # 则继续从子进程中收集
            out_item = output_queue.get()
            population.append((out_item.seeds, out_item.reward))
            batch_steps += out_item.steps
        if elite is not None:
            # 如果评估最好的网络存在，则将其加入种群
            population.append(elite)
        # 对所有种群进行奖励排序
        population.sort(key=lambda p: p[1], reverse=True)
        # 得到排名前PARENTS_COUNT的种群的奖励平均值、最大值、标准差
        rewards = [p[1] for p in population[:PARENTS_COUNT]]
        reward_mean = np.mean(rewards)
        reward_max = np.max(rewards)
        reward_std = np.std(rewards)
        # 记录到tensorboard中
        writer.add_scalar("reward_mean", reward_mean, gen_idx)
        writer.add_scalar("reward_std", reward_std, gen_idx)
        writer.add_scalar("reward_max", reward_max, gen_idx)
        writer.add_scalar("batch_steps", batch_steps, gen_idx)
        writer.add_scalar("gen_seconds", time.time() - t_start, gen_idx)
        # 计算速度
        speed = batch_steps / (time.time() - t_start)
        writer.add_scalar("speed", speed, gen_idx)
        print("%d: reward_mean=%.2f, reward_max=%.2f, reward_std=%.2f, speed=%.2f f/s" % (
            gen_idx, reward_mean, reward_max, reward_std, speed))

        # 保存最好的种子
        elite = population[0]
        for worker_queue in input_queues:
            # 遍历每个子进程的输入队列
            seeds = []
            for _ in range(SEEDS_PER_WORKER):
                # 循环大小是每个进程需要的种子数
                # 得到随机数，随机数的范围是需要遗传父母的个数之内
                parent = np.random.randint(PARENTS_COUNT)
                # 得到下一个随机数，随机数的范围是0到32位正数最大值
                next_seed = np.random.randint(MAX_SEED)
                # 将list(population[parent][0])元组转list
                # 将父母的种子和新种子合并为一个list后再转换为元组，推进到seeds队列中
                # 将新的种子推进到seeds队列
                seeds.append(tuple(list(population[parent][0]) + [next_seed]))
            # 将得到的新种子推进到子进程的输入队列中
            worker_queue.put(seeds)
        gen_idx += 1

    pass
