#!/usr/bin/env python3
'''
待验证

训练记录：
20241004： 经过8小时的训练，目前训练奖励趋势是缓慢上升，不过训练分数依旧是-20+多，可能总体训练次数很多，但是主线程轮数太慢，目前到了第500代（有可能ga方法不适合解决这个动作数和观察空间较大的游戏）。todo 发现代码存在问题，测试模型的代码没有运行，目前还未保存最好的模型，理论上至少会保存一次

训练分数卡在-26分左右很久，最高的训练分数也卡在-25分，感觉无法继续提升，应该是ga方法不适合解决动作空间较高，观察空间较高的问题上，先暂时停止训练，解决todo在继续训练

todo 增加可持续化训练方法，通过两个长时间训练看看效果

20241016：训练分数达到-28，测试分数达到-38，可能是训练的时间不够久，且不支持持久化训练，需要进一步验证
'''
import gymnasium as gym
import argparse
import itertools
import collections
import copy
import time
import numpy as np
import os
from numpy.random import default_rng
import ptan
import torch
import torch.nn as nn
import torch.multiprocessing as mp

from tensorboardX import SummaryWriter


NOISE_STD = 0.005 # std-dev scale of the Gaussian mutation noise added to weights/biases
POPULATION_SIZE = 2000 # individuals evaluated per generation
PARENTS_COUNT = 10 # top-N individuals kept as parents for the next generation
WORKERS_COUNT = 2 # number of worker processes
SEEDS_PER_WORKER = POPULATION_SIZE // WORKERS_COUNT # seed tuples each worker evaluates per generation
MAX_SEED = 2**32 - 1 # seeds are drawn from [0, MAX_SEED) via rng.integers
TEST_FREQUENCY = 3 # run the held-out model evaluation every N generations


class MultiNoiseLinear(nn.Linear):
    """Linear layer carrying one independent noise perturbation per batch row.

    The buffers hold a stack of ``dim`` noise matrices/vectors so a single
    batched forward pass evaluates ``dim`` differently-mutated copies of the
    same base network at once.
    """

    def set_noise_dim(self, dim, device="cpu"):
        """(Re)allocate the noise buffers for ``dim`` batch rows.

        Buffers are created lazily here (not in __init__) because the batch
        size is only known when a generation of children is evaluated.
        Contents are uninitialized until sample_noise_row/zero_noise is called.
        """
        assert isinstance(dim, int)
        assert dim > 0
        weight_shape = (dim, self.out_features, self.in_features)
        bias_shape = (dim, self.out_features)
        # Buffers (not Parameters): they are state, not trainable weights.
        self.register_buffer('noise', torch.FloatTensor(*weight_shape).to(device))
        self.register_buffer('noise_bias', torch.FloatTensor(*bias_shape).to(device))

    def sample_noise_row(self, row, device="cpu"):
        """Fill noise row ``row`` with fresh scaled Gaussian samples.

        Uses numpy's global RNG, so the caller controls reproducibility by
        seeding np.random first. Weight noise is drawn before bias noise —
        keep this order, it fixes the RNG stream layout.
        """
        drawn_w = np.random.normal(size=self.weight.data.size()).astype(np.float32)
        drawn_b = np.random.normal(size=self.bias.data.size()).astype(np.float32)
        self.noise[row].copy_(NOISE_STD * torch.tensor(drawn_w).to(device))
        self.noise_bias[row].copy_(NOISE_STD * torch.tensor(drawn_b).to(device))

    def zero_noise(self):
        """Clear all noise rows (used for noise-free evaluation)."""
        for buf in (self.noise, self.noise_bias):
            buf.zero_()

    def forward(self, x):
        # Base affine transform with the shared (unmutated) weights.
        out = super(MultiNoiseLinear, self).forward(x)
        # Per-row noise contribution: batched matmul of the (dim, out, in)
        # noise stack with x viewed as (dim, in, 1), squeezed back to (dim, out).
        row_noise = torch.matmul(self.noise, x.data.unsqueeze(-1)).squeeze(-1)
        # Add noise through .data so autograd never tracks it (GA needs no grads).
        out.data.add_(row_noise + self.noise_bias)
        return out


class Net(nn.Module):
    """Three-layer tanh policy network built from noise-capable linear layers.

    Output is scaled by 2, presumably to match the environment's action
    range — TODO confirm against Pusher-v4's action space bounds.
    """

    def __init__(self, obs_size, act_size, hid_size=64):
        super(Net, self).__init__()
        self.nonlin = nn.Tanh()
        self.l1 = MultiNoiseLinear(obs_size, hid_size)
        self.l2 = MultiNoiseLinear(hid_size, hid_size)
        self.l3 = MultiNoiseLinear(hid_size, act_size)

    def _layers(self):
        # Internal helper: the noise layers in forward order.
        return (self.l1, self.l2, self.l3)

    def forward(self, x):
        out = x
        for layer in self._layers():
            out = self.nonlin(layer(out))
        return out * 2

    def set_noise_seeds(self, seeds, device="cpu"):
        """Allocate one noise row per seed and sample all rows deterministically."""
        n_rows = len(seeds)
        # Buffers are sized lazily here rather than at construction time.
        for layer in self._layers():
            layer.set_noise_dim(n_rows, device)
        for row, seed in enumerate(seeds):
            # One seed drives all three layers' samples for this row,
            # in layer order l1, l2, l3 (order fixes the RNG stream).
            np.random.seed(seed)
            for layer in self._layers():
                layer.sample_noise_row(row, device)

    def zero_noise(self, batch_size, device="cpu"):
        """Allocate and zero the noise buffers for noise-free inference."""
        for layer in self._layers():
            layer.set_noise_dim(batch_size, device)
            layer.zero_noise()


def evaluate(env, net, device="cpu"):
    """Run one full episode of *env* under *net* greedily.

    Returns (total_reward, step_count) for the episode; stops on either
    termination or truncation.
    """
    total_reward = 0.0
    step_count = 0
    obs, _ = env.reset()
    done = trunc = False
    while not (done or trunc):
        obs_v = torch.FloatTensor(np.array([obs])).to(device)
        action_v = net(obs_v)
        obs, r, done, trunc, _ = env.step(action_v.data.cpu().numpy()[0])
        total_reward += r
        step_count += 1
    return total_reward, step_count


def evaluate_batch(envs, net, device="cpu"):
    """Evaluate one episode in each env, batching all forward passes.

    Row i of the network's batched output drives envs[i]; finished envs keep
    their last observation in the batch (their output rows are ignored).
    Returns (rewards, steps) lists aligned with *envs*.
    """
    n = len(envs)
    observations = [env.reset()[0] for env in envs]
    total_rewards = [0.0] * n
    step_counts = [0] * n
    finished = [False] * n

    while not all(finished):
        batch_v = torch.FloatTensor(np.array(observations)).to(device)
        actions = net(batch_v).data.cpu().numpy()
        for idx, env in enumerate(envs):
            if finished[idx]:
                continue
            new_obs, r, done, trunc, _ = env.step(actions[idx])
            observations[idx] = new_obs
            total_rewards[idx] += r
            step_counts[idx] += 1
            if done or trunc:
                finished[idx] = True
    return total_rewards, step_counts


def mutate_net(net, seed, copy_net=True):
    """Apply one seeded Gaussian mutation to every parameter of *net*.

    Seeds numpy's global RNG, so the same (net, seed) pair always produces
    the same mutation. With copy_net=True the original is left untouched and
    a mutated deep copy is returned; otherwise *net* is mutated in place.
    """
    target = copy.deepcopy(net) if copy_net else net
    np.random.seed(seed)
    for param in target.parameters():
        # One normal draw per parameter tensor, in parameters() order —
        # the order fixes the RNG stream, do not reorder.
        perturbation = np.random.normal(size=param.data.size()).astype(np.float32)
        param.data.add_(NOISE_STD * torch.from_numpy(perturbation))
    return target


def build_net(env, seeds):
    """Deterministically reconstruct a network from its seed lineage.

    seeds[0] seeds torch's RNG for the ancestor's initial weights; every
    following seed replays one in-place mutation on top. The same tuple
    always rebuilds byte-identical weights.
    """
    root_seed, *mutation_seeds = seeds
    torch.manual_seed(root_seed)
    net = Net(env.observation_space.shape[0], env.action_space.shape[0])
    for mutation_seed in mutation_seeds:
        mutate_net(net, mutation_seed, copy_net=False)
    return net


# Schema of a worker result: (seed lineage tuple, episode reward, step count).
# NOTE(review): workers currently put plain tuples on the output queue (see
# worker_func); this namedtuple is never instantiated in this file.
OutputItem = collections.namedtuple('OutputItem', field_names=['seeds', 'reward', 'steps'])


def worker_func(input_queue, output_queue, device="cpu"):
    """Worker process: evaluate seed lineages from input_queue and put
    (seeds, reward, steps) tuples on output_queue.

    A seed tuple encodes a full lineage: seeds[0] builds the ancestor
    network, each subsequent seed is one mutation on top of it.
    """
    env_pool = [gym.make("Pusher-v4")]

    # First generation: unlike the plain GA variant, each item is a
    # single-seed tuple. Build each ancestor network directly, evaluate it
    # once with zeroed noise, and send the result back to the main process.
    parents = input_queue.get()
    for seed in parents:
        net = build_net(env_pool[0], seed).to(device)
        net.zero_noise(batch_size=1, device=device)
        reward, steps = evaluate(env_pool[0], net, device)
        output_queue.put((seed, reward, steps))

    while True:
        # mp.Queue.get() removes the item from the queue, so this blocks
        # until the main process sends the next generation's seeds
        # (or None as a shutdown sentinel).
        parents = input_queue.get()
        if parents is None:
            break
        # groupby only merges *consecutive* equal keys, so sort first to
        # bring children that share the same parent lineage together.
        parents.sort()
        for parent_seeds, children_iter in itertools.groupby(parents, key=lambda s: s[:-1]):
            # children_iter yields the full seed tuples whose prefix (all but
            # the last element) equals parent_seeds; the last element of each
            # tuple is that child's own mutation seed.
            batch = list(children_iter)
            children_seeds = [b[-1] for b in batch]
            # Rebuild the shared parent network once, then evaluate all of
            # its children in a single batched forward pass: each child's
            # mutation lives in one row of the layers' noise buffers.
            net = build_net(env_pool[0], parent_seeds).to(device)
            net.set_noise_seeds(children_seeds, device)
            batch_size = len(children_seeds)
            # Grow the environment pool lazily to match the batch size.
            while len(env_pool) < batch_size:
                env_pool.append(gym.make("Pusher-v4"))
            rewards, steps = evaluate_batch(env_pool[:batch_size], net, device)
            for seeds, reward, step in zip(batch, rewards, steps):
                output_queue.put((seeds, reward, step))


def test_net(net, env, count=10, device="cpu"):
    '''
    测试网络，专门用于测试训练的结果，不需要进行探索

    net: 网络
    env: 环境
    count: 测试次数
    device: 设备

    return： （平均奖励，平均步数）
    '''
    rewards = 0.0
    steps = 0
    net.zero_noise(batch_size=1, device=device)  # 确保噪音被置零
    for _ in range(count):
        # 重置环境
        obs, _ = env.reset()
        while True:
            obs_v = ptan.agent.float32_preprocessor([obs]).to(device)
            # 计算均值，在高斯分布中，均值是所有的分布中，最有可能的值，所以使用均值作为执行的动作值
            mu_v = net(obs_v)[0]
            action = mu_v.squeeze(dim=0).data.cpu().numpy()
            obs, reward, done, trunc, _ = env.step(action)
            rewards += reward
            steps += 1
            if done or trunc:
                break
    return rewards / count, steps / count


if __name__ == "__main__":
    mp.set_start_method('spawn')
    parser = argparse.ArgumentParser()
    # Bug fix: with action='store_true' the default must be False — the old
    # default=True made the flag a no-op and always selected CUDA.
    parser.add_argument("--cuda", default=False, action='store_true', help="enable CUDA")
    args = parser.parse_args()
    writer = SummaryWriter(comment="-pusher-ga-batch")
    device = "cuda" if args.cuda else "cpu"

    rng = default_rng()
    test_env = gym.make("Pusher-v4")
    save_path = os.path.join("saves", "ga-Pusher")
    os.makedirs(save_path, exist_ok=True)

    # One private input queue per worker, one shared output queue.
    input_queues = []
    output_queue = mp.Queue(maxsize=WORKERS_COUNT)
    workers = []
    for _ in range(WORKERS_COUNT):
        input_queue = mp.Queue(maxsize=1)
        input_queues.append(input_queue)
        w = mp.Process(target=worker_func, args=(input_queue, output_queue, device))
        w.start()
        workers.append(w)  # keep the process handle (previously this list stayed empty)
        # Generation 0: single-seed tuples, each describing one ancestor network.
        seeds = [(rng.integers(0, MAX_SEED, dtype=np.int64),) for _ in range(SEEDS_PER_WORKER)]
        input_queue.put(seeds)

    gen_idx = 0
    elite = None
    best_reward = float('-inf')
    while True:
        t_start = time.time()
        batch_steps = 0
        population = []
        # Block until every worker has reported its whole share of the generation.
        while len(population) < SEEDS_PER_WORKER * WORKERS_COUNT:
            seeds, reward, steps = output_queue.get()
            population.append((seeds, reward))
            batch_steps += steps
        if elite is not None:
            # Elitism: the previous generation's best individual competes
            # again unchanged, so the best lineage found so far is never lost.
            population.append(elite)
        # Rank the population by reward, best first.
        population.sort(key=lambda p: p[1], reverse=True)
        # Summary statistics over the top PARENTS_COUNT individuals.
        rewards = [p[1] for p in population[:PARENTS_COUNT]]
        reward_mean = np.mean(rewards)
        reward_max = np.max(rewards)
        reward_std = np.std(rewards)
        gen_seconds = time.time() - t_start
        speed = batch_steps / gen_seconds
        writer.add_scalar("reward_mean", reward_mean, gen_idx)
        writer.add_scalar("reward_std", reward_std, gen_idx)
        writer.add_scalar("reward_max", reward_max, gen_idx)
        writer.add_scalar("batch_steps", batch_steps, gen_idx)
        writer.add_scalar("gen_seconds", gen_seconds, gen_idx)
        writer.add_scalar("speed", speed, gen_idx)
        print("%d: reward_mean=%.2f, reward_max=%.2f, reward_std=%.2f, speed=%.2f f/s" % (
            gen_idx, reward_mean, reward_max, reward_std, speed))

        elite = population[0]
        # Breed the next generation: each child extends a random top parent's
        # seed lineage with one fresh mutation seed.
        for worker_queue in input_queues:
            seeds = []
            for _ in range(SEEDS_PER_WORKER):
                parent = np.random.randint(PARENTS_COUNT)
                next_seed = rng.integers(0, MAX_SEED, dtype=np.int64)
                seeds.append(tuple(list(population[parent][0]) + [next_seed]))
            worker_queue.put(seeds)

        # Periodic held-out evaluation of the current parents (runs at
        # generation 0 and every TEST_FREQUENCY generations thereafter).
        if gen_idx % TEST_FREQUENCY == 0:
            best_net = None
            for i in range(PARENTS_COUNT):
                candidate_seeds = population[i][0]
                # build_net is deterministic for a given seed tuple, so build
                # once and re-evaluate repeatedly — rebuilding an identical
                # net inside the loop (as before) was pure waste.  Episodes
                # themselves remain stochastic, so repeated runs still differ.
                current_net = build_net(test_env, candidate_seeds)
                for _ in range(10):
                    reward, steps = test_net(current_net, test_env, count=50)
                    if reward > best_reward:
                        best_reward = reward
                        best_net = current_net
                        print(f"Best net evaluated in generation {gen_idx}: reward={reward:.2f}, steps={steps:.2f}")
            if best_net is not None:
                torch.save(best_net.state_dict(), os.path.join(save_path, f"best_model_gen_{gen_idx}_reward_{best_reward}.pth"))
                print(f"Saved best model at generation {gen_idx}")

            # Persist resume state — previously this dict was built and then
            # silently discarded, so training could never be resumed.
            checkpoints = {
                "gen_idx": gen_idx,
                "rng_state": rng.bit_generator.state,
            }
            torch.save(checkpoints, os.path.join(save_path, "checkpoint.pth"))
        gen_idx += 1
