import os
import sys
import time
import torch
import numpy as np
import torch.multiprocessing as mp  # torch.multiprocessing extends multiprocessing of Python
from copy import deepcopy
from multiprocessing import Process, Pipe

from elegantrl.train.config import Config, build_env
from elegantrl.train.replay_buffer import ReplayBuffer
from elegantrl.train.evaluator import Evaluator, get_cumulative_rewards_and_steps

# On Windows, set an env var to avoid Anaconda's duplicated-OpenMP-DLL initialization error
if os.name == 'nt':  # if is WindowOS (Windows NT)
    """Fix bug about Anaconda in WindowOS
    OMP: Error #15: Initializing libiomp5md.dll, but found libiomp5md.dll already initialized.
    """
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

'''train'''

# Train an agent in a single process.
def train_agent(args: Config):
    """Train an agent in a single process: explore, update, and evaluate in one loop.

    Builds the env, agent, replay buffer (off-policy only) and evaluator from
    `args`, then alternates `explore_env` / `update_net` until `args.break_step`
    total steps are reached or a file named `stop` appears in `args.cwd`.
    """
    # prepare the working directory / seeds before anything else
    args.init_before_training()
    # inference-only by default; gradients are enabled just around update_net()
    torch.set_grad_enabled(False)

    '''init environment'''
    env = build_env(args.env_class, args.env_args, args.gpu_id)

    '''init agent'''
    agent = args.agent_class(args.net_dims, args.state_dim, args.action_dim, gpu_id=args.gpu_id, args=args)
    # load a previously saved model from cwd if one exists (if_save=False means load)
    agent.save_or_load_agent(args.cwd, if_save=False)

    '''init agent.last_state'''
    state = env.reset()
    if args.num_envs == 1:
        # single env: reset() returns a 1D numpy array; add a batch dimension
        assert state.shape == (args.state_dim,)
        assert isinstance(state, np.ndarray)
        state = torch.tensor(state, dtype=torch.float32, device=agent.device).unsqueeze(0)
    else:
        # vectorized env: reset() returns a batched tensor; move it to the agent's device
        assert state.shape == (args.num_envs, args.state_dim)
        assert isinstance(state, torch.Tensor)
        state = state.to(agent.device)
    # either way, last_state must end up as a (num_envs, state_dim) tensor on the agent's device
    assert state.shape == (args.num_envs, args.state_dim)
    assert isinstance(state, torch.Tensor)
    agent.last_state = state.detach()

    '''init buffer'''
    if args.if_off_policy:
        buffer = ReplayBuffer(
            gpu_id=args.gpu_id,
            num_envs=args.num_envs,
            max_size=args.buffer_size,
            state_dim=args.state_dim,
            # discrete actions are stored as a single index column
            action_dim=1 if args.if_discrete else args.action_dim,
            if_use_per=args.if_use_per,
            args=args,
        )
        # collect random transitions before training starts
        buffer_items = agent.explore_env(env, args.horizon_len * args.eval_times, if_random=True)
        buffer.update(buffer_items)  # warm up for ReplayBuffer
    else:
        # on-policy: training data is a plain list, overwritten every iteration
        buffer = []

    '''init evaluator'''
    # fall back to the training env class/args when no separate eval env is configured
    eval_env_class = args.eval_env_class if args.eval_env_class else args.env_class
    eval_env_args = args.eval_env_args if args.eval_env_args else args.env_args
    eval_env = build_env(eval_env_class, eval_env_args, args.gpu_id)
    evaluator = Evaluator(cwd=args.cwd, env=eval_env, args=args, if_tensorboard=False)

    '''train loop'''
    cwd = args.cwd
    break_step = args.break_step
    horizon_len = args.horizon_len
    if_off_policy = args.if_off_policy
    if_save_buffer = args.if_save_buffer
    # everything needed below has been copied out; drop args to avoid accidental use
    del args

    if_train = True
    while if_train:
        # collect `horizon_len` steps of experience with the current policy
        buffer_items = agent.explore_env(env, horizon_len)

        # mean of buffer_items[2] used as the exploration reward signal.
        # NOTE(review): for off-policy tuples index 2 holds rewards; confirm the
        # on-policy explore_env tuple layout — index 2 may be logprobs there.
        exp_r = buffer_items[2].mean().item()
        if if_off_policy:
            buffer.update(buffer_items)
        else:
            # on-policy: replace the buffer contents with the fresh rollout
            buffer[:] = buffer_items

        # gradients are only needed while updating the networks
        torch.set_grad_enabled(True)
        logging_tuple = agent.update_net(buffer)
        torch.set_grad_enabled(False)

        # evaluate the current actor and append to the training log
        evaluator.evaluate_and_save(actor=agent.act, steps=horizon_len, exp_r=exp_r, logging_tuple=logging_tuple)
        # keep training until the step budget is exhausted or a `stop` file appears in cwd
        if_train = (evaluator.total_step <= break_step) and (not os.path.exists(f"{cwd}/stop"))

    print(f'| UsedTime: {time.time() - evaluator.start_time:>7.0f} | SavedDir: {cwd}')

    # close the env if it supports closing
    env.close() if hasattr(env, 'close') else None
    evaluator.save_training_curve_jpg()
    # persist the trained model (if_save=True means save)
    agent.save_or_load_agent(cwd, if_save=True)
    # optionally persist the replay buffer for later reuse
    if if_save_buffer and hasattr(buffer, 'save_or_load_history'):
        buffer.save_or_load_history(cwd, if_save=True)

# Train an agent with multiple processes (one Learner, several Workers, one Evaluator).
def train_agent_multiprocessing(args: Config):
    """Train an agent with the Learner/Worker/Evaluator multiprocessing pipeline.

    Spawns one Learner process, `args.num_workers` Worker processes and one
    Evaluator process, wires them together with pipes, then blocks until all
    of them finish.
    """
    # prepare the working directory / seeds before spawning any process
    args.init_before_training()

    """Don't set method='fork' when send tensor in GPU"""
    # 'spawn' is required on Windows; 'forkserver' is used elsewhere
    method = 'spawn' if os.name == 'nt' else 'forkserver'  # os.name == 'nt' means Windows NT operating system (WinOS)
    mp.set_start_method(method=method, force=True)

    '''build the Pipe'''
    # one simplex pipe per worker: Learner sends the actor, the Worker receives it
    worker_pipes = [Pipe(duplex=False) for _ in range(args.num_workers)]  # receive, send
    # one shared simplex pipe: Workers send rollouts, the Learner receives them
    learner_pipe = Pipe(duplex=False)
    # duplex pipe between Learner and Evaluator (actor/logs out, stop signal back)
    evaluator_pipe = Pipe(duplex=True)

    '''build Process'''
    learner = Learner(learner_pipe=learner_pipe, worker_pipes=worker_pipes, evaluator_pipe=evaluator_pipe, args=args)
    workers = [Worker(worker_pipe=worker_pipe, learner_pipe=learner_pipe, worker_id=worker_id, args=args)
               for worker_id, worker_pipe in enumerate(worker_pipes)]
    evaluator = EvaluatorProc(evaluator_pipe=evaluator_pipe, args=args)

    '''start Process'''
    process_list = [learner, *workers, evaluator]
    # plain loops, not throwaway list comprehensions, for start/join side effects
    for process in process_list:
        process.start()
    for process in process_list:
        process.join()

# Learner process: updates the agent's networks from the Workers' rollouts.
class Learner(Process):
    """Learner process: gathers rollouts from all Workers into shared tensors,
    updates the agent's networks, and exchanges the actor and the keep-training
    signal with the Evaluator process.
    """

    def __init__(self, learner_pipe: Pipe, worker_pipes: [Pipe], evaluator_pipe: Pipe, args: Config):
        super().__init__()
        # receiving end of the shared Workers -> Learner pipe (rollouts in)
        self.recv_pipe = learner_pipe[0]
        # sending ends of the per-worker Learner -> Worker pipes (actors out)
        self.send_pipes = [worker_pipe[1] for worker_pipe in worker_pipes]
        # Learner side of the duplex Learner <-> Evaluator pipe
        self.eval_pipe = evaluator_pipe[1]
        # training configuration, consumed in run()
        self.args = args

    def run(self):
        args = self.args
        # inference-only by default; gradients are enabled just around update_net()
        torch.set_grad_enabled(False)

        '''init agent'''
        agent = args.agent_class(args.net_dims, args.state_dim, args.action_dim, gpu_id=args.gpu_id, args=args)
        # load a previously saved model from cwd if one exists (if_save=False means load)
        agent.save_or_load_agent(args.cwd, if_save=False)

        '''init buffer'''
        # total number of parallel env slots across all workers
        num_buffers = args.num_envs * args.num_workers
        if args.if_off_policy:
            buffer = ReplayBuffer(
                gpu_id=args.gpu_id,
                num_envs=num_buffers,
                max_size=args.buffer_size,
                state_dim=args.state_dim,
                # discrete actions are stored as a single index column
                action_dim=1 if args.if_discrete else args.action_dim,
                if_use_per=args.if_use_per,
                args=args,
            )
        else:
            # on-policy: training data is a plain list, overwritten every iteration
            buffer = []

        '''loop'''
        if_off_policy = args.if_off_policy
        if_save_buffer = args.if_save_buffer
        horizon_len = args.horizon_len
        num_workers = args.num_workers
        # the scatter below needs exactly one send pipe per worker
        assert num_workers == len(self.send_pipes)
        # number of parallel environments per worker
        num_envs = args.num_envs
        state_dim = args.state_dim
        action_dim = args.action_dim
        # total env steps contributed by all workers per training iteration
        steps = args.horizon_len * args.num_workers
        cwd = args.cwd
        # everything needed below has been copied out; drop args to avoid accidental use
        del args

        # pre-allocated slot for the workers' final states, filled in the gather below
        agent.last_state = torch.empty((num_buffers, state_dim), dtype=torch.float32, device=agent.device)

        # pre-allocated gather tensors: dim 0 is time, dim 1 is the worker/env slot
        states = torch.empty((horizon_len, num_buffers, state_dim), dtype=torch.float32, device=agent.device)
        actions = torch.empty((horizon_len, num_buffers, action_dim), dtype=torch.float32, device=agent.device)
        rewards = torch.empty((horizon_len, num_buffers), dtype=torch.float32, device=agent.device)
        undones = torch.empty((horizon_len, num_buffers), dtype=torch.bool, device=agent.device)
        if if_off_policy:
            buffer_items_tensor = (states, actions, rewards, undones)
        else:
            # on-policy additionally stores the behavior policy's log-probabilities
            logprobs = torch.empty((horizon_len, num_buffers, action_dim), dtype=torch.float32, device=agent.device)
            buffer_items_tensor = (states, actions, logprobs, rewards, undones)

        if_train = True
        while if_train:
            '''Learner send actor to Workers'''
            for send_pipe in self.send_pipes:
                send_pipe.send(agent.act)

            '''Learner receive (buffer_items, last_state) from Workers'''
            for _ in range(num_workers):
                worker_id, buffer_items, last_state = self.recv_pipe.recv()

                # this worker owns slots [buf_i, buf_j) along dim 1
                buf_i = worker_id * num_envs
                buf_j = worker_id * num_envs + num_envs
                # copy each rollout tensor into its slice of the gather tensors
                for buffer_item, buffer_tensor in zip(buffer_items, buffer_items_tensor):
                    buffer_tensor[:, buf_i:buf_j] = buffer_item
                agent.last_state[buf_i:buf_j] = last_state

            '''Learner update training data to (buffer, agent)'''
            if if_off_policy:
                buffer.update(buffer_items_tensor)
            else:
                # on-policy: replace the buffer contents with the fresh rollout
                buffer[:] = buffer_items_tensor

            '''agent update network using training data'''
            # gradients are only needed while updating the networks
            torch.set_grad_enabled(True)
            logging_tuple = agent.update_net(buffer)
            torch.set_grad_enabled(False)

            '''Learner receive training signal from Evaluator'''
            if self.eval_pipe.poll():  # whether there is any data available to be read of this pipe
                if_train = self.eval_pipe.recv()  # True means evaluator in idle moments.
                actor = agent.act  # so Leaner send an actor to evaluator for evaluation.
            else:
                # evaluator is still busy: send logs only, no actor
                actor = None

            '''Learner send actor and training log to Evaluator'''
            # BUG FIX: rewards sit at index 2 only in the off-policy tuple; the
            # on-policy tuple is (states, actions, logprobs, rewards, undones),
            # so its rewards are at index 3 (index 2 would average the logprobs).
            exp_r = buffer_items_tensor[2 if if_off_policy else 3].mean().item()  # the average rewards of exploration
            self.eval_pipe.send((actor, steps, exp_r, logging_tuple))

        '''Learner send the terminal signal to workers after break the loop'''
        # None is the sentinel that tells every Worker to exit its loop
        for send_pipe in self.send_pipes:
            send_pipe.send(None)

        '''save'''
        # persist the trained model (if_save=True means save)
        agent.save_or_load_agent(cwd, if_save=True)
        # optionally persist the replay buffer for later reuse
        if if_save_buffer and hasattr(buffer, 'save_or_load_history'):
            print(f"| LearnerPipe.run: ReplayBuffer saving in {cwd}")
            buffer.save_or_load_history(cwd, if_save=True)
            print(f"| LearnerPipe.run: ReplayBuffer saved  in {cwd}")

# Worker process: explores its environment copy and ships rollouts to the Learner.
class Worker(Process):
    """Worker process: repeatedly receives the latest actor from the Learner,
    explores its own environment copy, and sends the rollout back.
    """

    def __init__(self, worker_pipe: Pipe, learner_pipe: Pipe, worker_id: int, args: Config):
        super().__init__()
        # receiving end of this worker's Learner -> Worker pipe (actor updates)
        self.recv_pipe = worker_pipe[0]
        # sending end of the shared Workers -> Learner pipe (rollouts)
        self.send_pipe = learner_pipe[1]
        # index of this worker; the Learner uses it to place data in its gather tensors
        self.worker_id = worker_id
        # training configuration, consumed in run()
        self.args = args

    def run(self):
        args = self.args
        worker_id = self.worker_id
        # exploration only; this process never computes gradients
        torch.set_grad_enabled(False)

        '''init environment'''
        env = build_env(args.env_class, args.env_args, args.gpu_id)

        '''init agent'''
        agent = args.agent_class(args.net_dims, args.state_dim, args.action_dim, gpu_id=args.gpu_id, args=args)
        # load a previously saved model from cwd if one exists (if_save=False means load)
        agent.save_or_load_agent(args.cwd, if_save=False)

        '''init agent.last_state'''
        state = env.reset()
        if args.num_envs == 1:
            # single env: reset() returns a 1D numpy array; add a batch dimension
            assert state.shape == (args.state_dim,)
            assert isinstance(state, np.ndarray)
            state = torch.tensor(state, dtype=torch.float32, device=agent.device).unsqueeze(0)
        else:
            # vectorized env: reset() returns a batched tensor; move it to the agent's device
            assert state.shape == (args.num_envs, args.state_dim)
            assert isinstance(state, torch.Tensor)
            state = state.to(agent.device)
        # either way, last_state must end up as a (num_envs, state_dim) tensor on the agent's device
        assert state.shape == (args.num_envs, args.state_dim)
        assert isinstance(state, torch.Tensor)
        agent.last_state = state.detach()

        '''init buffer'''
        horizon_len = args.horizon_len
        if args.if_off_policy:
            # off-policy: send one rollout of random actions before entering the
            # loop, warming up the Learner's ReplayBuffer
            buffer_items = agent.explore_env(env, args.horizon_len, if_random=True)
            self.send_pipe.send((worker_id, buffer_items, agent.last_state))

        '''loop'''
        # everything needed below has been copied out; drop args to avoid accidental use
        del args

        while True:
            '''Worker receive actor from Learner'''
            actor = self.recv_pipe.recv()
            # None is the Learner's terminal signal
            if actor is None:
                break

            '''Worker send the training data to Learner'''
            # adopt the freshly trained actor, then explore with it
            agent.act = actor
            buffer_items = agent.explore_env(env, horizon_len)
            self.send_pipe.send((worker_id, buffer_items, agent.last_state))

        # close the env if it supports closing
        env.close() if hasattr(env, 'close') else None

# Evaluator process: evaluates the agent and decides when training stops.
class EvaluatorProc(Process):
    """Evaluator process: receives actors and training logs from the Learner,
    evaluates them, and sends back the keep-training signal.
    """

    def __init__(self, evaluator_pipe: Pipe, args: Config):
        super().__init__()
        # Evaluator side of the duplex Learner <-> Evaluator pipe
        self.pipe = evaluator_pipe[0]
        # training configuration, consumed in run()
        self.args = args

    def run(self):
        args = self.args
        # evaluation only; this process never computes gradients
        torch.set_grad_enabled(False)

        '''wandb(weights & biases): Track and visualize all the pieces of your machine learning pipeline.'''
        wandb = None
        # optional experiment tracking, enabled via args.if_use_wandb
        if getattr(args, 'if_use_wandb', False):
            import wandb
            wandb_project_name = "train"
            wandb.init(project=wandb_project_name)

        '''init evaluator'''
        # fall back to the training env class/args when no separate eval env is configured
        eval_env_class = args.eval_env_class if args.eval_env_class else args.env_class
        eval_env_args = args.eval_env_args if args.eval_env_args else args.env_args
        eval_env = build_env(eval_env_class, eval_env_args, args.gpu_id)
        evaluator = Evaluator(cwd=args.cwd, env=eval_env, args=args, if_tensorboard=False)

        '''loop'''
        cwd = args.cwd
        break_step = args.break_step
        # device the received actor is moved to before evaluation
        device = torch.device(f"cuda:{args.gpu_id}" if (torch.cuda.is_available() and (args.gpu_id >= 0)) else "cpu")
        # everything needed below has been copied out; drop args to avoid accidental use
        del args

        if_train = True
        while if_train:
            '''Evaluator receive training log from Learner'''
            actor, steps, exp_r, logging_tuple = self.pipe.recv()
            # presumably logging_tuple[0]/[1] are critic/actor objectives — confirm in agent.update_net
            wandb.log({"obj_cri": logging_tuple[0], "obj_act": logging_tuple[1]}) if wandb else None

            '''Evaluator evaluate the actor and save the training log'''
            if actor is None:
                # Learner sent no actor (this process was still busy last poll): count steps only
                evaluator.total_step += steps  # update total_step but don't update recorder
            else:
                actor = actor.to(device)
                evaluator.evaluate_and_save(actor, steps, exp_r, logging_tuple)

            '''Evaluator send the training signal to Learner'''
            # keep training until the step budget is exhausted or a `stop` file appears in cwd
            if_train = (evaluator.total_step <= break_step) and (not os.path.exists(f"{cwd}/stop"))
            self.pipe.send(if_train)

        '''Evaluator save the training log and draw the learning curve'''
        evaluator.save_training_curve_jpg()
        print(f'| UsedTime: {time.time() - evaluator.start_time:>7.0f} | SavedDir: {cwd}')

        # close the eval env if it supports closing
        eval_env.close() if hasattr(eval_env, 'close') else None

'''render'''

# Render a trained agent to visualize its behavior in the environment.
def render_agent(env_class, env_args: dict, net_dims: [int], agent_class, actor_path: str, render_times: int = 8):
    """Load a pretrained actor from disk and render it acting in the environment.

    env_class / env_args: passed to build_env to construct the environment.
    net_dims: hidden-layer sizes used to rebuild the actor network.
    agent_class: agent type whose `.act` network matches the saved weights.
    actor_path: path to the saved actor state_dict.
    render_times: number of rendered episodes to run.
    """
    env = build_env(env_class, env_args)

    # rebuild the agent on CPU (gpu_id=-1); only its `.act` network is kept
    agent = agent_class(net_dims, env_args['state_dim'], env_args['action_dim'], gpu_id=-1)
    actor = agent.act
    del agent  # the rest of the agent is not needed for rendering

    print(f"| render and load actor from: {actor_path}")
    # load the weights onto CPU regardless of where they were saved
    actor.load_state_dict(torch.load(actor_path, map_location=lambda storage, loc: storage))

    for i in range(render_times):
        # run one rendered episode and report its return and length
        cumulative_reward, episode_step = get_cumulative_rewards_and_steps(env, actor, if_render=True)
        print(f"|{i:4}  cumulative_reward {cumulative_reward:9.3f}  episode_step {episode_step:5.0f}")

    if hasattr(env, 'close'):
        env.close()