import argparse


def get_config():
    """
    Build the argparse parser for hyperparameters common to all environments.

    Per-environment (private) hyperparameters live in the corresponding
    `scripts/train/<env>_runner.py` file.

    Returns:
        argparse.ArgumentParser: a parser pre-populated with all common
        options; call `.parse_args()` / `.parse_known_args()` on it yourself.

    Prepare parameters:
        --algorithm_name <str>
            the algorithm to run, one of ["rmappo", "mappo"] (default "mappo").
        --experiment_name <str>
            identifier to distinguish different experiments (default "check").
        --seed <int>
            random seed for numpy and torch (default 1).
        --cuda
            default True (train on GPU); pass the flag to force CPU.
        --cuda_deterministic
            default True (make the random seed fully effective); pass the
            flag to bypass deterministic behavior.
        --n_training_threads <int>
            number of torch threads for training (default 2).
        --n_rollout_threads <int>
            number of parallel envs for training rollouts (default 5).
        --n_eval_rollout_threads <int>
            number of parallel envs for evaluation rollouts (default 2).
        --n_render_rollout_threads <int>
            number of parallel envs for rendering; some environments only
            support 1 (default 1).
        --num_env_steps <int>
            total number of env steps to train (default 10e6).
        --user_name <str>
            [wandb] user name under which training data is collected
            (default "marl").

    Env parameters:
        --env_name <str>
            name of the environment (default "MyEnv").
        --use_obs_instead_of_state
            [some envs only] default False (use the global state); pass the
            flag to use concatenated local observations instead.

    Replay Buffer parameters:
        --episode_length <int>
            max length of an episode in the buffer (default 200).

    Network parameters:
        --share_policy
            whether all agents share one policy network (default False here).
        --use_centralized_V
            default True (centralized value function); pass the flag to
            train decentralized.
        --stacked_frames <int>
            number of input frames stacked together (default 1).
        --use_stacked_frames
            default False; pass the flag to enable frame stacking.
        --hidden_size <int>
            dimension of actor/critic hidden layers (default 64).
        --layer_N <int>
            number of actor/critic layers (default 1).
        --use_ReLU
            default True (ReLU); pass the flag to use Tanh instead.
        --use_popart
            default False; pass the flag to normalize rewards with PopArt.
        --use_valuenorm
            default True (normalize rewards with running mean/std); pass the
            flag to disable.
        --use_feature_normalization
            default True (layernorm on inputs); pass the flag to disable.
        --use_orthogonal
            default True (orthogonal weight init, zero bias init); pass the
            flag to use xavier-uniform initialization.
        --gain <float>
            gain of the last action layer (default 0.01).
        --use_naive_recurrent_policy
            default False; pass the flag to use the whole trajectory to
            compute hidden states.
        --use_recurrent_policy
            toggles the recurrent policy (default False here).
        --recurrent_N <int>
            number of recurrent layers (default 1).
        --data_chunk_length <int>
            time length of chunks used to train a recurrent policy
            (default 10).

    Optimizer parameters:
        --lr <float>
            learning rate (default 5e-4).
        --critic_lr <float>
            critic learning rate (default 5e-4).
        --opti_eps <float>
            RMSprop optimizer epsilon (default 1e-5).
        --weight_decay <float>
            weight-decay coefficient (default 0).

    PPO parameters:
        --ppo_epoch <int>
            number of PPO epochs (default 15).
        --use_clipped_value_loss
            default True (clip the value loss); pass the flag to disable.
        --clip_param <float>
            PPO clip parameter (default 0.2).
        --num_mini_batch <int>
            number of mini-batches per PPO epoch (default 1).
        --entropy_coef <float>
            entropy term coefficient (default 0.01).
        --value_loss_coef <float>
            value loss coefficient (default 1).
        --use_max_grad_norm
            default True (clip gradients by max norm); pass the flag to
            disable.
        --max_grad_norm <float>
            max norm of gradients (default 10.0).
        --use_gae
            default True (generalized advantage estimation); pass the flag
            to disable.
        --gamma <float>
            discount factor for rewards (default 0.99).
        --gae_lambda <float>
            GAE lambda parameter (default 0.95).
        --use_proper_time_limits
            default False; pass the flag to account for time limits when
            computing returns.
        --use_huber_loss
            default True (huber loss); pass the flag to disable.
        --use_value_active_masks
            default True (mask useless data in the value loss).
        --use_policy_active_masks
            default True (mask useless data in the policy loss).
        --huber_delta <float>
            huber loss coefficient (default 10.0).

    Run parameters:
        --use_linear_lr_decay
            default False; pass the flag to apply a linear learning-rate
            schedule.

    Save & Log parameters:
        --save_interval <int>
            duration between two consecutive model saves (default 1).
        --log_interval <int>
            duration between two consecutive log prints (default 5).

    Eval parameters:
        --use_eval
            default False; pass the flag to evaluate alongside training.
        --eval_interval <int>
            duration between two consecutive evaluations (default 25).
        --eval_episodes <int>
            number of episodes in a single evaluation (default 32).

    Render parameters:
        --save_gifs
            default False; pass the flag to save render videos.
        --use_render
            default False; pass the flag to render the env during training.
            Note: some environments have an internal render process not
            controlled by this hyperparameter.
        --render_episodes <int>
            number of episodes to render (default 5).
        --ifi <float>
            playback interval of each rendered image in the saved video
            (default 0.1).

    Pretrained parameters:
        --model_dir <str>
            path to a pretrained model; default None (no pretrained model).

    Scenario parameters:
        --scenario_name <str>
            scenario to run (default "MyEnv").
        --num_landmarks <int>
            number of landmarks (default 3).
        --num_agents <int>
            number of agents (default 2).
    """

    parser = argparse.ArgumentParser(
        description="onpolicy", formatter_class=argparse.RawDescriptionHelpFormatter
    )

    # Prepare parameters
    parser.add_argument(
        "--algorithm_name", type=str, default="mappo", choices=["rmappo", "mappo"]
    )

    parser.add_argument(
        "--experiment_name",
        type=str,
        default="check",
        help="用于区分不同实验的标识符",
    )

    parser.add_argument("--seed", type=int, default=1, help="numpy/torch的随机种子")

    parser.add_argument(
        "--cuda",
        action="store_false",
        default=True,
        help="默认为True，将使用GPU训练；否则使用CPU",
    )

    parser.add_argument(
        "--cuda_deterministic",
        action="store_false",
        default=True,
        help="默认情况下，确保随机种子有效。如果设置，则绕过此功能",
    )

    parser.add_argument(
        "--n_training_threads",
        type=int,
        default=2,
        help="训练的torch线程数",
    )

    parser.add_argument(
        "--n_rollout_threads",
        type=int,
        default=5,
        help="训练rollouts的并行环境数",
    )

    parser.add_argument(
        "--n_eval_rollout_threads",
        type=int,
        default=2,
        help="评估rollouts的并行环境数",
    )

    parser.add_argument(
        "--n_render_rollout_threads",
        type=int,
        default=1,
        help="渲染rollouts的并行环境数",
    )

    # int(10e6): `10e6` is a float literal; `type=int` only converts CLI
    # strings, so an un-cast default would leave num_env_steps a float.
    parser.add_argument(
        "--num_env_steps",
        type=int,
        default=int(10e6),
        help="要训练的环境步数（默认：10e6）",
    )

    parser.add_argument(
        "--user_name",
        type=str,
        default="marl",
        help="[用于wandb]，指定用户名以简单收集训练数据",
    )

    # Env parameters
    parser.add_argument("--env_name", type=str, default="MyEnv", help="指定环境名称")

    parser.add_argument(
        "--use_obs_instead_of_state",
        action="store_true",
        default=False,
        help="是否使用全局状态或连接的观察",
    )

    # Replay buffer parameters
    parser.add_argument(
        "--episode_length", type=int, default=200, help="任何回合的最大长度"
    )

    # Network parameters
    # NOTE(review): action="store_false" with default=False makes this flag
    # a no-op (it can only set False, which is already the default) — confirm
    # whether the default was meant to be True.
    parser.add_argument(
        "--share_policy",
        action="store_false",
        default=False,
        help="智能体是否共享相同的策略",
    )

    parser.add_argument(
        "--use_centralized_V",
        action="store_false",
        default=True,
        help="是否使用中心化的V函数",
    )

    # NOTE(review): help text appears copy-pasted from --hidden_size; this
    # option is the number of stacked input frames.
    parser.add_argument(
        "--stacked_frames",
        type=int,
        default=1,
        help="actor/critic网络隐藏层的维度",
    )

    parser.add_argument(
        "--use_stacked_frames",
        action="store_true",
        default=False,
        help="是否使用堆叠帧",
    )

    parser.add_argument(
        "--hidden_size",
        type=int,
        default=64,
        help="actor/critic网络隐藏层的维度",
    )

    parser.add_argument(
        "--layer_N",
        type=int,
        default=1,
        help="actor/critic网络的层数",
    )

    parser.add_argument(
        "--use_ReLU", action="store_false", default=True, help="是否使用ReLU"
    )

    parser.add_argument(
        "--use_popart",
        action="store_true",
        default=False,
        help="默认为False，使用PopArt来归一化奖励",
    )

    parser.add_argument(
        "--use_valuenorm",
        action="store_false",
        default=True,
        help="默认为True，使用运行均值和标准差来归一化奖励",
    )

    parser.add_argument(
        "--use_feature_normalization",
        action="store_false",
        default=True,
        help="是否对输入应用层归一化",
    )

    parser.add_argument(
        "--use_orthogonal",
        action="store_false",
        default=True,
        help="是否使用正交初始化权重和0初始化偏置",
    )

    parser.add_argument("--gain", type=float, default=0.01, help="最后动作层的增益")

    # Recurrent-policy parameters
    parser.add_argument(
        "--use_naive_recurrent_policy",
        action="store_true",
        default=False,
        help="是否使用简单的循环策略",
    )

    # NOTE(review): action="store_false" with default=False makes this flag
    # a no-op — elsewhere this option usually defaults to True; confirm.
    parser.add_argument(
        "--use_recurrent_policy",
        action="store_false",
        default=False,
        help="使用循环策略",
    )

    parser.add_argument("--recurrent_N", type=int, default=1, help="循环层的数量")

    parser.add_argument(
        "--data_chunk_length",
        type=int,
        default=10,
        help="用于训练循环策略的块的时间长度",
    )

    # Optimizer parameters
    parser.add_argument("--lr", type=float, default=5e-4, help="学习率（默认：5e-4）")

    parser.add_argument(
        "--critic_lr",
        type=float,
        default=5e-4,
        help="评论家学习率（默认：5e-4）",
    )

    parser.add_argument(
        "--opti_eps",
        type=float,
        default=1e-5,
        help="RMSprop优化器epsilon（默认：1e-5）",
    )

    parser.add_argument("--weight_decay", type=float, default=0)

    # PPO parameters
    parser.add_argument("--ppo_epoch", type=int, default=15, help="ppo轮数（默认：15）")

    parser.add_argument(
        "--use_clipped_value_loss",
        action="store_false",
        default=True,
        help="默认情况下，裁剪损失值。如果设置，则不裁剪损失值",
    )

    parser.add_argument(
        "--clip_param",
        type=float,
        default=0.2,
        help="ppo裁剪参数（默认：0.2）",
    )

    parser.add_argument(
        "--num_mini_batch",
        type=int,
        default=1,
        help="ppo的批次数（默认：1）",
    )

    parser.add_argument(
        "--entropy_coef",
        type=float,
        default=0.01,
        help="熵项系数（默认：0.01）",
    )

    # NOTE(review): help text claims a default of 0.5 but the actual default
    # is 1 — confirm which is intended.
    parser.add_argument(
        "--value_loss_coef",
        type=float,
        default=1,
        help="价值损失系数（默认：0.5）",
    )

    parser.add_argument(
        "--use_max_grad_norm",
        action="store_false",
        default=True,
        help="默认情况下，使用梯度的最大范数。如果设置，则不使用",
    )

    # NOTE(review): help text claims a default of 0.5 but the actual default
    # is 10.0 — confirm which is intended.
    parser.add_argument(
        "--max_grad_norm",
        type=float,
        default=10.0,
        help="梯度的最大范数（默认：0.5）",
    )

    parser.add_argument(
        "--use_gae",
        action="store_false",
        default=True,
        help="使用广义优势估计",
    )

    parser.add_argument(
        "--gamma",
        type=float,
        default=0.99,
        help="奖励的折扣因子（默认：0.99）",
    )

    parser.add_argument(
        "--gae_lambda",
        type=float,
        default=0.95,
        help="gae lambda参数（默认：0.95）",
    )

    parser.add_argument(
        "--use_proper_time_limits",
        action="store_true",
        default=False,
        help="计算回报时考虑时间限制",
    )

    parser.add_argument(
        "--use_huber_loss",
        action="store_false",
        default=True,
        help="默认情况下，使用huber损失。如果设置，则不使用huber损失",
    )

    parser.add_argument(
        "--use_value_active_masks",
        action="store_false",
        default=True,
        help="默认为True，是否在价值损失中掩盖无用数据",
    )

    parser.add_argument(
        "--use_policy_active_masks",
        action="store_false",
        default=True,
        help="默认为True，是否在策略损失中掩盖无用数据",
    )

    parser.add_argument(
        "--huber_delta", type=float, default=10.0, help="huber损失的系数"
    )

    # Run parameters
    parser.add_argument(
        "--use_linear_lr_decay",
        action="store_true",
        default=False,
        help="对学习率使用线性调度",
    )
    # Save parameters
    parser.add_argument(
        "--save_interval",
        type=int,
        default=1,
        help="连续两次模型保存之间的时间间隔",
    )

    # Log parameters
    parser.add_argument(
        "--log_interval",
        type=int,
        default=5,
        help="连续两次日志打印之间的时间间隔",
    )

    # Eval parameters
    parser.add_argument(
        "--use_eval",
        action="store_true",
        default=False,
        help="默认情况下，不开始评估。如果设置，则与训练一起开始评估",
    )

    parser.add_argument(
        "--eval_interval",
        type=int,
        default=25,
        help="连续两次评估进度之间的时间间隔",
    )

    parser.add_argument(
        "--eval_episodes",
        type=int,
        default=32,
        help="单次评估的回合数",
    )

    # Render parameters
    parser.add_argument(
        "--save_gifs",
        action="store_true",
        default=False,
        help="默认情况下，不保存渲染视频。如果设置，则保存视频",
    )

    parser.add_argument(
        "--use_render",
        action="store_true",
        default=False,
        help="默认情况下，在训练期间不渲染环境。如果设置，则开始渲染。注意：有时，环境有内部渲染过程，不受此超参数控制",
    )

    parser.add_argument(
        "--render_episodes",
        type=int,
        default=5,
        help="渲染给定环境的回合数",
    )

    parser.add_argument(
        "--ifi",
        type=float,
        default=0.1,
        help="保存视频中每个渲染图像的播放间隔",
    )

    # Pretrained parameters
    parser.add_argument(
        "--model_dir",
        type=str,
        default=None,
        help="默认为None。设置预训练模型的路径",
    )

    parser.add_argument(
        "--scenario_name", type=str, default="MyEnv", help="要运行的场景"
    )

    parser.add_argument("--num_landmarks", type=int, default=3)  # number of landmarks

    parser.add_argument("--num_agents", type=int, default=2, help="智能体数量")

    return parser
