import { CCInteger, Component, _decorator, assert, warn,log } from 'cc';
import { ActionSpec } from './Actuators/ActionSpec';
import { ActionBuffers, IActionReceiver } from './Actuators/IActionReceiver';
import { BuiltInActuatorType } from './Actuators/IBuiltInActuator';
import { IHeuristicProvider } from './Actuators/IHeuristicProvider';
import { VectorActuator } from './Actuators/VectorActuator';
import { IDiscreteActionMask } from './Actuators/IDiscreteActionMask';
import { BehaviorParameters, ObservableAttributeOptions } from './Policies/BehaviorParameters';
import { IPolicy } from './Policies/IPolicy';
import { DemonstrationWriter } from './Demonstrations/DemonstrationWriter';
import { ISensor, SensorUtils } from './Sensors/ISensor';
import { VectorSensor } from './Sensors/VectorSensor';
import { StackingSensor } from './Sensors/StackingSensor';
import { ActuatorManager } from './Actuators/ActuatorManager';
import { IActuator } from './Actuators/IActuator';
import { CommunicatorFactory } from './Communicator/CommunicatorFactory';
import Macro from '../Macro';
import { RpcCommunicator } from './Communicator/RpcCommunicator';
import { EpisodeIdCounter } from './EpisodeIdCounter';
import { Academy } from './Academy';
import NNModel from '../Core/Internals/NNModel';
import { InferenceDevice } from './Policies/BarracudaPolicy';
import { Utilities } from './Utilities';
import { ObservableAttribute } from './Sensors/Reflection/ObservableAttribute';
import { SensorComponent } from './Sensors/SensorComponent';
import { ActuatorComponent } from './Actuators/ActuatorComponent';
import { CCCAgentsError } from './CCCAgentsError';
import { IMultiAgentGroup } from './IMultiAgentGroup';

/**
 * @en
 * Information about an Agent that is sent to the Brain.
 * @zh
 * Agent的所有的信息
 */
export class AgentInfo {
    /**
     * @en
     * Keeps track of the last actions taken by the Brain.
     * @zh
     * 记录 Brain 所采取的最后行动
     */
    public storedActions: ActionBuffers = null!;

    /**
     * @en
     * For discrete control, specifies the actions that the agent cannot take.
     * @zh
     * 离散控制(discrete control),agent不能控制的指定行为
     */
    public discreteActionMasks: boolean[] = null!;

    /**
     * @en
     * The current agent reward.
     * @zh
     * 当前agent奖励
     */
    public reward: number = 0;

    /**
     * @en
     * The current group reward received by the agent.
     * @zh
     * agent收到的当前group奖励
     */
    public groupReward: number = 0;

    /**
     * @en
     * Whether the agent is done.
     * @zh
     * agent是否完成
     */
    public done: boolean = false;

    /**
     * @en
     * Whether the agent reached the max step count for this episode.
     * @zh
     * agent是否达到本次章节最大步数
     */
    public maxStepReached: boolean = false;

    /**
     * @en Episode identifier.
     * @zh 章节id
     */
    public episodeId: number = 0;

    /**
     * @en MultiAgentGroup identifier.
     * @zh agent group 标示(identifier)
     */
    public groupId = 0;

    /** Clears the stored action buffers. */
    public clearActions(): void {
        this.storedActions.clear();
    }

    /**
     * Copies the values of `actionBuffers` element-by-element into
     * {@link storedActions}.
     * @param actionBuffers The source buffers to copy from.
     */
    public copyActions(actionBuffers: ActionBuffers): void {
        const { continuousActions, discreteActions } = this.storedActions;

        const srcContinuous = actionBuffers.continuousActions;
        for (let index = 0; index < srcContinuous.length; index++) {
            continuousActions.setValue(index, srcContinuous.getValue(index));
        }

        const srcDiscrete = actionBuffers.discreteActions;
        for (let index = 0; index < srcDiscrete.length; index++) {
            discreteActions.setValue(index, srcDiscrete.getValue(index));
        }
    }
}

/**
 * @en
 * The VectorActuator an Agent uses by default; it identifies itself as the
 * built-in vector actuator type.
 * @zh
 * Agent 默认使用的 VectorActuator；将自身标识为内置的向量执行器类型。
 */
export class AgentVectorActuator extends VectorActuator {
    constructor(
        receiver: IActionReceiver,
        heuristic: IHeuristicProvider,
        spec: ActionSpec,
        actuatorName: string = 'VectorActuator'
    ) {
        super(receiver, heuristic, spec, actuatorName);
    }

    /** Reports this actuator as the built-in VectorActuator type. */
    getBuiltInActuatorType(): BuiltInActuatorType {
        return BuiltInActuatorType.VectorActuator;
    }
}

// Pull the Cocos Creator decorator helpers used on the Agent class below.
const { ccclass, requireComponent, executionOrder, property } = _decorator;

export class AgentParameters {
    /**
     * @en
     * The maximum number of steps for an episode; 0 means unlimited.
     * NOTE: the field name is a historical misspelling of `maxStep`; it is kept
     * for backward compatibility with existing callers. Prefer {@link maxStep}.
     */
    masStep: number = 0;

    /** Correctly-spelled accessor for the legacy {@link masStep} field. */
    get maxStep(): number {
        return this.masStep;
    }
    set maxStep(value: number) {
        this.masStep = value;
    }
}

/**
 * @en
 * The reason that the Agent's episode ended.
 * @zh
 * 完成原因
 */
export enum DoneReason {

    /**
     * @en
     * The episode was ended manually by calling {@link endEpisode}. 
     * 
     * @zh
     * 通过调用 {@link endEpisode} 手动结束章节。
     *
     * */
    DoneCalled,

    /**
     * @en
     * The max steps for the Agent were reached.
     * 
     * @zh
     * 达到了代理的最大步数。
     */
    MaxStepReached,

    /**
     * @en
     * The Agent was disabled.
     * 
     * @zh
     * 代理被禁用。
     */
    Disabled,
}

/**
 * @en
 * An agent is an actor that can observe its environment, decide on the
 * best course of action using those observations, and execute those actions
 * within the environment.
 * 
 * @zh
 * 代理是一个能够观察环境、根据这些观察结果决定最佳行动方案并在环境中执行这些行动的角色。
 * 
 * @description
 *  Use the Agent class as the subclass for implementing your own agents. Add 
 * your Agent implementation to a [GameObject] in the [CCC scene] that serves
 * as the agent's environment.
 * 
 * @zh
 * 使用 Agent 类作为实现自己代理的子类。将您的代理实现添加到 [CCC 场景] 中作为代理环境的 [GameObject]。
 * 
 * 
 *  Agents in an environment operate in *steps*. At each step, an agent collects observations,
 * passes them to its decision-making policy, and receives an action vector in response.
 * 
 * @zh
 * 环境中的代理以 *步* 运行。在每一步中，代理收集观察结果，将它们传递给其决策策略，并收到一个动作向量作为响应。
 * 
 *  Agents make observations using {@link ISensor} implementations. The ML-Agents
 * API provides implementations for visual observations ({@link CameraSensor})
 * raycast observations ({@link RayPerceptionSensor}), and arbitrary
 * data observations ({@link VectorSensor}). You can add the
 * {@link CameraSensorComponent} and {@link RayPerceptionSensorComponent2D} or
 * {@link RayPerceptionSensorComponent3D} components to an agent's [GameObject] to use
 * those sensor types. You can implement the {@link CollectObservations(VectorSensor)}
 * function in your Agent subclass to use a vector observation. The Agent class calls this
 * function before it uses the observation vector to make a decision. (If you only use
 * visual or raycast observations, you do not need to implement
 * {@link CollectObservations}.)
 * 
 * @zh
 * 代理使用 {@link ISensor} 实现进行观察。ML-Agents API 提供了视觉观察 ({@link CameraSensor})、
 * 射线投射观察 ({@link RayPerceptionSensor}) 和任意数据观察 ({@link VectorSensor}) 的实现。
 * 您可以将 {@link CameraSensorComponent} 和 {@link RayPerceptionSensorComponent2D} 或
 * {@link RayPerceptionSensorComponent3D} 组件添加到代理的 [GameObject] 中以使用这些传感器类型。
 * 您可以在 Agent 子类中实现 {@link CollectObservations(VectorSensor)} 函数以使用矢量观察。
 * Agent 类在使用观察向量做出决策之前调用此函数。(如果只使用视觉或射线投射观察，您不需要实现 {@link CollectObservations}。)
 * 
 *
 * Assign a decision making policy to an agent using a {@link BehaviorParameters}
 * component attached to the agent's [GameObject]. The {@link BehaviorType} setting
 * determines how decisions are made:
 * 
 * @zh
 * 使用附加到代理的 [GameObject] 的 {@link BehaviorParameters} 组件为代理分配决策策略。
 * {@link BehaviorType} 设置确定如何做出决策：
 * 
 *
 * - {@link BehaviorType.Default}: decisions are made by the external process,
 *   when connected. Otherwise, decisions are made using inference. If no inference model
 *   is specified in the BehaviorParameters component, then heuristic decision
 *   making is used.
 * 
 *   @zh
 *   {@link BehaviorType.Default}: 决策由外部进程在连接时做出。否则，使用推理做出决策。
 *   如果在 BehaviorParameters 组件中未指定推理模型，则使用启发式决策。
 * 
 * - {@link BehaviorType.InferenceOnly}: decisions are always made using the trained
 *   model specified in the {@link BehaviorParameters} component.
 *   @zh
 *   {@link BehaviorType.InferenceOnly}: 总是使用 BehaviorParameters 组件中指定的训练模型做出决策。
 * 
 * - {@link BehaviorType.HeuristicOnly}: when a decision is needed, the agent's
 *   {@link Heuristic(in ActionBuffers)} function is called. Your implementation is responsible for
 *   providing the appropriate action.
 * 
 *  @zh
 *  {@link BehaviorType.HeuristicOnly}: 需要决策时，将调用代理的 {@link Heuristic(in ActionBuffers)} 函数。
 *  您的实现负责提供适当的动作。
 * 
 *
 * To trigger an agent decision automatically, you can attach a {@link DecisionRequester}
 * component to the Agent game object. You can also call the agent's {@link RequestDecision}
 * function manually. You only need to call {@link RequestDecision} when the agent is
 * in a position to act upon the decision. In many cases, this will be every [FixedUpdate]
 * callback, but could be less frequent. For example, an agent that hops around its environment
 * can only take an action when it touches the ground, so several frames might elapse between
 * one decision and the need for the next.
 * @zh
 * 要自动触发代理决策，可以将 {@link DecisionRequester} 组件附加到代理游戏对象。
 * 您还可以手动调用代理的 {@link RequestDecision} 函数。只有在代理有能力执行决策时才需要调用 {@link RequestDecision}。
 * 在许多情况下，这将是每个 [FixedUpdate] 回调，但可能不那么频繁。例如，一个在环境中跳跃的代理只有在接触地面时才能采取行动，
 * 因此在一个决策和下一个决策之间可能会经过几帧。
 * 
 *
 * Use the {@link OnActionReceived(ActionBuffers)} function to implement the actions your agent can take,
 * such as moving to reach a goal or interacting with its environment.
 * 
 * @zh
 * 使用 {@link OnActionReceived(ActionBuffers)} 函数实现代理可以采取的动作，例如移动以达到目标或与环境进行交互。
 * 
 *
 * When you call {@link EndEpisode} on an agent or the agent reaches its {@link MaxStep} count,
 * its current episode ends. You can reset the agent -- or remove it from the
 * environment -- by implementing the {@link OnEpisodeBegin} function. An agent also
 * becomes done when the {@link Academy} resets the environment, which only happens when
 * the {@link Academy} receives a reset signal from an external process via the
 * {@link Academy.Communicator}.
 * 
 * @zh
 * 当您在代理上调用 {@link EndEpisode} 或代理达到其 {@link MaxStep} 计数时，其当前章节结束。
 * 您可以通过实现 {@link OnEpisodeBegin} 函数重置代理，或将其从环境中移除。
 * 当 {@link Academy} 重置环境时，代理也会变为完成，这只会在 {@link Academy} 通过 {@link Academy.Communicator} 从外部进程接收到重置信号时发生。
 * 
 *
 * The Agent class extends the CCC [Component] class. You can implement the
 * standard [Component] functions as needed for your agent. Since an agent's
 * observations and actions typically take place during the [FixedUpdate] phase, you should
 * only use the [Component.update] function for cosmetic purposes. If you override the [Component]
 * methods, [onEnable()] or [onDisable()], always call the base Agent class implementations.
 * 
 * @zh
 * Agent 类扩展了 CCC [Component] 类。您可以根据需要为代理实现标准 [Component] 函数。
 * 由于代理的观察和动作通常发生在 [FixedUpdate] 阶段，因此您应该仅将 [Component.Update] 函数用于装饰目的。
 * 如果您重写 [Component] 方法 [onEnable()] 或 [onDisable()]，请始终调用基类 Agent 类实现。
 * 
 *
 * You can implement the {@link Heuristic(in ActionBuffers)} function to specify agent actions using
 * your own heuristic algorithm. Implementing a heuristic function can be useful
 * for debugging. For example, you can use keyboard input to select agent actions in
 * order to manually control an agent's behavior.
 * @zh
 * 您可以实现 {@link Heuristic(in ActionBuffers)} 函数，使用自己的启发式算法指定代理动作。
 * 实现启发式函数可以用于调试。例如，您可以使用键盘输入选择代理动作，以手动控制代理的行为。
 * 
 *
 * Note that you can change the inference model assigned to an agent at any step
 * by calling {@link SetModel}.
 * 
 * @zh
 * 请注意，您可以通过调用 {@link SetModel} 在任何步骤更改分配给代理的推理模型。
 * 
 * 
 * @tutorial  https://github.com/Unity-Technologies/ml-agents/blob/release_20_docs/docs/Learning-Environment-Design-Agents.md
 */

@ccclass('Agent')
@requireComponent(BehaviorParameters)
@executionOrder(50)
export class Agent extends Component implements IActionReceiver, IHeuristicProvider {
    // The decision-making policy currently driving this Agent; created in
    // lazyInitialize() and rebuilt by reloadPolicy().
    private _brain: IPolicy = null!;

    // The BehaviorParameters component on the same node, fetched in
    // lazyInitialize() and used to generate _brain.
    private _policyFactory: BehaviorParameters = null!;

    // Legacy parameter holder (carries the misspelled masStep field).
    // NOTE(review): not visibly read in this chunk -- confirm it is still used.
    private _agentParameters: AgentParameters = null!;

    // Whether legacy AgentParameters values have been migrated to this
    // component's own fields. NOTE(review): not visibly read here -- confirm.
    private _hasUpgradedFromAgentParameters: boolean = false;

    /**
     * @description
     * @en
     * The maximum number of steps the agent takes before being done.
     *
     * @zh
     * 代理在完成之前执行的最大步数。
     *
     * @en
     * The maximum steps for an agent to take before it resets; or 0 for
     * unlimited steps.
     * @zh
     * 代理在重置之前执行的最大步数；或 0 为无限步数。
     *
     * @en
     * The max step value determines the maximum length of an agent's episodes.
     * Set to a positive integer to limit the episode length to that many steps.
     * Set to 0 for unlimited episode length.
     * @zh
     * max step 值决定了代理章节的最大长度。
     * 设置为正整数以将章节长度限制为那么多步。
     * 设置为 0 以获得无限章节长度。
     *
     *
     * @en
     * When an episode ends and a new one begins, the Agent object's
     * {@link OnEpisodeBegin} function is called. You can implement
     * {@link OnEpisodeBegin} to reset the agent or remove it from the
     * environment. An agent's episode can also end if you call its {@link EndEpisode}
     * method or an external process resets the environment through the {@link Academy}.
     * @zh
     * 当一个章节结束并开始一个新的章节时，将调用 Agent 对象的 {@link OnEpisodeBegin} 函数。
     * 您可以实现 {@link OnEpisodeBegin} 来重置代理或将其从环境中移除。
     * 如果您调用其 {@link EndEpisode} 方法或外部进程通过 {@link Academy} 重置环境，代理的章节也会结束。
     *
     * @en
     * Consider limiting the number of steps in an episode to avoid wasting time during
     * training. If you set the max step value to a reasonable estimate of the time it should
     * take to complete a task, then agents that haven’t succeeded in that time frame will
     * reset and start a new training episode rather than continue to fail.
     * @zh
     * 考虑限制章节中的步数，以避免在训练过程中浪费时间。
     * 如果您将 max step 值设置为完成任务所需时间的合理估计值，那么在该时间范围内未成功的代理将重置并开始新的训练章节，而不是继续失败。
     *
     * @en
     * To use a step limit when training while allowing agents to run without resetting
     * outside of training, you can set the max step to 0 in {@link Initialize}
     * if the {@link Academy} is not connected to an external process.
     * @zh
     * 在训练时使用步数限制，同时允许代理在训练之外运行而不重置，您可以在 {@link Initialize} 中将 max step 设置为 0，
     * 如果 {@link Academy} 未连接到外部进程。
     *
     *
     * @example
     *
     * public class MyAgent : Agent
     * {
     *     public override void Initialize()
     *     {
     *         if (!Academy.Instance.IsCommunicatorOn)
     *         {
     *             this.maxStep = 0;
     *         }
     *     }
     * }
     *
     * @description
     * **Note:** in general, you should limit the differences between the code you execute
     * during training and the code you run during inference.
     * </example>
     */
    @property(CCInteger)
    maxStep: number = 0;

    /**
     * @en
     * Current Agent information (message sent to Brain).
     * @zh
     * 当前代理信息（发送给 Brain 的消息）。
     */
    private _info: AgentInfo = null!;

    /**
     * @en
     * Represents the reward the agent accumulated during the current step.
     * It is reset to 0 at the beginning of every step.
     * Should be set to a positive value when the agent performs a "good"
     * action that we wish to reinforce/reward, and set to a negative value
     * when the agent performs a "bad" action that we wish to punish/deter.
     * Additionally, the magnitude of the reward should not exceed 1.0
     * @zh
     * 表示代理在当前步骤中累积的奖励。
     * 它在每个步骤开始时重置为 0。
     * 当代理执行我们希望强化/奖励的“好”动作时，应将其设置为正值，并在代理执行我们希望惩罚/阻止的“坏”动作时将其设置为负值。
     * 此外，奖励的大小不应超过 1.0。
     *
     */
    private _reward: number = 0;

    /**
     * @en
     * Represents the group reward the agent accumulated during the current step.
     * @zh
     * 表示代理在当前步骤中累积的组奖励。
     */
    private _groupReward: number = 0;

    /**
     * @en
     * Keeps track of the cumulative reward in this episode.
     * @zh
     * 跟踪本章节中的累积奖励。
     */
    private _cumulativeReward: number = 0;

    /**
     * @en
     * Whether or not the agent requests an action.
     * @zh
     * 代理是否请求行动。
     */
    private _requestAction: boolean = false;

    /**
     * @en
     * Whether or not the agent requests a decision.
     * @zh
     * 代理是否请求决策。
     */
    private _requestDecision: boolean = false;

    /**
     * @en
     * Keeps track of the number of steps taken by the agent in this episode.
     * Note that this value is different for each agent, and may not overlap
     * with the step counter in the Academy, since agents reset based on
     * their own experience.
     *
     * @zh
     * 跟踪代理在本章节中所采取的步数。
     * 请注意，此值对于每个代理都是不同的，并且可能与 Academy 中的步数计数器不重叠，因为代理基于自己的经验重置。
     *
     */
    private _stepCount: number = 0;

    /**
     * @en
     * Number of times the Agent has completed an episode.
     * @zh
     * 代理完成章节的次数。
     */
    private _completedEpisodes: number = 0;

    /**
     * @en
     * Episode identifier each agent receives. It is used
     * to separate between different agents in the environment.
     * This Id will be changed every time the Agent resets.
     * 
     * @zh
     * 每个代理接收的章节标识符。它用于在环境中区分不同的代理。
     * 此 Id 将在代理重置时更改。
     * 
     */
    private _episodeId: number = 0;

    /**
     * @en
     *  Whether or not the Agent has been initialized already
     * @zh
     * 代理是否已经初始化
     */
    private _initialized: boolean = false;


    /**
     * @en
     *  Set of DemonstrationWriters that the Agent will write its step information to.
     * If you use a DemonstrationRecorder component, this will automatically register its DemonstrationWriter.
     * You can also add your own DemonstrationWriter by calling
     * DemonstrationRecorder.AddDemonstrationWriterToAgent()
     * 
     * @zh
     * 代理将其步骤信息写入的 DemonstrationWriter 集合。
     * 如果您使用 DemonstrationRecorder 组件，它将自动注册其 DemonstrationWriter。
     * 您还可以通过调用 DemonstrationRecorder.AddDemonstrationWriterToAgent() 添加自己的 DemonstrationWriter。
     * 
     */
    private _demonstrationWriters: Set<DemonstrationWriter> = new Set<DemonstrationWriter>();
    /** @en Accessor for the DemonstrationWriter set; see the field docs above. */
    public get demonstrationWriters(): Set<DemonstrationWriter> {
        return this._demonstrationWriters;
    }
    /** @en Replaces the DemonstrationWriter set wholesale. */
    public set demonstrationWriters(value: Set<DemonstrationWriter>) {
        this._demonstrationWriters = value;
    }

    /**
     * @en
     *  List of sensors used to generate observations.
     * Currently generated from attached SensorComponents, and a legacy VectorSensor
     * 
     * @zh
     * 用于生成观察结果的传感器列表。
     * 当前从附加的 SensorComponents 和传统的 VectorSensor 生成
     */
    private _sensors: ISensor[] = null!;

    /**
     * @en
     * VectorSensor which is written to by AddVectorObs
     * 
     * @zh
     * 由 AddVectorObs 写入的 VectorSensor
     */
    private _collectObservationsSensor: VectorSensor = null!;

    /**
     * @en
     *  StackingSensor which is written to by AddVectorObs
     * @zh
     *  由 AddVectorObs 写入的 StackingSensor
     */
    private _stackedCollectObservationsSensor: StackingSensor = null!;


    // private _collectObservationsChecker: RecursionChecker = new RecursionChecker('CollectObservations');
    // private _onEpisodeBeginChecker: RecursionChecker = new RecursionChecker('OnEpisodeBegin');

    /**
     * @en 
     * List of IActuators that this Agent will delegate actions to if any exist.
     * @zh
     * 代理将行动委托给的 IActuators 列表（如果有）。
     */
    private _actuatorManager: ActuatorManager = null!;

    /**
     * @en
     * VectorActuator which is used by default if no other sensors exist on this Agent. This VectorSensor will
     * delegate its actions to {@link onActionReceived(ActionBuffers)} by default in order to keep backward compatibility
     * with the current behavior of Agent.
     * 
     * @zh
     * 如果此代理上不存在其他传感器，则默认使用的 VectorActuator。此 VectorSensor 将默认将其行动委托给 {@link onActionReceived(ActionBuffers)}，
     * 以便与 Agent 的当前行为保持向后兼容性。
     */
    private _vectorActuator: IActuator = null!;

    /**
     * @en
     * Current MultiAgentGroup ID. Default to 0 (meaning no group)
     * @zh
     * 当前 MultiAgentGroup ID。默认为 0（表示没有组）
     */
    private _groupId: number = 0;


    /**
     * @en
     * Delegate for the agent to unregister itself from the MultiAgentGroup without cyclic reference
     * between agent and the group.
     * 
     * @zh
     * 代理取消注册自己的 MultiAgentGroup 的委托，以避免代理和组之间的循环引用。
     * 
     * @param agent 
     */
    onAgentDisabled: (agent: Agent) => void = (agent: Agent) => { };


    /**
     * @en
     *  Called when the Agent is being loaded (before onEnable()).
     * 
     *  This function registers the RpcCommunicator delegate if no delegate has been registered with CommunicatorFactory.
     * Always call the base Agent class version of this function if you implement `onLoad()` in your
     * own Agent subclasses.
     * 
     * 
     * @zh
     * 当代理正在加载时调用（在 onEnable() 之前）。
     * 
     * 此函数在 CommunicatorFactory 中没有注册委托时注册 RpcCommunicator 委托。
     * 如果您在自己的 Agent 子类中实现 `onLoad()`，请始终调用基类 Agent 类版本的此函数。
     * 
     * @example
     * ```ts
     *  onLoad(): void {
     *      super.onLoad();
     *      /// Your code here
     * }
     * ```
     * 
     */
    onLoad(): void {
        // Nothing to do once a communicator delegate has already been registered.
        if (CommunicatorFactory.communicatorRegistered) {
            return;
        }
        // Register the default RPC communicator, but only in dev builds.
        if (Macro.CCC_DEV) {
            CommunicatorFactory.register(RpcCommunicator.create);
        }
    }

    /**
     * @en
     * Called when the attached [GameObject] becomes enabled and active.
     * 
     *  This function initializes the Agent instance, if it hasn't been initialized yet.
     * Always call the base Agent class version of this function if you implement `onEnable()`
     * in your own Agent subclasses.
     * 
     * @zh
     * 当附加的 [GameObject] 变为启用并活动时调用。
     * 
     * 此函数初始化 Agent 实例（如果尚未初始化）。
     * 如果您在自己的 Agent 子类中实现 `onEnable()`，请始终调用基类 Agent 类版本的此函数。
     * 
     * 
     * @example
     * ```ts
     *  onEnable(): void {
     *     super.onEnable();
     *    /// Your code here
     * }
     * ```
     */
    onEnable(): void {
        // All real setup happens in lazyInitialize(); it is a no-op once the
        // agent has been initialized.
        this.lazyInitialize();
    }


    /**
     * @en
     * Initializes the Agent on first use: wires the Academy callbacks, builds
     * actuators and sensors, and generates the policy from the attached
     * {@link BehaviorParameters}. Subsequent calls are no-ops.
     * @zh
     * 首次使用时初始化代理；后续调用为空操作。
     */
    public lazyInitialize(): void {
        if (this._initialized) {
            return;
        }
        this._initialized = true;

        // Grab the "static" properties for the Agent.
        this._episodeId = EpisodeIdCounter.getEpisodeId();
        this._policyFactory = this.getComponent(BehaviorParameters) as BehaviorParameters;

        this._info = new AgentInfo();
        this._sensors = new Array<ISensor>();

        // Hook this agent's step/decide/act/reset handlers into the Academy's
        // update cycle. NOTE(review): these are plain assignments, not additive
        // registrations -- confirm how multiple agents coexist on one Academy.
        Academy.instance.agentIncrementStep = this.agentIncrementStep.bind(this);
        Academy.instance.agentSendState = this.sendInfo.bind(this);
        Academy.instance.decideAction = this.decideAction.bind(this);
        Academy.instance.agentAct = this.agentStep.bind(this);
        Academy.instance.agentForceReset = this._agentReset.bind(this);

        // Actuators must exist before the policy, which needs the combined spec.
        this.initializeActuators();

        this._brain = this._policyFactory.generatePolicy(
            this._actuatorManager.getCombinedActionSpec(),
            this._actuatorManager
        );
        this.resetData();
        this.initialize();
        this.initializeSensors();

        // Allocate action buffers sized to the combined actuator action counts.
        this._info.storedActions = new ActionBuffers(
            new Array<number>(this._actuatorManager.numContinuousActions),
            new Array<number>(this._actuatorManager.numDiscreteActions)
        );

        this._info.groupId = this._groupId;

        /**
         * @en
        * The first time the Academy resets, all Agents in the scene will be
        * forced to reset through the {@link AgentForceReset} event.
        * To avoid the Agent resetting twice, the Agents will not begin their
        * episode when initializing until after the Academy had its first reset.
        * 
        * @zh
        * 当 Academy 重置时，场景中的所有代理都将通过 {@link AgentForceReset} 事件强制重置。
        *  为了避免代理重置两次，代理在初始化时不会开始章节，直到 Academy 第一次重置后。
        * 
        */
        if (Academy.instance.totalStepCount !== 0) {
            this.onEpisodeBegin();
        }
    }


    /**
     * @en
     * Called when the component becomes disabled. Detaches the agent from the
     * Academy callbacks, reports the episode as ended with
     * {@link DoneReason.Disabled}, and cleans up sensors.
     * Always call the base Agent class version if you override this in a subclass.
     * @zh
     * 当组件被禁用时调用。将代理与 Academy 回调分离，以 Disabled 结束章节并清理传感器。
     * 如果在子类中重写此方法，请始终调用基类版本。
     */
    protected onDisable(): void {
        this._demonstrationWriters.clear();

        // The Academy may already be gone (e.g. during application shutdown);
        // only unhook and notify when it still exists.
        if (Academy.isInitialized) {
            Academy.instance.agentIncrementStep = null!;
            Academy.instance.agentSendState = null!;
            Academy.instance.decideAction = null!;
            Academy.instance.agentAct = null!;
            Academy.instance.agentForceReset = null!;
            this.notifyAgentDone(DoneReason.Disabled);
        }

        this.cleanupSensors();
        // Let the MultiAgentGroup (if any) unregister this agent.
        this.onAgentDisabled?.(this);
        this._initialized = false;
    }

    /**
     * @en
     * Marks the agent as done, sends its final state (observations, rewards,
     * done flags) to the brain and any registered demonstration writers, then
     * resets the per-episode bookkeeping. Returns immediately if the agent is
     * already marked done, so duplicate notifications are suppressed.
     * @zh
     * 将代理标记为完成，发送最终状态给 Brain 和演示记录器，然后重置每章节的记录。
     * @param doneReason Why the episode ended.
     */
    private notifyAgentDone(doneReason: DoneReason): void {
        if (this._info.done) {
            // Already notified for this episode; do not send a duplicate final state.
            return;
        }
        this._info.episodeId = this._episodeId;
        this._info.reward = this._reward;
        this._info.groupReward = this._groupReward;
        this._info.done = true;
        this._info.maxStepReached = doneReason === DoneReason.MaxStepReached;
        this._info.groupId = this._groupId;
        // Collect one last observation so the brain receives the terminal state.
        this.updateSensors();
        this.collectObservations(this._collectObservationsSensor);
        this._brain?.requestDecision(this._info, this._sensors);
        if (this._demonstrationWriters.size !== 0) {
            for (const demoWriter of this._demonstrationWriters) {
                demoWriter.record(this._info, this._sensors);
            }
        }
        this.resetSensors();
        // A disabled agent did not actually complete its episode, so it is not counted.
        if (doneReason !== DoneReason.Disabled) {
            this._completedEpisodes++;
            this.updateRewardStats();
        }
        this._reward = 0;
        this._groupReward = 0;
        this._cumulativeReward = 0;
        this._requestAction = false;
        this._requestDecision = false;
        this._info.storedActions.clear();
    }

    /**
     * 
     * @en
     * Updates the Model assigned to this Agent instance.
     * 
     * If the agent already has an assigned model, that model is replaced with the
     * the provided one. However, if you call this function with arguments that are
     * identical to the current parameters of the agent, then no changes are made.
     * 
     * @zh
     * 更新分配给此代理实例的模型。
     * 
     * 如果代理已经有一个分配的模型，那么该模型将被提供的模型替换。
     * 但是，如果您使用与代理的当前参数相同的参数调用此函数，则不会进行任何更改。
     * 
     * @param behaviorName The identifier of the behavior.
     * @param model The model to use for inference
     * @param inferenceDevice define the device on which the model will be run
     * @returns 
     */
    public setModel(
        behaviorName: string,
        model: NNModel,
        inferenceDevice: InferenceDevice = InferenceDevice.Default
    ): void {
        if (
            behaviorName === this._policyFactory.behaviorName &&
            model === this._policyFactory.model &&
            inferenceDevice === this._policyFactory.inferenceDevice
        ) {
            return;
        }
        this.notifyAgentDone(DoneReason.Disabled);
        this._policyFactory.model = model;
        this._policyFactory.inferenceDevice = inferenceDevice;
        this._policyFactory.behaviorName = behaviorName;
        this.reloadPolicy();
    }

    /** Regenerates the policy from the current BehaviorParameters settings. */
    reloadPolicy(): void {
        // Before lazyInitialize() has run there is no actuator manager to build from.
        if (!this._initialized) {
            return;
        }
        const combinedSpec = this._actuatorManager.getCombinedActionSpec();
        this._brain = this._policyFactory.generatePolicy(combinedSpec, this._actuatorManager);
    }

    /**
     * @en
     * Returns the current step counter (within the current episode).
     * 
     * @zh
     * 返回当前步数计数器（在当前章节中）。
     */
    public get stepCount(): number {
        return this._stepCount;
    }

    /**
     * @en
     * Number of times the Agent has completed an episode.
     * @zh
     * 代理完成章节的次数。
     */
    public get completedEpisodes(): number {
        return this._completedEpisodes;
    }

    /**
     * @en
     * Overrides the current step reward of the agent and updates the episode
     * reward accordingly.
     *
     * This function replaces any rewards given to the agent during the current step.
     * Use {@link addReward(float) } to incrementally change the reward rather than
     * overriding it.
     *
     * Typically, you assign rewards in the Agent subclass's {@link onActionReceived(ActionBuffers)}
     * implementation after carrying out the received action and evaluating its success.
     *
     * Rewards are used during reinforcement learning; they are ignored during inference.
     *
     * See [Agents - Rewards] for general advice on implementing rewards and [Reward Signals]
     * for information about mixing reward signals from curiosity and Generative Adversarial
     * Imitation Learning (GAIL) with rewards supplied through this method.
     * 
     * @zh
     * 覆盖代理的当前步骤奖励，并相应地更新章节奖励。
     * 
     * 此函数替换了当前步骤中给予代理的任何奖励。
     * 使用 {@link addReward(float)} 逐步更改奖励，而不是覆盖它。
     * 
     * 通常，在代理子类的 {@link onActionReceived(ActionBuffers)} 实现中执行接收到的动作并评估其成功后，分配奖励。
     * 
     * 在强化学习中使用奖励；在推理期间忽略它们。
     * 
     * 有关实施奖励的一般建议，请参见 [Agents - Rewards]，有关通过好奇心和生成对抗性模仿学习（GAIL）的奖励信号与通过此方法提供的奖励信号混合的信息，请参见 [Reward Signals]。
     * [Agents - Rewards]: https://github.com/Unity-Technologies/ml-agents/blob/release_20_docs/docs/Learning-Environment-Design-Agents.md#rewards
     * @param reward  The new value of the reward
     */
    public setReward(reward: number): void {
        Utilities.debugCheckNullAndNaN(reward, 'reward', 'setReward');
        // Replace this step's reward and adjust the episode total by the difference.
        const delta = reward - this._reward;
        this._reward = reward;
        this._cumulativeReward += delta;
    }


    /**
     * @en
     * Increments the step and episode rewards by the provided value.
     * 
     * Use a positive reward to reinforce desired behavior. You can use a
     * negative reward to penalize mistakes. Use {@link setReward(float)} to
     * set the reward assigned to the current step with a specific value rather than
     * increasing or decreasing it.
     * 
     * @zh
     * 通过提供的值增加步骤和章节奖励。
     * 
     * 使用正奖励来强化期望的行为。您可以使用负奖励来惩罚错误。
     * 使用 {@link setReward(float)} 为当前步骤分配特定值的奖励，而不是增加或减少它。
     * 
     * @param increment 
     */
    public addReward(increment: number): void {
        Utilities.debugCheckNullAndNaN(increment, 'increment', 'addReward');
        // Accumulate into both the episode total and the per-step reward.
        this._cumulativeReward += increment;
        this._reward += increment;
    }

    /**
     * @en
     * Overrides the group reward for the current step; the group-level
     * counterpart of {@link setReward}. Note it does not touch the per-agent
     * cumulative reward.
     * @zh
     * 覆盖当前步骤的组奖励。
     * @param reward The new value of the group reward.
     */
    setGroupReward(reward: number): void {
        Utilities.debugCheckNullAndNaN(reward, 'reward', 'setGroupReward');
        this._groupReward = reward;
    }


    /**
     * @en
     * Increments the group reward for the current step; the group-level
     * counterpart of {@link addReward}.
     * @zh
     * 增加当前步骤的组奖励。
     * @param increment The value to add to the group reward.
     */
    addGroupReward(increment: number): void {
        Utilities.debugCheckNullAndNaN(increment, 'increment', 'addGroupReward');
        this._groupReward += increment;
    }

    /**
     * 
     * @en
     *  Retrieves the episode reward for the Agent.
     * 
     * @zh
     * 检索代理的章节奖励。
     * @returns The reward accumulated since the start of the current episode.
     */
    public getCumulativeReward(): number {
        return this._cumulativeReward;
    }

    private updateRewardStats(): void {
        // Stats reporting is currently disabled; kept as a hook (called from
        // notifyAgentDone) for when gauge instrumentation is re-enabled.
        // const gaugeName = `${this._policyFactory.behaviorName}.CumulativeReward`;
        // TimerStack.Instance.setGauge(gaugeName, this.getCumulativeReward());
    }

    /**
     * 
     * @en
     * Sets the done flag to true and resets the agent. 
     * 
     *  This should be used when the episode can no longer continue, such as when the Agent
     *  reaches the goal or fails at the task.
     * 
     * @zh
     * 将 done 标志设置为 true 并重置代理。
     * 
     * 当章节无法继续时（例如，当代理达到目标或在任务中失败时），应使用此函数。
     */

    public endEpisode(): void {
        // DoneCalled distinguishes a deliberate end from a MaxStepReached interruption.
        this.endEpisodeAndReset(DoneReason.DoneCalled);
    }

    /**
     * 
     * @en
     *  Indicate that the episode is over but not due to the "fault" of the Agent.
     * This has the same end result as calling {@link endEpisode}, but has a
     * slightly different effect on training.
     * 
     * This should be used when the episode could continue, but has gone on for
     * a sufficient number of steps.
     * 
     * @zh
     * 表示章节结束，但不是由于代理的“错误”。
     * 这与调用 {@link endEpisode} 具有相同的最终结果，但对训练的影响略有不同。
     * 
     * 当章节可以继续，但已经进行了足够多的步骤时，应使用此函数。
     * 
     */
    public episodeInterrupted(): void {
        // MaxStepReached signals the episode was cut short rather than failed,
        // which (per the docs above) affects training slightly differently.
        this.endEpisodeAndReset(DoneReason.MaxStepReached);
    }

    /**
     * @en
     *  Internal method to end the episode and reset the Agent.
     * 
     * @zh
     * 结束章节并重置代理的内部方法。
     * @param reason 
     */
    private endEpisodeAndReset(reason: DoneReason): void {
        // Send the final state to the brain/writers, then begin a new episode.
        this.notifyAgentDone(reason);
        this._agentReset();
    }

    /**
     * @en
     *  Requests a new decision for this agent.
     * Call `requestDecision()` whenever an agent needs a decision. You often
     * want to request a decision every environment step. However, if an agent
     * cannot use the decision every step, then you can request a decision less
     * frequently.
     *
     * You can add a {@link DecisionRequester} component to the agent's
     * [GameObject] to drive the agent's decision making. When you use this component,
     * do not call `requestDecision()` separately.
     *
     * Note that this function calls {@link requestAction}; you do not need to
     * call both functions at the same time.
     * 
     * @zh
     * 为此代理请求新的决策。
     * 
     * 每当代理需要决策时，请调用 `requestDecision()`。通常，您希望在每个环境步骤中请求一个决策。
     * 但是，如果代理无法在每一步中使用决策，那么您可以更少频繁地请求决策。
     * 
     * 您可以将 {@link DecisionRequester} 组件添加到代理的 [GameObject] 中，以驱动代理的决策制定。
     * 当您使用此组件时，不要单独调用 `requestDecision()`。
     * 
     * 请注意，此函数调用 {@link requestAction}；您不需要同时调用这两个函数。
     */
    public requestDecision(): void {
        // A decision implies an action, so flag both; the flags are consumed
        // by sendInfo() and agentStep() respectively. Order is irrelevant here.
        this.requestAction();
        this._requestDecision = true;
    }

    /**
     * @en
     * Requests an action for this agent.
     * Call `requestAction()` to repeat the previous action returned by the agent's
     * most recent decision. A new decision is not requested. When you call this function,
     * the Agent instance invokes {@link IActionReceiver.onActionReceived} with the
     * existing action vector.
     *
     * You can use `requestAction()` in situations where an agent must take an action
     * every update, but doesn't need to make a decision as often. For example, an
     * agent that moves through its environment might need to apply an action to keep
     * moving, but only needs to make a decision to change course or speed occasionally.
     *
     * You can add a {@link DecisionRequester} component to the agent's
     * [GameObject] to drive the agent's decision making and action frequency. When you
     * use this component, do not call `requestAction()` separately.
     *
     * Note that {@link requestDecision} calls `requestAction()`; you do not need to
     * call both functions at the same time.
     * 
     * @zh
     * 为此代理请求行动。
     * 调用 `requestAction()` 以重复代理的最近决策返回的上一个动作。
     * 不会请求新的决策。当您调用此函数时，Agent 实例会使用现有的动作向量调用 {@link IActionReceiver.onActionReceived}。
     * 
     * 您可以在代理必须在每次更新时采取行动但不需要经常做出决策的情况下使用 `RequestAction()`。
     * 例如，通过其环境移动的代理可能需要应用一个动作以保持移动，但只需要偶尔改变航向或速度。
     * 
     * 您可以将 {@link DecisionRequester} 组件添加到代理的 [GameObject] 中，以驱动代理的决策制定和行动频率。
     * 当您使用此组件时，不要单独调用 `requestAction()`。
     * 
     * 请注意，{@link requestDecision} 调用 `requestAction()`；您不需要同时调用这两个函数。
     * 
     * 
     */
    public requestAction(): void {
        // Flag only; agentStep() checks this flag and re-executes the stored actions.
        this._requestAction = true;
    }

    /**
     * @en
     * Helper function that resets all the data structures associated with
     * the agent. Typically used when the agent is being initialized or reset
     * at the end of an episode.
     * 
     * @zh
     * 重置与代理关联的所有数据结构的辅助函数。通常在代理初始化或在章节结束时重置时使用。
     */
    private resetData(): void {
        // Delegates to the actuator manager (if already created) to reset its action data.
        this._actuatorManager?.resetData();
    }


    /**
     * @en
     * Implement `initialize()` to perform one-time initialization or set up of the
     * Agent instance.
     * 
     * `initialize()` is called once when the agent is first enabled. If, for example,
     * the Agent object needs references to other [GameObjects] in the scene, you
     * can collect and store those references here.
     *
     * Note that {@link onEpisodeBegin} is called at the start of each of
     * the agent's "episodes". You can use that function for items that need to be reset
     * for each episode.
     * 
     * @zh
     * 实现 `initialize()` 来执行一次性初始化或设置代理实例。
     * 
     * 当代理首次启用时，将调用 `initialize()` 一次。例如，如果 Agent 对象需要引用场景中的其他 [GameObjects]，则可以在此处收集并存储这些引用。
     * 
     * 请注意，{@link onEpisodeBegin} 在代理的每个“章节”开始时调用。您可以使用该函数重置每个章节所需的项目。
     */
    public initialize(): void { }


    /**
     * @en
     * Implement {@link heuristic} to choose an action for this agent using a custom heuristic.
     * 
     * Implement this function to provide custom decision making logic or to support manual
     * control of an agent using keyboard, mouse, game controller input, or a script.
     *
     * Your heuristic implementation can use any decision making logic you specify. Assign decision
     * values to the {@link ActionBuffers.ContinuousActions}  and {@link ActionBuffers.DiscreteActions}
     * arrays , passed to your function as a parameter.
     * The same array will be reused between steps. It is up to the user to initialize
     * the values on each call, for example by calling `Array.Clear(actionsOut, 0, actionsOut.Length);`.
     * Add values to the array at the same indexes as they are used in your
     * {@link IActionReceiver.OnActionReceived} function, which receives this array and
     * implements the corresponding agent behavior. See [Actions] for more information
     * about agent actions.
     * Note : Do not create a new float array of action in the `Heuristic()` method,
     * as this will prevent writing floats to the original action array.
     *
     * An agent calls this `Heuristic()` function to make a decision when you set its behavior
     * type to {@link BehaviorType.HeuristicOnly}. The agent also calls this function if
     * you set its behavior type to {@link BehaviorType.Default} when the
     * {@link Academy} is not connected to an external training process and you do not
     * assign a trained model to the agent.
     *
     * To perform imitation learning, implement manual control of the agent in the `Heuristic()`
     * function so that you can record the demonstrations required for the imitation learning
     * algorithms. (Attach a [Demonstration Recorder] component to the agent's [GameObject] to
     * record the demonstration session to a file.)
     *
     * Even when you don’t plan to use heuristic decisions for an agent or imitation learning,
     * implementing a simple heuristic function can aid in debugging agent actions and interactions
     * with its environment.
     * 
     * 
     * @zh
     * 
     * 实现 {@link Heuristic} 以使用自定义启发式为此代理选择行动。
     * 
     * 实现此函数以提供自定义决策逻辑或支持使用键盘、鼠标、游戏控制器输入或脚本手动控制代理。
     * 
     * 您的启发式实现可以使用您指定的任何决策逻辑。将决策值分配给 {@link ActionBuffers.ContinuousActions} 和 {@link ActionBuffers.DiscreteActions} 数组，
     * 作为参数传递给您的函数。相同的数组将在步骤之间重用。由用户在每次调用时初始化值，例如通过调用 `Array.Clear(actionsOut, 0, actionsOut.Length);`。
     * 在与 {@link IActionReceiver.OnActionReceived} 函数中使用的相同索引处向数组添加值，该函数接收此数组并实现相应的代理行为。
     * 有关代理行动的更多信息，请参见 [Actions]。
     * 
     * 注意：不要在 `Heuristic()` 方法中创建新的行动浮点数组，因为这将阻止将浮点写入原始行动数组。
     * 
     * 当您将其行为类型设置为 {@link BehaviorType.HeuristicOnly} 时，代理调用此 `heuristic()` 函数以做出决策。
     * 
     * 如果您将其行为类型设置为 {@link BehaviorType.Default}，并且 {@link Academy} 未连接到外部训练过程，并且未将训练模型分配给代理，
     * 则代理也会调用此函数。
     * 
     * 
     * @param actionBuffers which contain the continuous and 
     * discrete action buffers to write to.
     */
    public heuristic(actionsOut: ActionBuffers): void {
        // Default implementation only warns; subclasses should override this and
        // write decision values into actionsOut's continuous/discrete buffers.
        warn("heuristic method called but not implemented. Returning placeholder actions.");
    }

    /**
     * @en
     * Set up the list of ISensors on the Agent. By default, this will select any
     * SensorComponent's attached to the Agent.
     * 
     * @zh
     * 在代理上设置 ISensors 列表。默认情况下，这将选择附加到代理的任何 SensorComponent。
     */
    private initializeSensors(): void {
        if (this._policyFactory == null) {
            // NOTE(review): getComponent can return null; the cast hides that.
            // Confirm a BehaviorParameters component is always present on this node.
            this._policyFactory = this.getComponent(BehaviorParameters) as BehaviorParameters;
        }
        // Sensors generated from observable attributes, unless explicitly ignored.
        if (this._policyFactory.observableAttributeHandling !== ObservableAttributeOptions.Ignore) {
            const excludeInherited = this._policyFactory.observableAttributeHandling === ObservableAttributeOptions.ExcludeInherited;
            const observableSensors = ObservableAttribute.createObservableSensors(this, excludeInherited);
            this._sensors.push(...observableSensors);
        }

        // Sensors supplied by SensorComponents on this node (and optionally its children).
        let attachedSensorComponents: SensorComponent[];
        if (this._policyFactory.useChildSensors) {
            attachedSensorComponents = this.getComponentsInChildren('SensorComponent') as SensorComponent[];
        } else {
            attachedSensorComponents = this.getComponents('SensorComponent') as SensorComponent[];
        }

        // BUGFIX: this previously executed `this._sensors.length += attachedSensorComponents.length;`
        // before the loop. Unlike C#'s List.Capacity, growing a JS array's `length`
        // inserts real `undefined` holes, which then preceded every pushed sensor.
        for (const component of attachedSensorComponents) {
            this._sensors.push(...component.createSensors());
        }

        // Vector observations filled by collectObservations(), optionally stacked over steps.
        const param = this._policyFactory.brainParameters;
        if (param.vectorObservationSize > 0) {
            this._collectObservationsSensor = new VectorSensor(param.vectorObservationSize);
            if (param.numStackedVectorObservations > 1) {
                this._stackedCollectObservationsSensor = new StackingSensor(
                    this._collectObservationsSensor,
                    param.numStackedVectorObservations);
                this._sensors.push(this._stackedCollectObservationsSensor);
            } else {
                this._sensors.push(this._collectObservationsSensor);
            }
        }

        // Deterministic ordering so the observation layout is stable across runs.
        SensorUtils.sortSensors(this._sensors);

        // After sorting, duplicate names would be adjacent — checking neighbors suffices.
        for (let i = 0; i < this._sensors.length - 1; i++) {
            assert(this._sensors[i].getName() !== this._sensors[i + 1].getName(), "Sensor names must be unique.");
        }
    }

    private cleanupSensors(): void {
        for (let i = 0; i < this._sensors.length; i++) {
            const sensor = this._sensors[i];
            sensor.reset();
        }
    }

    private initializeActuators(): void {
        let attachedActuators: ActuatorComponent[];
        if (this._policyFactory.useChildActuators) {
            attachedActuators = this.getComponentsInChildren('ActuatorComponent') as ActuatorComponent[];
        } else {
            attachedActuators = this.getComponents('ActuatorComponent') as ActuatorComponent[];
        }

        const param = this._policyFactory.brainParameters;
        this._vectorActuator = new AgentVectorActuator(this, this, param.actionSpec);
        this._actuatorManager = new ActuatorManager(attachedActuators.length + 1);

        this._actuatorManager.add(this._vectorActuator);

        for (const actuatorComponent of attachedActuators) {
            this._actuatorManager.addActuators(actuatorComponent.createActuators());
        }
    }

    /**
     * @en
     *  Sends the Agent info to the linked Brain.
     * 
     * @zh
     * 将代理信息发送到链接的 Brain。
     * @returns 
     */
    private sendInfoToBrain(): void {
        if (!this._initialized) {
            // BUGFIX: the concatenated message parts previously ran together as
            // "...initialized.Please..." — a separating space was added.
            throw new CCCAgentsError("Call to SendInfoToBrain when Agent hasn't been initialized. " +
                "Please ensure that you are calling 'base.OnEnable()' if you have overridden OnEnable.");
        }

        if (this._brain == null) {
            return;
        }

        // A finished episode must not leak its last actions into the next request.
        if (this._info.done) {
            this._info.clearActions();
        } else {
            this._info.copyActions(this._actuatorManager.storedActions);
        }

        // Refresh all sensors, then let the user-defined hook fill the vector sensor.
        this.updateSensors();
        this.collectObservations(this._collectObservationsSensor);

        this._actuatorManager.writeActionMask();

        // Snapshot per-step state into the AgentInfo sent to the brain.
        this._info.discreteActionMasks = this._actuatorManager.discreteActionMask?.getMask();
        this._info.reward = this._reward;
        this._info.groupReward = this._groupReward;
        this._info.done = false;
        this._info.maxStepReached = false;
        this._info.episodeId = this._episodeId;
        this._info.groupId = this._groupId;

        this._brain.requestDecision(this._info, this._sensors);

        // If we have any DemonstrationWriters, write the AgentInfo and sensors to them.
        if (this.demonstrationWriters.size !== 0) {
            for (const demoWriter of this.demonstrationWriters) {
                demoWriter.record(this._info, this._sensors);
            }
        }
    }

    private updateSensors(): void {
        for (const sensor of this._sensors) {
            sensor.update();
        }
    }

    private resetSensors(): void {
        for (const sensor of this._sensors) {
            sensor.reset();
        }
    }

    /**
     * 
     * @en
     * Implement `collectObservations()` to collect the vector observations of
     * the agent for the  step. The agent observation describes the current
     * environment from the perspective of the agent.
     * 
     *  An agent's observation is any environment information that helps
     * the agent achieve its goal. For example, for a fighting agent, its
     * observation could include distances to friends or enemies, or the
     * current level of ammunition at its disposal.
     *
     * You can use a combination of vector, visual, and raycast observations for an
     * agent. If you only use visual or raycast observations, you do not need to
     * implement a `collectObservations()` function.
     * 
     *  You can use any combination of these helper functions to build the agent's
     * vector of observations. You must build the vector in the same order
     * each time `collectObservations()` is called and the length of the vector
     * must always be the same. In addition, the length of the observation must
     * match the {@link BrainParameters.VectorObservationSize}
     * attribute of the linked Brain, which is set in the Editor on the
     * **Behavior Parameters** component attached to the agent's [GameObject].
     * 
     * @zh
     * 实现 `collectObservations()` 以收集代理的向量观察结果。
     * 
     * 代理观察描述了代理的当前环境。代理的观察是帮助代理实现其目标的任何环境信息。
     * 例如，对于战斗代理，其观察可能包括到朋友或敌人的距离，或者其可用弹药的当前水平。
     * 
     * 您可以为代理使用向量、视觉和射线投射观察的组合。如果只使用视觉或射线投射观察，您不需要实现 `collectObservations()` 函数。
     * 
     * 您可以使用这些辅助函数的任何组合来构建代理的观察向量。每次调用 `CollectObservations()` 时，您必须以相同的顺序构建向量，并且向量的长度必须始终相同。
     * 此外，观察的长度必须与链接的 Brain 上的 {@link BrainParameters.VectorObservationSize} 属性匹配，
     * 该属性在代理的 [GameObject] 上附加的 **Behavior Parameters** 组件中设置。
     * 
     * 
     * @param sensor 
     */
    collectObservations(sensor: VectorSensor): void { }


    /**
     * Returns the raw (unstacked) vector observations gathered by the
     * collectObservations() sensor for the current step.
     */
    getObservations(): number[] {
        return this._collectObservationsSensor.observations;
    }

    /**
     * Returns the stacked vector observations. Only meaningful when
     * numStackedVectorObservations > 1 (otherwise the stacking sensor is never created).
     */
    getStackedObservations(): number[] {
        return this._stackedCollectObservationsSensor.getStackedObservations();
    }

    /**
     * @en
     * Implement `WriteDiscreteActionMask()` to collects the masks for discrete
     * actions. When using discrete actions, the agent will not perform the masked
     * action.
     * 
     * @zh
     * 实现 `WriteDiscreteActionMask()` 以收集离散行动的掩码。
     * 
     * @param actionMask 
     */
    writeDiscreteActionMask(actionMask: IDiscreteActionMask): void { }


    /**
     * 
     * Implement `OnActionReceived()` to specify agent behavior at every step, based
     * on the provided action.
     * 
     *  An action is passed to this function in the form of an  {@link ActionBuffers}.
     * Your implementation must use the array to direct the agent's behavior for the
     * current step.
     *
     * You decide how many elements you need in the ActionBuffers to control your
     * agent and what each element means. For example, if you want to apply a
     * force to move an agent around the environment, you can arbitrarily pick
     * three values in ActionBuffers.ContinuousActions array to use as the force components.
     * During training, the agent's  policy learns to set those particular elements of
     * the array to maximize the training rewards the agent receives. (Of course,
     * if you implement a {@link Agent.heuristic(in ActionBuffers)} function, it must use the same
     * elements of the action array for the same purpose since there is no learning
     * involved.)
     *
     * An Agent can use continuous and/or discrete actions. Configure this  along with the size
     * of the action array,  in the {@link BrainParameters} of the agent's associated
     * {@link BehaviorParameters} component.
     *
     * When an agent uses continuous actions, the values in the ActionBuffers.ContinuousActions
     * array are floating point numbers. You should clamp the values to the range,
     * -1..1, to increase numerical stability during training.
     *
     * When an agent uses discrete actions, the values in the ActionBuffers.DiscreteActions array
     * are integers that each represent a specific, discrete action. For example,
     * you could define a set of discrete actions such as:
     *
     * <code>
     * 0 = Do nothing
     * 1 = Move one space left
     * 2 = Move one space right
     * 3 = Move one space up
     * 4 = Move one space down
     * </code>
     *
     * When making a decision, the agent picks one of the five actions and puts the
     * corresponding integer value in the ActionBuffers.DiscreteActions array. For example, if the agent
     * decided to move left, the ActionBuffers.DiscreteActions parameter would be an array with
     * a single element with the value 1.
     *
     * You can define multiple sets, or branches, of discrete actions to allow an
     * agent to perform simultaneous, independent actions. For example, you could
     * use one branch for movement and another branch for throwing a ball left, right,
     * up, or down, to allow the agent to do both in the same step.
     *
     * The ActionBuffers.DiscreteActions array of an agent with discrete actions contains one
     * element for each  branch. The value of each element is the integer representing the
     * chosen action for that branch. The agent always chooses one action for each branch.
     *
     * When you use the discrete actions, you can prevent the training process
     * or the neural network model from choosing specific actions in a step by
     * implementing the {@link writeDiscreteActionMask(IDiscreteActionMask)}
     * method. For example, if your agent is next to a wall, you could mask out any
     * actions that would result in the agent trying to move into the wall.
     * 
     * @zh
     * 实现 `OnActionReceived()` 以指定每个步骤的代理行为，基于提供的行动。
     * 
     * 以 {@link ActionBuffers} 的形式将一个行动传递给此函数。您的实现必须使用数组来指导代理的当前步骤行为。
     * 
     * 您可以决定在 ActionBuffers 中需要多少元素来控制代理以及每个元素的含义。例如，如果您想施加一个
     * 力来移动代理以穿越环境，您可以任意选择 ActionBuffers.ContinuousActions 数组中的三个值作为力的分量。
     * 在训练期间，代理的策略学习将这些特定元素设置为数组的最大训练奖励。 （当然，如果您实现了一个 {@link Agent.heuristic(in ActionBuffers)} 函数，
     * 它必须使用相同的行动数组元素来实现相同的目的，因为没有涉及学习。）
     * 
     * 代理可以使用连续或离散行动。将此与行动数组的大小一起配置在代理关联的 {@link BehaviorParameters} 组件的 {@link BrainParameters} 中。
     * 
     * 当代理使用连续行动时，ActionBuffers.ContinuousActions 数组中的值是浮点数。您应该将值夹紧到范围 -1..1，以增加训练期间的数值稳定性。
     * 
     * 当代理使用离散行动时，ActionBuffers.DiscreteActions 数组中的值是每个表示特定离散行动的整数。例如，您可以定义一组离散行动，例如：
     * 
     * <code>
     * 0 = Do nothing
     * 1 = Move one space left
     * 2 = Move one space right
     * 3 = Move one space up
     * 4 = Move one space down
     * </code>
     * 
     * 在做出决策时，代理选择五个行动中的一个，并将相应的整数值放入 ActionBuffers.DiscreteActions 数组中。例如，如果代理决定向左移动，
     * 则 ActionBuffers.DiscreteActions 参数将是一个具有值 1 的单个元素的数组。
     * 
     * 您可以定义多个离散行动集或分支，以允许代理执行同时、独立的行动。例如，您可以使用一个分支进行移动，另一个分支进行向左、向右、向上或向下扔球，
     * 以允许代理在同一步骤中执行这两个操作。
     * 
     * 具有离散行动的代理的 ActionBuffers.DiscreteActions 数组包含每个分支的一个元素。每个元素的值是表示该分支所选行动的整数。代理总是为每个分支选择一个行动。
     * 
     * 当您使用离散行动时，您可以通过实现 {@link writeDiscreteActionMask(IDiscreteActionMask)} 方法阻止训练过程或神经网络模型在一步中选择特定行动。
     * 例如，如果您的代理靠近墙壁，您可以屏蔽任何导致代理试图移动到墙壁中的行动。
     * 
     * 
     * @param actionBuffers Struct containing the buffers of actions to be executed at this step.
     */
    onActionReceived(actionBuffers: ActionBuffers): void { }

    /**
     * @en
     * Implement `OnEpisodeBegin()` to set up an Agent instance at the beginning
     * of an episode.
     * 
     * @zh
     * 实现 `OnEpisodeBegin()` 以在章节开始时设置代理实例。
     * 
     */
    onEpisodeBegin() { }

    /**
     * Returns the action buffers produced by the agent's most recent decision,
     * as stored in the actuator manager.
     */
    getStoredActionBuffers(): ActionBuffers {
        return this._actuatorManager.storedActions;
    }


    _agentReset(): void {
        // Clear action data and the step counter first, so the user hook
        // onEpisodeBegin() observes a clean agent.
        this.resetData();
        this._stepCount = 0;
        this.onEpisodeBegin();
    }


    /**
     * 
     * @en
     * Scales continuous action from [-1, 1] to arbitrary range.
     * 
     * @zh
     * 将连续行动从 [-1, 1] 缩放到任意范围。
     * 
     * 
     * @param rawAction - The raw action value. 
     * @param min - The minimum value of the range.
     * @param max - The maximum value of the range.
     * @returns 
     */
    protected static scaleAction(rawAction: number, min: number, max: number): number {
        // Linear map: -1 -> min, 0 -> (min + max) / 2, +1 -> max.
        const center = (min + max) / 2;
        const halfSpan = (max - min) / 2;
        return center + rawAction * halfSpan;
    }

    /**
     * @en
     * Signals the agent that it must send its decision to the brain.
     * 
     * @zh
     * 通知代理必须将其决策发送到 Brain。
     */
    private sendInfo(): void {
        // log("2 sendInfo");
        if (this._requestDecision) {
            this.sendInfoToBrain();
            this._reward = 0;
            this._groupReward = 0;
            this._requestDecision = false;
        }
    }

    /** Advances this agent's per-episode step counter by one. */
    agentIncrementStep(): void {
        this._stepCount++;
    }

    /**
     * @en
     * Used by the brain to make the agent perform a step.
     * 
     * @zh
     * 由 Brain 使用，使代理执行一步。
     */
    private agentStep(): void {
        // Replay the most recent decision's actions when one was requested.
        if (this._requestAction && this._brain != null) {
            this._requestAction = false;
            this._actuatorManager.executeActions();
        }

        // maxStep <= 0 disables the step limit entirely.
        if (this.maxStep > 0 && this._stepCount >= this.maxStep) {
            this.notifyAgentDone(DoneReason.MaxStepReached);
            this._agentReset();
        }
    }

    private decideAction(): void {
        // Stored buffers were never initialized — let the actuator manager (re)create them.
        if (this._actuatorManager.storedActions.continuousActions == null) {
            this.resetData();
        }
        // Fall back to an empty ActionBuffers when no brain is attached.
        const actions = this._brain?.decideAction() ?? new ActionBuffers();
        this._info.copyActions(actions);
        this._actuatorManager.updateActions(actions);
    }

    /**
     * Registers this agent with a multi-agent group (or detaches it when null).
     * Throws if the agent already belongs to a different group.
     */
    setMultiAgentGroup(multiAgentGroup: IMultiAgentGroup | null): void {
        // Passing null detaches the agent from any group.
        if (multiAgentGroup == null) {
            this._groupId = 0;
            return;
        }
        const newGroupId = multiAgentGroup.getId();
        // Re-registering with the same group is a no-op; switching groups is an error.
        if (this._groupId != 0 && this._groupId != newGroupId) {
            throw new CCCAgentsError("Agent is already registered with a group. Unregister it first.");
        }
        this._groupId = newGroupId;
    }



}
