import { Agent } from "./Agent";
import { IMultiAgentGroup } from "./IMultiAgentGroup";

/**
 * @en
 * Process-wide counter used to hand out unique, monotonically
 * increasing ids to multi-agent groups (0, 1, 2, ...).
 *
 * @zh
 * 进程级计数器，用于为多智能体组分配唯一且单调递增的 id（0、1、2……）。
 */
export class MultiAgentGroupIdCounter {

    // Next id to hand out; starts at 0 and only ever grows.
    private static _nextId: number = 0;

    /**
     * @en
     * Returns a fresh group id and advances the counter.
     * @zh
     * 返回一个新的组 id 并推进计数器。
     */
    public static getGroupId(): number {
        const id = MultiAgentGroupIdCounter._nextId;
        MultiAgentGroupIdCounter._nextId += 1;
        return id;
    }
}


/**
 * @en
 * A basic class implementation of MultiAgentGroup.
 * 
 * @zh
 * MultiAgentGroup 的基本类实现。
 */
export class SimpleMultiAgentGroup implements IMultiAgentGroup {
    // Unique id for this group, assigned once at construction.
    private readonly _id: number = MultiAgentGroupIdCounter.getGroupId();
    // Agents currently registered with this group.
    private _agents: Set<Agent> = new Set<Agent>();

    /**
     * @en
     * Unregisters every agent from this group, releasing it.
     * @zh
     * 注销该组中的所有智能体并释放该组。
     */
    public dispose(): void {
        // Iterate over a snapshot: unregisterAgent mutates this._agents.
        // The previous while/values().next().value pattern yielded a value
        // typed Agent | undefined under strict TS and could spin forever if
        // an unregister call ever failed to shrink the set.
        for (const agent of Array.from(this._agents)) {
            this.unregisterAgent(agent);
        }
    }

    /**
     * @en
     * Registers an agent with this group; a no-op if the agent is already
     * registered. The agent is automatically unregistered when it is disabled.
     * @zh
     * 将智能体注册到该组；如果该智能体已注册则不执行任何操作。
     * 当智能体被禁用时会自动注销。
     * 
     * @param agent The agent to register.
     */
    public registerAgent(agent: Agent): void {
        if (!this._agents.has(agent)) {
            agent.setMultiAgentGroup(this);
            this._agents.add(agent);
            // Auto-unregister from this group when the agent is disabled.
            agent.onAgentDisabled = this.unregisterAgent.bind(this);
        }
    }

    /**
     * @en
     * Removes an agent from this group; a no-op if the agent is not registered.
     * @zh
     * 从该组中移除智能体；如果该智能体未注册则不执行任何操作。
     * 
     * @param agent The agent to unregister.
     */
    public unregisterAgent(agent: Agent): void {
        if (this._agents.has(agent)) {
            agent.setMultiAgentGroup(null);
            this._agents.delete(agent);
            // Detach the disable hook installed by registerAgent.
            agent.onAgentDisabled = () => { };
        }
    }

    /**
     * @en
     * Gets the unique id of this group.
     * @zh
     * 获取该组的唯一 id。
     */
    public getId(): number {
        return this._id;
    }

    /**
     * @en
     * Gets the agents currently registered with this group.
     * @zh
     * 获取当前注册到该组的所有智能体。
     */
    public getRegisteredAgents(): Agent[] {
        return Array.from(this._agents);
    }

    /**
     * @en
     * Increments the group rewards for all agents in this MultiAgentGroup.
     * 
     * This function increases or decreases the group reward by a given amount
     * for all agents in the group. Use {@link setGroupReward} to set the group
     * reward assigned to the current step to a specific value rather than
     * increasing or decreasing it.
     *
     * A positive group reward indicates the whole group's accomplishments or desired behaviors.
     * Every agent in the group will receive the same group reward no matter whether the
     * agent's act directly leads to the reward. Group rewards are meant to reinforce agents
     * to act in the group's best interest instead of individual ones.
     * Group rewards are treated differently than individual agent rewards during training, so
     * calling addGroupReward() is not equivalent to calling agent.addReward() on each agent in the group.
     * 
     * 
     * @zh
     * 为此 MultiAgentGroup 中的所有智能体增加组奖励。
     * 
     * 此函数为组中的所有智能体按给定数量增加或减少组奖励。使用 {@link setGroupReward} 将当前步骤的组奖励设置为特定值，而不是增加或减少它。
     * 
     * 正组奖励表示整个组的成就或期望行为。无论智能体的行为是否直接导致奖励，组中的每个智能体都将获得相同的组奖励。
     * 组奖励旨在促使智能体以组的最佳利益行事，而不是个人利益。
     * 在训练期间，组奖励与个体智能体奖励的处理方式不同，因此调用 addGroupReward() 与在组中的每个智能体上调用 agent.addReward() 不等效。
     * 
     * 
     * @param reward The amount to add to the group reward of every registered agent.
     */
    public addGroupReward(reward: number): void {
        for (const agent of this._agents) {
            agent.addGroupReward(reward);
        }
    }

    /**
     * @en
     * Sets the group reward assigned to the current step for all agents in
     * this MultiAgentGroup, replacing any previously assigned value.
     * Use {@link addGroupReward} to increment it instead.
     * @zh
     * 为此 MultiAgentGroup 中的所有智能体设置当前步骤的组奖励，替换之前分配的值。
     * 使用 {@link addGroupReward} 来增量修改它。
     * 
     * @param reward The group reward value to assign to every registered agent.
     */
    public setGroupReward(reward: number): void {
        for (const agent of this._agents) {
            agent.setGroupReward(reward);
        }
    }


    /**
     * @en
     * End episodes for all agents in this MultiAgentGroup.
     * @zh
     * 结束此 MultiAgentGroup 中所有智能体的剧集。
     */
    public endGroupEpisode(): void {
        for (const agent of this._agents) {
            agent.endEpisode();
        }
    }

    /**
     * @en
     * Indicate that the episode is over but not due to the "fault" of the group.
     * This has the same end result as calling {@link endGroupEpisode}, but has a
     * slightly different effect on training.
     * 
     * This should be used when the episode could continue, but has gone on for
     * a sufficient number of steps, such as if the environment hits some maximum number of steps.
     * 
     * @zh
     * 表示剧集已经结束，但不是由于组的“故障”。
     * 这与调用 {@link endGroupEpisode} 具有相同的最终结果，但对训练的影响略有不同。
     * 
     * 当剧集可以继续进行，但已经进行了足够数量的步骤时（例如环境达到了某个最大步数），应使用此方法。
     * 
     */
    public groupEpisodeInterrupted(): void {
        for (const agent of this._agents) {
            agent.episodeInterrupted();
        }
    }
}