import { ITensorAllocator } from '../../Core/Backends/ITensorAllocator';
import { Model } from '../../Core/Model';
import { ISensor } from '../Sensors/ISensor';
import { ModelApiVersion } from './BarracudaModelParamLoader';
import {
    ActionMaskInputGenerator,
    BatchSizeGenerator,
    BiDimensionalOutputGenerator,
    ObservationGenerator,
    PreviousActionInputGenerator,
    RandomNormalInputGenerator,
    RecurrentInputGenerator,
    SequenceLengthGenerator,
} from './GeneratorImpl';
import { ModelExtensions } from './ModelExtensions';
import { AgentInfoSensorsPair } from './ModelRunner';
import { TensorNames } from './TensorNames';
import { TensorProxy } from './TensorProxy';

/**
 * @en
 * Interface for objects that populate a {@link TensorProxy} with data for the current batch.
 * Implementations either fill model *inputs* from agent observations/state, or pre-size
 * model *outputs* (zero-filled) so the inference engine can write into them.
 *
 * @zh
 * 用于生成 Tensor 数据的接口。
 */
export interface IGenerator {
    /**
     * @en
     * Modifies the data inside a Tensor according to the information contained in the
     * AgentInfos contained in the current batch.
     *
     * @zh
     * 根据当前批次中包含的 AgentInfos 中的信息修改 Tensor 中的数据。
     *
     * @param tensorProxy The tensor the data and shape will be modified.
     * @param batchSize The number of agents present in the current batch.
     * @param infos  List of AgentInfos containing the information that will be used to populate
     * the tensor's data.
     *
     */
    generate(tensorProxy: TensorProxy, batchSize: number, infos: AgentInfoSensorsPair[]): void;
}

/**
 * @en
 * Mapping between Tensor names and generators.
 * A TensorGenerator implements a Dictionary of strings (node names) to an Action.
 * The Action take as argument the tensor, the current batch size and a Dictionary of
 * Agent to AgentInfo corresponding to the current batch.
 * Each Generator reshapes and fills the data of the tensor based of the data of the batch.
 * When the TensorProxy is an Input to the model, the shape of the Tensor will be modified
 * depending on the current batch size and the data of the Tensor will be filled using the
 * Dictionary of Agent to AgentInfo.
 * When the TensorProxy is an Output of the model, only the shape of the Tensor will be
 * modified using the current batch size. The data will be pre-filled with zeros.
 *
 * @zh
 * Tensor 名称和生成器之间的映射。
 * 一个 TensorGenerator 实现了一个字符串（节点名称）到一个 Action 的字典。
 * Action 接受 Tensor、当前批次大小和 Agent 到 AgentInfo 字典作为参数，这些 AgentInfo 对应于当前批次。
 * 每个生成器根据批次的数据重新整形和填充 Tensor 的数据。
 * 当 TensorProxy 是模型的输入时，Tensor 的形状将根据当前批次大小进行修改，并使用 Agent 到 AgentInfo 字典填充 Tensor 的数据。
 * 当 TensorProxy 是模型的输出时，只有 Tensor 的形状将根据当前批次大小进行修改。数据将预先填充为零。
 *
 */
export class TensorGenerator {
    // Registry of tensor (node) names to the generator responsible for filling them.
    private _dict: { [key: string]: IGenerator } = {};
    // Model API version reported by ModelExtensions; stays 0 when no model was supplied.
    private _apiVersion: number = 0;

    /**
     * @en
     * Registers the fixed-name input/output generators for the given model.
     * Observation generators are registered later via {@link initializeObservations},
     * because their names depend on the sensors and the model API version.
     *
     * @param seed Seed forwarded to the random-normal input generator.
     * @param allocator Allocator used by every generator to create tensor storage.
     * @param memories Per-agent recurrent memories, keyed by agent id.
     * @param barracudaModel The model to generate tensors for; when omitted/null the
     * generator dictionary is left empty.
     * @param deterministicInference Whether deterministic action outputs should be used
     * when resolving continuous/discrete output names.
     */
    constructor(
        seed: number,
        allocator: ITensorAllocator,
        memories: { [id: number]: number[] },
        barracudaModel: Model = null!,
        deterministicInference: boolean = false
    ) {
        if (barracudaModel === null) {
            return;
        }
        const model: Model = barracudaModel;
        this._apiVersion = ModelExtensions.getVersion(model);

        // Generators for model inputs.
        this._dict[TensorNames.batchSizePlaceholder] = new BatchSizeGenerator(allocator);
        this._dict[TensorNames.sequenceLengthPlaceholder] = new SequenceLengthGenerator(allocator);
        this._dict[TensorNames.recurrentInPlaceholder] = new RecurrentInputGenerator(allocator, memories);
        this._dict[TensorNames.previousActionPlaceholder] = new PreviousActionInputGenerator(allocator);
        this._dict[TensorNames.actionMaskPlaceholder] = new ActionMaskInputGenerator(allocator);
        this._dict[TensorNames.randomNormalEpsilonPlaceholder] = new RandomNormalInputGenerator(seed, allocator);

        // Generators for model outputs: these only resize the tensor for the batch
        // and zero-fill it; the inference engine writes the actual values.
        if (ModelExtensions.hasContinuousOutputs(model, deterministicInference)) {
            this._dict[ModelExtensions.continuousOutputName(model, deterministicInference)] = new BiDimensionalOutputGenerator(
                allocator
            );
        }
        if (ModelExtensions.hasDiscreteOutputs(model, deterministicInference)) {
            this._dict[ModelExtensions.discreteOutputName(model, deterministicInference)] = new BiDimensionalOutputGenerator(allocator);
        }
        this._dict[TensorNames.recurrentOutput] = new BiDimensionalOutputGenerator(allocator);
        this._dict[TensorNames.valueEstimateOutput] = new BiDimensionalOutputGenerator(allocator);
    }

    /**
     * @en
     * Registers one observation generator per sensor, using the naming scheme of the
     * detected model API version:
     * - MLAgents1_0: all rank-1 sensors share a single vector-observation generator;
     *   rank-3 sensors use sequential visual-observation names; rank-2 sensors use
     *   per-sensor observation names. Any other rank is rejected.
     * - MLAgents2_0: every sensor gets its own per-index observation generator.
     *
     * @param sensors The sensors of the agent, in index order.
     * @param allocator Allocator used to create tensor storage.
     * @throws Error if, under the 1.0 API, a sensor reports a rank other than 1, 2 or 3.
     */
    public initializeObservations(sensors: ISensor[], allocator: ITensorAllocator): void {
        if (this._apiVersion === ModelApiVersion.MLAgents1_0) {
            let visIndex = 0;
            // Shared generator for all rank-1 (vector) sensors: the 1.0 API concatenates
            // them into a single vector observation placeholder.
            let vecObsGen: ObservationGenerator | null = null;

            for (let sensorIndex = 0; sensorIndex < sensors.length; sensorIndex++) {
                const sensor = sensors[sensorIndex];
                const rank = sensor.getObservationSpec().rank;
                let obsGen: ObservationGenerator | null = null;
                let obsGenName: string | null = null;

                switch (rank) {
                    case 1:
                        if (vecObsGen === null) {
                            vecObsGen = new ObservationGenerator(allocator);
                        }
                        obsGen = vecObsGen;
                        obsGenName = TensorNames.vectorObservationPlaceholder;
                        break;
                    case 2:
                        obsGen = new ObservationGenerator(allocator);
                        obsGenName = TensorNames.getObservationName(sensorIndex);
                        break;
                    case 3:
                        obsGen = new ObservationGenerator(allocator);
                        obsGenName = TensorNames.getVisualObservationName(visIndex);
                        visIndex++;
                        break;
                    default:
                        throw new Error(`Sensor ${sensor.getName()} has an invalid rank ${rank}`);
                }

                obsGen.addSensorIndex(sensorIndex);
                this._dict[obsGenName] = obsGen;
            }
        }

        if (this._apiVersion === ModelApiVersion.MLAgents2_0) {
            for (let sensorIndex = 0; sensorIndex < sensors.length; sensorIndex++) {
                const obsGen = new ObservationGenerator(allocator);
                const obsGenName = TensorNames.getObservationName(sensorIndex);
                obsGen.addSensorIndex(sensorIndex);
                this._dict[obsGenName] = obsGen;
            }
        }
    }

    /**
     * @en
     * Populates the tensors in the current batch with data, running the generator
     * registered for each tensor's name.
     *
     * @param tensors The tensors to fill or resize.
     * @param currentBatchSize The number of agents present in the current batch.
     * @param infos List of AgentInfos containing the data used to populate the tensors.
     * @throws Error if a tensor's name has no registered generator.
     */
    public generateTensors(tensors: TensorProxy[], currentBatchSize: number, infos: AgentInfoSensorsPair[]): void {
        for (let tensorIndex = 0; tensorIndex < tensors.length; tensorIndex++) {
            const tensor = tensors[tensorIndex];

            // Own-property check: Reflect.has/`in` would also match inherited
            // Object.prototype keys (e.g. a tensor named 'toString'), which are
            // not registered generators.
            if (!Object.prototype.hasOwnProperty.call(this._dict, tensor.name)) {
                throw new Error(`Unknown tensorProxy expected as input : ${tensor.name}`);
            }
            this._dict[tensor.name].generate(tensor, currentBatchSize, infos);
        }
    }
}
