package test06;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * Inference optimization strategy that groups incoming requests into
 * fixed-size batches before forwarding them through the model.
 *
 * <p>Batching amortizes per-call overhead: requests are partitioned into
 * chunks of at most {@code batchSize} and each chunk is run through
 * {@link Model#forward} as a single call.
 *
 * <p>NOTE(review): {@code timeoutMs} is currently only printed — no actual
 * time-based flushing is implemented here; confirm whether a real timeout
 * is expected elsewhere. Not thread-safe: setters mutate shared state.
 */
public class DynamicBatchingStrategy implements InferenceOptimizationStrategy {
    // Maximum number of requests grouped into one model.forward() call.
    private int batchSize = 8;
    // Advertised batching timeout; printed only, never enforced (see class note).
    private int timeoutMs = 100;

    /**
     * Runs all requests through the model in batches of at most
     * {@code batchSize}, preserving request order in the returned outputs.
     *
     * @param model    model whose {@code forward} is invoked once per batch
     * @param requests requests to process; must not be {@code null}
     * @return one output tensor per model output, concatenated across batches
     * @throws IllegalArgumentException if {@code requests} is {@code null}
     */
    @Override
    public Tensor[] optimizeInference(Model model, RequestData[] requests) {
        if (requests == null) {
            throw new IllegalArgumentException("requests must not be null");
        }
        System.out.println("📦 应用动态批处理策略");
        System.out.println("   - 批处理大小: " + batchSize);
        System.out.println("   - 超时时间: " + timeoutMs + "ms");

        // Simulated dynamic batching: partition, preprocess, forward, collect.
        List<RequestData[]> batches = createBatches(requests);
        List<Tensor> allOutputs = new ArrayList<>();

        for (RequestData[] batch : batches) {
            System.out.println("   - 处理批次大小: " + batch.length);
            Tensor[] preprocessedInputs = preprocessInputs(batch);
            Tensor[] batchOutputs = model.forward(preprocessedInputs);
            allOutputs.addAll(Arrays.asList(batchOutputs));
        }

        return allOutputs.toArray(new Tensor[0]);
    }

    /**
     * Partitions {@code requests} into consecutive chunks of at most
     * {@code batchSize}; the final chunk may be smaller. An empty input
     * yields an empty list.
     */
    private List<RequestData[]> createBatches(RequestData[] requests) {
        List<RequestData[]> batches = new ArrayList<>();
        // batchSize is guaranteed >= 1 by setBatchSize, so this loop terminates.
        for (int i = 0; i < requests.length; i += batchSize) {
            int end = Math.min(requests.length, i + batchSize);
            batches.add(Arrays.copyOfRange(requests, i, end));
        }
        return batches;
    }

    /**
     * Converts raw requests into model-ready tensors.
     *
     * <p>Placeholder implementation: every request maps to the same fixed
     * 1x3 tensor regardless of its content — TODO replace with real
     * per-request preprocessing.
     */
    private Tensor[] preprocessInputs(RequestData[] requests) {
        Tensor[] inputs = new Tensor[requests.length];
        for (int i = 0; i < requests.length; i++) {
            inputs[i] = new Tensor(new float[]{1.0f, 0.0f, 0.5f}, new int[]{1, 3});
        }
        return inputs;
    }

    /**
     * Sets the maximum batch size.
     *
     * @param batchSize must be at least 1 — a non-positive value would make
     *                  {@code createBatches} loop forever (step of 0) or
     *                  step backwards
     * @throws IllegalArgumentException if {@code batchSize < 1}
     */
    public void setBatchSize(int batchSize) {
        if (batchSize < 1) {
            throw new IllegalArgumentException("batchSize must be >= 1, got " + batchSize);
        }
        this.batchSize = batchSize;
    }

    /**
     * Sets the advertised batching timeout in milliseconds (currently only
     * reported, not enforced — see class note).
     *
     * @param timeoutMs must be non-negative
     * @throws IllegalArgumentException if {@code timeoutMs < 0}
     */
    public void setTimeout(int timeoutMs) {
        if (timeoutMs < 0) {
            throw new IllegalArgumentException("timeoutMs must be >= 0, got " + timeoutMs);
        }
        this.timeoutMs = timeoutMs;
    }
}