/**
 * Dawn Batch Processor - a batched WebGPU command processor built on Google Dawn
 * A batched WebGPU rendering implementation optimized for the WeChat client
 */

#pragma once

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <memory>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
#include <unordered_map>
#include <vector>

#include "dawn/webgpu.h"
#include "dawn/webgpu_cpp.h"
#include "dawn/dawn_native/DawnNative.h"
#include "../bindings/webgpu_batch_bindings.h"

namespace webgpu {

// Thread-safe pool mapping monotonically increasing ids to owned resources.
template<typename T>
class ResourcePool {
public:
    using ResourceId = uint32_t;

    // Takes ownership of `resource` and returns the id under which it is
    // stored. Ids start at 1, so 0 is available as a "null" sentinel.
    ResourceId add(T&& resource) {
        std::lock_guard<std::mutex> lock(mutex_);
        ResourceId id = nextId_++;
        // emplace constructs the mapped value directly from the rvalue, so T
        // no longer needs to be default-constructible (operator[] required it).
        resources_.emplace(id, std::move(resource));
        return id;
    }

    // Returns a pointer to the stored resource, or nullptr if `id` is
    // unknown. unordered_map keeps element pointers stable until erase, but
    // the pointer must not be held across a concurrent remove()/clear().
    T* get(ResourceId id) {
        std::lock_guard<std::mutex> lock(mutex_);
        auto it = resources_.find(id);
        return it != resources_.end() ? &it->second : nullptr;
    }

    // Destroys the resource with the given id; returns false if absent.
    bool remove(ResourceId id) {
        std::lock_guard<std::mutex> lock(mutex_);
        return resources_.erase(id) > 0;
    }

    // Destroys all pooled resources.
    void clear() {
        std::lock_guard<std::mutex> lock(mutex_);
        resources_.clear();
    }

private:
    std::mutex mutex_;
    std::unordered_map<ResourceId, T> resources_;
    // NOTE(review): always accessed under mutex_, so atomic is not strictly
    // required; kept to preserve the member's declared type.
    std::atomic<ResourceId> nextId_{1};
};

// One buffered group of serialized commands awaiting processing.
struct CommandBatch {
    std::vector<uint8_t> data;   // raw serialized command stream
    size_t commandCount = 0;     // number of commands encoded in `data`
    // Creation time, captured when the batch object is constructed.
    std::chrono::high_resolution_clock::time_point timestamp =
        std::chrono::high_resolution_clock::now();
};

// Batch processor built on Google Dawn: decodes a serialized WebGPU command
// stream, replays it against a Dawn device, and tracks per-batch statistics.
class DawnBatchProcessor {
public:
    struct Config {
        uint32_t maxBatchSize = 1000;           // maximum batch size (commands per batch)
        uint32_t maxBatchLatencyMs = 16;        // maximum batching latency (milliseconds)
        uint32_t workerThreads = 1;             // number of worker threads
        bool enableValidation = false;          // whether validation is enabled
        bool enableAsyncSubmission = true;      // submit asynchronously
    };
    
    explicit DawnBatchProcessor(const Config& config = {});
    ~DawnBatchProcessor();
    
    // Initializes the Dawn environment; returns false on failure.
    bool initialize();
    
    // Submits a serialized command batch of `size` bytes for processing.
    void submitBatch(const uint8_t* data, size_t size);
    
    // Forces all pending batches to be processed.
    void flush();
    
    // Aggregate performance statistics.
    struct Stats {
        uint64_t totalBatches = 0;
        uint64_t totalCommands = 0;
        uint64_t totalBytes = 0;
        double avgBatchProcessingTime = 0.0;
        double avgCommandsPerBatch = 0.0;
    };
    
    Stats getStats() const;
    
    // Returns the Dawn device handle.
    wgpu::Device getDevice() const { return device_; }
    
    // Installs the callback invoked with a message when an error is reported.
    void setErrorCallback(std::function<void(const std::string&)> callback) {
        errorCallback_ = std::move(callback);
    }
    
private:
    Config config_;
    
    // Dawn objects
    std::unique_ptr<dawn::native::Instance> dawnInstance_;
    wgpu::Adapter adapter_;
    wgpu::Device device_;
    wgpu::Queue queue_;
    
    // Resource pools, addressed by the ids handed out on creation.
    ResourcePool<wgpu::Buffer> buffers_;
    ResourcePool<wgpu::Texture> textures_;
    ResourcePool<wgpu::TextureView> textureViews_;
    ResourcePool<wgpu::ShaderModule> shaderModules_;
    ResourcePool<wgpu::RenderPipeline> renderPipelines_;
    ResourcePool<wgpu::ComputePipeline> computePipelines_;
    ResourcePool<wgpu::BindGroup> bindGroups_;
    ResourcePool<wgpu::CommandEncoder> commandEncoders_;
    
    // Queue of batches awaiting processing by the worker threads.
    std::queue<std::unique_ptr<CommandBatch>> pendingBatches_;
    std::mutex batchMutex_;
    std::condition_variable batchCondition_;
    
    // Worker threads
    std::vector<std::thread> workerThreads_;
    std::atomic<bool> running_{false};
    
    // Performance statistics; statsMutex_ is mutable so getStats() can lock it.
    mutable std::mutex statsMutex_;
    Stats stats_;
    
    // Error handling
    std::function<void(const std::string&)> errorCallback_;
    
    // Internal methods
    bool initializeDawn();
    void workerThreadFunction();
    void processBatch(const CommandBatch& batch);
    void processCommand(const uint8_t*& data, size_t& remaining);
    
    // Command handlers: each decodes its payload from `data`, advancing the
    // cursor and shrinking `remaining` as bytes are consumed.
    void handleCreateBuffer(const uint8_t*& data, size_t& remaining);
    void handleDestroyBuffer(const uint8_t*& data, size_t& remaining);
    void handleWriteBuffer(const uint8_t*& data, size_t& remaining);
    void handleCreateTexture(const uint8_t*& data, size_t& remaining);
    void handleCreateTextureView(const uint8_t*& data, size_t& remaining);
    void handleCreateShaderModule(const uint8_t*& data, size_t& remaining);
    void handleCreateRenderPipeline(const uint8_t*& data, size_t& remaining);
    void handleCreateComputePipeline(const uint8_t*& data, size_t& remaining);
    void handleCreateCommandEncoder(const uint8_t*& data, size_t& remaining);
    void handleBeginRenderPass(const uint8_t*& data, size_t& remaining);
    void handleEndRenderPass(const uint8_t*& data, size_t& remaining);
    void handleBeginComputePass(const uint8_t*& data, size_t& remaining);
    void handleEndComputePass(const uint8_t*& data, size_t& remaining);
    void handleSetRenderPipeline(const uint8_t*& data, size_t& remaining);
    void handleSetComputePipeline(const uint8_t*& data, size_t& remaining);
    void handleSetVertexBuffer(const uint8_t*& data, size_t& remaining);
    void handleSetIndexBuffer(const uint8_t*& data, size_t& remaining);
    void handleSetBindGroup(const uint8_t*& data, size_t& remaining);
    void handleDraw(const uint8_t*& data, size_t& remaining);
    void handleDrawIndexed(const uint8_t*& data, size_t& remaining);
    void handleDispatch(const uint8_t*& data, size_t& remaining);
    void handleSubmit(const uint8_t*& data, size_t& remaining);
    void handleBatchStart(const uint8_t*& data, size_t& remaining);
    void handleBatchEnd(const uint8_t*& data, size_t& remaining);
    
    // Decoding helpers: read a primitive, string, or byte buffer from the
    // stream, advancing `data` and decrementing `remaining`.
    template<typename T>
    T read(const uint8_t*& data, size_t& remaining);
    
    std::string readString(const uint8_t*& data, size_t& remaining);
    std::vector<uint8_t> readBuffer(const uint8_t*& data, size_t& remaining);
    
    // Error handling; the static functions are Dawn C-API callbacks whose
    // `userdata` presumably carries the DawnBatchProcessor instance --
    // TODO(review): confirm against the callback registration site.
    void reportError(const std::string& message);
    static void onDeviceError(WGPUErrorType type, const char* message, void* userdata);
    static void onDeviceLost(WGPUDeviceLostReason reason, const char* message, void* userdata);
    
    // Current render/compute recording state, kept per thread (thread_local).
    struct RenderState {
        uint32_t currentEncoderId = 0;
        uint32_t currentRenderPassId = 0;
        uint32_t currentComputePassId = 0;
        bool inRenderPass = false;
        bool inComputePass = false;
        std::vector<uint32_t> pendingCommandBuffers;
    };
    
    thread_local static RenderState renderState_;
};

// Template specializations of read<T>() for the primitive types used by the
// serialized command stream.

// Reads a single byte from the stream, advancing the cursor; reports an
// error and returns 0 if the stream is exhausted.
template<>
inline uint8_t DawnBatchProcessor::read<uint8_t>(const uint8_t*& data, size_t& remaining) {
    if (remaining < sizeof(uint8_t)) {
        reportError("Not enough data to read uint8_t");
        return 0;
    }
    const uint8_t value = data[0];
    ++data;
    --remaining;
    return value;
}

// Reads a uint16_t (host byte order, as the stream was written) from the
// stream, advancing the cursor; reports an error and returns 0 on underrun.
template<>
inline uint16_t DawnBatchProcessor::read<uint16_t>(const uint8_t*& data, size_t& remaining) {
    if (remaining < sizeof(uint16_t)) {
        reportError("Not enough data to read uint16_t");
        return 0;
    }
    // memcpy instead of reinterpret_cast: the byte cursor has no alignment
    // guarantee, so a cast-based load is undefined behavior. Compilers lower
    // this memcpy to a plain load where the target allows it.
    uint16_t value;
    std::memcpy(&value, data, sizeof(value));
    data += sizeof(value);
    remaining -= sizeof(value);
    return value;
}

// Reads a uint32_t (host byte order, as the stream was written) from the
// stream, advancing the cursor; reports an error and returns 0 on underrun.
template<>
inline uint32_t DawnBatchProcessor::read<uint32_t>(const uint8_t*& data, size_t& remaining) {
    if (remaining < sizeof(uint32_t)) {
        reportError("Not enough data to read uint32_t");
        return 0;
    }
    // memcpy instead of reinterpret_cast: the byte cursor has no alignment
    // guarantee, so a cast-based load is undefined behavior. Compilers lower
    // this memcpy to a plain load where the target allows it.
    uint32_t value;
    std::memcpy(&value, data, sizeof(value));
    data += sizeof(value);
    remaining -= sizeof(value);
    return value;
}

// Reads a float (raw bit pattern, as the stream was written) from the
// stream, advancing the cursor; reports an error and returns 0.0f on underrun.
template<>
inline float DawnBatchProcessor::read<float>(const uint8_t*& data, size_t& remaining) {
    if (remaining < sizeof(float)) {
        reportError("Not enough data to read float");
        return 0.0f;
    }
    // memcpy instead of reinterpret_cast: the byte cursor has no alignment
    // guarantee and float/uint8_t aliasing through a cast is undefined
    // behavior; memcpy is the well-defined way to reassemble the bits.
    float value;
    std::memcpy(&value, data, sizeof(value));
    data += sizeof(value);
    remaining -= sizeof(value);
    return value;
}

} // namespace webgpu