/**
 * Dawn Batch Processor Implementation
 * 基于Google Dawn的WebGPU批量命令处理器实现
 */

#include "dawn_batch_processor.h"
#include <iostream>
#include <chrono>
#include <algorithm>

namespace webgpu {

// Thread-local render state: each worker thread decodes batches with its own
// encoder/pass tracking, so this state must not be shared across threads.
thread_local DawnBatchProcessor::RenderState DawnBatchProcessor::renderState_;

DawnBatchProcessor::DawnBatchProcessor(const Config& config) 
    : config_(config) {
}

/**
 * Shuts down the processor: signals worker threads to exit, joins them,
 * then releases all pooled WebGPU resources. Batches still queued are dropped.
 */
DawnBatchProcessor::~DawnBatchProcessor() {
    // Flip the running flag while holding batchMutex_. The original cleared it
    // unlocked, so a worker could evaluate the wait predicate (running_ still
    // true, queue empty), then miss the notify_all() below and block forever
    // in wait() — a classic lost-wakeup race on shutdown.
    {
        std::lock_guard<std::mutex> lock(batchMutex_);
        running_ = false;
    }
    batchCondition_.notify_all();

    // Wait for every worker to finish its current batch and exit.
    for (auto& thread : workerThreads_) {
        if (thread.joinable()) {
            thread.join();
        }
    }

    // Release pooled GPU resources after all workers have stopped.
    buffers_.clear();
    textures_.clear();
    textureViews_.clear();
    shaderModules_.clear();
    renderPipelines_.clear();
    computePipelines_.clear();
    bindGroups_.clear();
    commandEncoders_.clear();
}

/**
 * Initializes the Dawn device and spins up the configured number of worker
 * threads that drain the pending-batch queue.
 * @return true on success, false when Dawn initialization fails.
 */
bool DawnBatchProcessor::initialize() {
    if (!initializeDawn()) {
        reportError("Failed to initialize Dawn");
        return false;
    }

    // Mark the processor live before any worker starts its loop.
    running_ = true;

    const uint32_t threadCount = config_.workerThreads;
    workerThreads_.reserve(threadCount);
    for (uint32_t index = 0; index < threadCount; ++index) {
        workerThreads_.emplace_back(&DawnBatchProcessor::workerThreadFunction, this);
    }

    std::cout << "DawnBatchProcessor initialized with " << config_.workerThreads
              << " worker threads" << std::endl;

    return true;
}

bool DawnBatchProcessor::initializeDawn() {
    // 创建Dawn native实例
    dawnInstance_ = std::make_unique<dawn::native::Instance>();
    
    // 发现适配器
    dawnInstance_->DiscoverDefaultAdapters();
    std::vector<dawn::native::Adapter> adapters = dawnInstance_->GetAdapters();
    
    if (adapters.empty()) {
        reportError("No WebGPU adapters found");
        return false;
    }
    
    // 选择最佳适配器（优先选择独立显卡）
    dawn::native::Adapter selectedAdapter;
    for (const auto& adapter : adapters) {
        wgpu::AdapterProperties properties;
        adapter.GetProperties(&properties);
        
        if (properties.adapterType == wgpu::AdapterType::DiscreteGPU) {
            selectedAdapter = adapter;
            break;
        }
        
        if (!selectedAdapter) {
            selectedAdapter = adapter;
        }
    }
    
    adapter_ = wgpu::Adapter(selectedAdapter.Get());
    
    // 设置设备特性
    std::vector<wgpu::FeatureName> requiredFeatures;
    // 根据需要添加特性
    
    wgpu::DeviceDescriptor deviceDesc{};
    deviceDesc.label = "WebGPU Batch Processor Device";
    deviceDesc.requiredFeaturesCount = requiredFeatures.size();
    deviceDesc.requiredFeatures = requiredFeatures.data();
    
    // 创建设备
    wgpu::Device device = nullptr;
    auto callback = [](WGPURequestDeviceStatus status, WGPUDevice result, 
                      const char* message, void* userdata) {
        if (status == WGPURequestDeviceStatus_Success) {
            *static_cast<wgpu::Device*>(userdata) = wgpu::Device::Acquire(result);
        } else {
            std::cerr << "Failed to request device: " << (message ? message : "Unknown error") << std::endl;
        }
    };
    
    adapter_.RequestDevice(&deviceDesc, callback, &device);
    
    // 等待设备创建完成
    while (!device) {
        dawnInstance_->ProcessEvents();
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
    
    device_ = device;
    
    // 设置错误回调
    device_.SetUncapturedErrorCallback(onDeviceError, this);
    device_.SetDeviceLostCallback(onDeviceLost, this);
    
    // 获取队列
    queue_ = device_.GetQueue();
    
    std::cout << "Dawn WebGPU device initialized successfully" << std::endl;
    return true;
}

/**
 * Copies a serialized command stream into a batch and enqueues it for the
 * worker threads. No-ops when the processor is stopped or the input is empty.
 */
void DawnBatchProcessor::submitBatch(const uint8_t* data, size_t size) {
    const bool reject = !running_ || data == nullptr || size == 0;
    if (reject) {
        return;
    }

    auto newBatch = std::make_unique<CommandBatch>();
    newBatch->data.assign(data, data + size);
    // Rough estimate only: assumes the stream is a dense run of headers.
    newBatch->commandCount = size / sizeof(CommandHeader);

    {
        std::lock_guard<std::mutex> queueLock(batchMutex_);
        pendingBatches_.push(std::move(newBatch));
    }

    // Wake exactly one worker to pick up the new batch.
    batchCondition_.notify_one();
}

// Blocks until the pending-batch queue is empty.
// NOTE(review): this only waits for the queue to DRAIN — a batch a worker has
// already popped may still be executing when flush() returns; confirm whether
// callers need full completion. Busy-polls at 100us because no "queue empty"
// condition variable exists in the current class layout.
void DawnBatchProcessor::flush() {
    // Force-process all pending batches.
    std::unique_lock<std::mutex> lock(batchMutex_);
    
    // Release and retake the lock between polls so workers can make progress.
    while (!pendingBatches_.empty()) {
        lock.unlock();
        std::this_thread::sleep_for(std::chrono::microseconds(100));
        lock.lock();
    }
}

/**
 * Worker-thread entry point: repeatedly pulls one batch off the queue,
 * processes it, and folds the timing into the running statistics.
 * Exits when running_ is cleared (destructor signals via batchCondition_).
 */
void DawnBatchProcessor::workerThreadFunction() {
    while (running_) {
        std::unique_ptr<CommandBatch> currentBatch;

        {
            std::unique_lock<std::mutex> queueLock(batchMutex_);
            batchCondition_.wait(queueLock, [this] {
                return !pendingBatches_.empty() || !running_;
            });

            if (!running_) {
                break;
            }

            if (!pendingBatches_.empty()) {
                currentBatch = std::move(pendingBatches_.front());
                pendingBatches_.pop();
            }
        }

        if (!currentBatch) {
            continue;
        }

        const auto begin = std::chrono::high_resolution_clock::now();
        processBatch(*currentBatch);
        const auto end = std::chrono::high_resolution_clock::now();
        const std::chrono::duration<double, std::milli> elapsed = end - begin;

        // Fold this batch into the cumulative counters and running averages.
        std::lock_guard<std::mutex> statsLock(statsMutex_);
        stats_.totalBatches++;
        stats_.totalCommands += currentBatch->commandCount;
        stats_.totalBytes += currentBatch->data.size();

        const double accumulatedTime =
            stats_.avgBatchProcessingTime * (stats_.totalBatches - 1) + elapsed.count();
        stats_.avgBatchProcessingTime = accumulatedTime / stats_.totalBatches;
        stats_.avgCommandsPerBatch =
            static_cast<double>(stats_.totalCommands) / stats_.totalBatches;
    }
}

/**
 * Decodes and executes every command in a batch, then submits any command
 * buffers queued on the thread-local render state. Exceptions are caught
 * and routed through reportError().
 */
void DawnBatchProcessor::processBatch(const CommandBatch& batch) {
    const uint8_t* cursor = batch.data.data();
    size_t bytesLeft = batch.data.size();

    try {
        // Consume commands until less than a full header remains.
        while (bytesLeft >= sizeof(CommandHeader)) {
            processCommand(cursor, bytesLeft);
        }

        // Submit any command buffers accumulated by the handlers.
        if (!renderState_.pendingCommandBuffers.empty()) {
            std::vector<wgpu::CommandBuffer> commandBuffers;
            commandBuffers.reserve(renderState_.pendingCommandBuffers.size());

            // Placeholder: translating encoder ids into finished command
            // buffers is not implemented, so nothing is appended here yet.
            for (uint32_t bufferId : renderState_.pendingCommandBuffers) {
                (void)bufferId;
            }

            if (!commandBuffers.empty()) {
                queue_.Submit(commandBuffers.size(), commandBuffers.data());
            }

            renderState_.pendingCommandBuffers.clear();
        }
    } catch (const std::exception& e) {
        reportError("Error processing batch: " + std::string(e.what()));
    }
}

void DawnBatchProcessor::processCommand(const uint8_t*& data, size_t& remaining) {
    if (remaining < sizeof(CommandHeader)) {
        reportError("Invalid command: insufficient data for header");
        return;
    }
    
    const CommandHeader* header = reinterpret_cast<const CommandHeader*>(data);
    CommandType type = static_cast<CommandType>(header->type);
    
    data += sizeof(CommandHeader);
    remaining -= sizeof(CommandHeader);
    
    switch (type) {
        case CommandType::CREATE_BUFFER:
            handleCreateBuffer(data, remaining);
            break;
        case CommandType::DESTROY_BUFFER:
            handleDestroyBuffer(data, remaining);
            break;
        case CommandType::WRITE_BUFFER:
            handleWriteBuffer(data, remaining);
            break;
        case CommandType::CREATE_TEXTURE:
            handleCreateTexture(data, remaining);
            break;
        case CommandType::CREATE_SHADER_MODULE:
            handleCreateShaderModule(data, remaining);
            break;
        case CommandType::CREATE_RENDER_PIPELINE:
            handleCreateRenderPipeline(data, remaining);
            break;
        case CommandType::CREATE_COMMAND_ENCODER:
            handleCreateCommandEncoder(data, remaining);
            break;
        case CommandType::BEGIN_RENDER_PASS:
            handleBeginRenderPass(data, remaining);
            break;
        case CommandType::END_RENDER_PASS:
            handleEndRenderPass(data, remaining);
            break;
        case CommandType::SET_PIPELINE:
            handleSetRenderPipeline(data, remaining);
            break;
        case CommandType::SET_VERTEX_BUFFER:
            handleSetVertexBuffer(data, remaining);
            break;
        case CommandType::SET_INDEX_BUFFER:
            handleSetIndexBuffer(data, remaining);
            break;
        case CommandType::DRAW:
            handleDraw(data, remaining);
            break;
        case CommandType::DRAW_INDEXED:
            handleDrawIndexed(data, remaining);
            break;
        case CommandType::SUBMIT:
            handleSubmit(data, remaining);
            break;
        case CommandType::BATCH_START:
            handleBatchStart(data, remaining);
            break;
        case CommandType::BATCH_END:
            handleBatchEnd(data, remaining);
            break;
        default:
            reportError("Unknown command type: " + std::to_string(static_cast<int>(type)));
            break;
    }
}

// Creates a GPU buffer from a serialized descriptor.
// Wire format: u32 id, u32 size, u32 usage, u8 mappedAtCreation.
void DawnBatchProcessor::handleCreateBuffer(const uint8_t*& data, size_t& remaining) {
    uint32_t id = read<uint32_t>(data, remaining);
    uint32_t size = read<uint32_t>(data, remaining);
    uint32_t usage = read<uint32_t>(data, remaining);
    uint8_t mappedAtCreation = read<uint8_t>(data, remaining);
    
    wgpu::BufferDescriptor desc{};
    desc.size = size;
    desc.usage = static_cast<wgpu::BufferUsage>(usage);  // raw bits trusted as a valid usage mask
    desc.mappedAtCreation = mappedAtCreation != 0;
    
    // NOTE(review): `id` is parsed but never used — the pool assigns its own
    // id via buffers_.add(). If clients later reference buffers by the id they
    // sent (as handleWriteBuffer's bufferId suggests), the two can diverge —
    // confirm against the ResourcePool API and the client serializer.
    wgpu::Buffer buffer = device_.CreateBuffer(&desc);
    buffers_.add(std::move(buffer));
}

void DawnBatchProcessor::handleWriteBuffer(const uint8_t*& data, size_t& remaining) {
    uint32_t bufferId = read<uint32_t>(data, remaining);
    uint32_t offset = read<uint32_t>(data, remaining);
    std::vector<uint8_t> bufferData = readBuffer(data, remaining);
    
    wgpu::Buffer* buffer = buffers_.get(bufferId);
    if (buffer) {
        queue_.WriteBuffer(*buffer, offset, bufferData.data(), bufferData.size());
    }
}

// Creates a texture from serialized parameters.
// Wire format: u32 id, u32 width, u32 height, u32 depthOrArrayLayers,
// length-prefixed format string, u32 usage, u32 mipLevelCount, u32 sampleCount.
// NOTE(review): `id` and `format` are parsed but unused — see the format TODO
// below and the id-assignment concern noted in handleCreateBuffer.
void DawnBatchProcessor::handleCreateTexture(const uint8_t*& data, size_t& remaining) {
    uint32_t id = read<uint32_t>(data, remaining);
    uint32_t width = read<uint32_t>(data, remaining);
    uint32_t height = read<uint32_t>(data, remaining);
    uint32_t depthOrArrayLayers = read<uint32_t>(data, remaining);
    std::string format = readString(data, remaining);
    uint32_t usage = read<uint32_t>(data, remaining);
    uint32_t mipLevelCount = read<uint32_t>(data, remaining);
    uint32_t sampleCount = read<uint32_t>(data, remaining);
    
    wgpu::TextureDescriptor desc{};
    desc.size = {width, height, depthOrArrayLayers};
    desc.format = static_cast<wgpu::TextureFormat>(0); // TODO: map the `format` string to a wgpu::TextureFormat — 0 is likely Undefined and will fail validation
    desc.usage = static_cast<wgpu::TextureUsage>(usage);
    desc.mipLevelCount = mipLevelCount;
    desc.sampleCount = sampleCount;
    
    wgpu::Texture texture = device_.CreateTexture(&desc);
    textures_.add(std::move(texture));
}

void DawnBatchProcessor::handleCreateShaderModule(const uint8_t*& data, size_t& remaining) {
    uint32_t id = read<uint32_t>(data, remaining);
    std::string code = readString(data, remaining);
    
    wgpu::ShaderModuleWGSLDescriptor wgslDesc{};
    wgslDesc.code = code.c_str();
    
    wgpu::ShaderModuleDescriptor desc{};
    desc.nextInChain = &wgslDesc;
    
    wgpu::ShaderModule module = device_.CreateShaderModule(&desc);
    shaderModules_.add(std::move(module));
}

// Begins a render pass on the thread-local command encoder, creating the
// encoder lazily if none is active.
// Wire format: u32 colorAttachmentCount, then per attachment
// (u8 hasAttachment, [u32 textureViewId]), then u8 hasDepthStencil,
// [u32 depthViewId].
void DawnBatchProcessor::handleBeginRenderPass(const uint8_t*& data, size_t& remaining) {
    if (!renderState_.currentEncoderId) {
        // Lazily create a command encoder for this thread.
        wgpu::CommandEncoderDescriptor encoderDesc{};
        wgpu::CommandEncoder encoder = device_.CreateCommandEncoder(&encoderDesc);
        renderState_.currentEncoderId = commandEncoders_.add(std::move(encoder));
    }
    
    wgpu::CommandEncoder* encoder = commandEncoders_.get(renderState_.currentEncoderId);
    if (!encoder) {
        reportError("No valid command encoder for render pass");
        return;
    }
    
    // Parse the render pass descriptor: color attachments first.
    uint32_t colorAttachmentCount = read<uint32_t>(data, remaining);
    
    std::vector<wgpu::RenderPassColorAttachment> colorAttachments(colorAttachmentCount);
    
    for (uint32_t i = 0; i < colorAttachmentCount; ++i) {
        uint8_t hasAttachment = read<uint8_t>(data, remaining);
        if (hasAttachment) {
            uint32_t textureViewId = read<uint32_t>(data, remaining);
            wgpu::TextureView* view = textureViews_.get(textureViewId);
            // NOTE(review): if the view lookup fails (or hasAttachment is 0),
            // the attachment slot is left with a null view but is still passed
            // to BeginRenderPass below — likely a validation error; confirm.
            if (view) {
                colorAttachments[i].view = *view;
                colorAttachments[i].loadOp = wgpu::LoadOp::Clear;       // hard-coded: always clear
                colorAttachments[i].storeOp = wgpu::StoreOp::Store;
                colorAttachments[i].clearValue = {0.0, 0.0, 0.0, 1.0};  // opaque black
            }
        }
    }
    
    uint8_t hasDepthStencil = read<uint8_t>(data, remaining);
    // Must outlive BeginRenderPass: renderPassDesc keeps a pointer to it.
    wgpu::RenderPassDepthStencilAttachment depthStencilAttachment{};
    
    wgpu::RenderPassDescriptor renderPassDesc{};
    renderPassDesc.colorAttachmentCount = colorAttachmentCount;
    renderPassDesc.colorAttachments = colorAttachments.data();
    
    if (hasDepthStencil) {
        uint32_t depthViewId = read<uint32_t>(data, remaining);
        wgpu::TextureView* depthView = textureViews_.get(depthViewId);
        if (depthView) {
            depthStencilAttachment.view = *depthView;
            depthStencilAttachment.depthLoadOp = wgpu::LoadOp::Clear;
            depthStencilAttachment.depthStoreOp = wgpu::StoreOp::Store;
            depthStencilAttachment.depthClearValue = 1.0f;
            renderPassDesc.depthStencilAttachment = &depthStencilAttachment;
        }
    }
    
    // Begin the render pass.
    wgpu::RenderPassEncoder renderPass = encoder->BeginRenderPass(&renderPassDesc);
    // HACK: the pass encoder is stashed in renderPipelines_ because there is
    // no dedicated pool for render passes yet — presumably the pool is
    // type-erased or this does not compile as intended; TODO add a proper
    // RenderPassEncoder pool.
    uint32_t renderPassId = renderPipelines_.add(std::move(renderPass)); // temporarily stored in renderPipelines_
    renderState_.currentRenderPassId = renderPassId;
    renderState_.inRenderPass = true;
}

// Records a non-indexed draw call. Currently a placeholder: the four u32
// parameters are consumed to keep the stream aligned, but no GPU call is
// issued.
// NOTE(review): the early-return path consumes NO payload bytes, so a draw
// outside a pass desynchronizes the remaining stream — confirm intended.
void DawnBatchProcessor::handleDraw(const uint8_t*& data, size_t& remaining) {
    if (!renderState_.inRenderPass || !renderState_.currentRenderPassId) {
        reportError("Draw command outside of render pass");
        return;
    }
    
    uint32_t vertexCount = read<uint32_t>(data, remaining);
    uint32_t instanceCount = read<uint32_t>(data, remaining);
    uint32_t firstVertex = read<uint32_t>(data, remaining);
    uint32_t firstInstance = read<uint32_t>(data, remaining);
    
    // TODO: retrieve the active RenderPassEncoder and call
    // Draw(vertexCount, instanceCount, firstVertex, firstInstance).
    // Requires proper pass-encoder state management (see handleBeginRenderPass).
}

// Finishes the current thread-local command encoder (if any) and submits it
// to the queue, then resets the per-thread encoder/pass state.
// NOTE(review): `commandBufferCount` is parsed but ignored — exactly one
// command buffer is submitted regardless; confirm against the client-side
// serializer.
// NOTE(review): if a render/compute pass is still open when this runs,
// Finish() is called on an encoder with an unended pass — presumably a
// WebGPU validation error; the pass is not ended here.
void DawnBatchProcessor::handleSubmit(const uint8_t*& data, size_t& remaining) {
    uint32_t commandBufferCount = read<uint32_t>(data, remaining);
    
    if (renderState_.currentEncoderId) {
        wgpu::CommandEncoder* encoder = commandEncoders_.get(renderState_.currentEncoderId);
        if (encoder) {
            wgpu::CommandBuffer commandBuffer = encoder->Finish();
            queue_.Submit(1, &commandBuffer);
        }
        
        // Reset per-thread recording state for the next encoder.
        renderState_.currentEncoderId = 0;
        renderState_.inRenderPass = false;
        renderState_.inComputePass = false;
    }
}

/**
 * Reads a u32-length-prefixed string from the stream and advances the
 * cursor. On truncated input, reports an error and returns an empty string
 * without consuming the (partial) payload.
 */
std::string DawnBatchProcessor::readString(const uint8_t*& data, size_t& remaining) {
    const uint32_t length = read<uint32_t>(data, remaining);
    if (length > remaining) {
        reportError("Not enough data to read string");
        return {};
    }
    
    const char* begin = reinterpret_cast<const char*>(data);
    data += length;
    remaining -= length;
    return std::string(begin, length);
}

/**
 * Reads a u32-length-prefixed byte blob from the stream and advances the
 * cursor. On truncated input, reports an error and returns an empty vector
 * without consuming the (partial) payload.
 */
std::vector<uint8_t> DawnBatchProcessor::readBuffer(const uint8_t*& data, size_t& remaining) {
    const uint32_t length = read<uint32_t>(data, remaining);
    if (length > remaining) {
        reportError("Not enough data to read buffer");
        return {};
    }
    
    const uint8_t* begin = data;
    data += length;
    remaining -= length;
    return std::vector<uint8_t>(begin, begin + length);
}

/**
 * Routes an error message to the user-supplied callback, falling back to
 * stderr when no callback is installed.
 */
void DawnBatchProcessor::reportError(const std::string& message) {
    if (!errorCallback_) {
        std::cerr << "DawnBatchProcessor Error: " << message << std::endl;
        return;
    }
    errorCallback_(message);
}

/// Dawn uncaptured-error callback: forwards the message to reportError().
void DawnBatchProcessor::onDeviceError(WGPUErrorType type, const char* message, void* userdata) {
    const char* detail = message ? message : "Unknown error";
    static_cast<DawnBatchProcessor*>(userdata)->reportError(
        "WebGPU Device Error: " + std::string(detail));
}

/// Dawn device-lost callback: forwards the reason message to reportError().
void DawnBatchProcessor::onDeviceLost(WGPUDeviceLostReason reason, const char* message, void* userdata) {
    const char* detail = message ? message : "Unknown reason";
    static_cast<DawnBatchProcessor*>(userdata)->reportError(
        "WebGPU Device Lost: " + std::string(detail));
}

/// Returns a consistent snapshot of the accumulated processing statistics.
DawnBatchProcessor::Stats DawnBatchProcessor::getStats() const {
    std::lock_guard<std::mutex> statsLock(statsMutex_);
    Stats snapshot = stats_;
    return snapshot;
}

// Remaining command handlers (several below are empty placeholders)
void DawnBatchProcessor::handleDestroyBuffer(const uint8_t*& data, size_t& remaining) {
    uint32_t id = read<uint32_t>(data, remaining);
    buffers_.remove(id);
}

// --- Unimplemented command handlers ---------------------------------------
// NOTE(review): these stubs consume NO payload bytes. If any of these command
// types carries a payload on the wire, the stream cursor will desynchronize
// and every subsequent command in the batch will be misparsed — confirm the
// wire format for each before relying on batches containing these commands.
void DawnBatchProcessor::handleCreateTextureView(const uint8_t*& data, size_t& remaining) {}
void DawnBatchProcessor::handleCreateRenderPipeline(const uint8_t*& data, size_t& remaining) {}
void DawnBatchProcessor::handleCreateComputePipeline(const uint8_t*& data, size_t& remaining) {}
void DawnBatchProcessor::handleCreateCommandEncoder(const uint8_t*& data, size_t& remaining) {}
void DawnBatchProcessor::handleEndRenderPass(const uint8_t*& data, size_t& remaining) {}
void DawnBatchProcessor::handleBeginComputePass(const uint8_t*& data, size_t& remaining) {}
void DawnBatchProcessor::handleEndComputePass(const uint8_t*& data, size_t& remaining) {}
void DawnBatchProcessor::handleSetRenderPipeline(const uint8_t*& data, size_t& remaining) {}
void DawnBatchProcessor::handleSetComputePipeline(const uint8_t*& data, size_t& remaining) {}
void DawnBatchProcessor::handleSetVertexBuffer(const uint8_t*& data, size_t& remaining) {}
void DawnBatchProcessor::handleSetIndexBuffer(const uint8_t*& data, size_t& remaining) {}
void DawnBatchProcessor::handleSetBindGroup(const uint8_t*& data, size_t& remaining) {}
void DawnBatchProcessor::handleDrawIndexed(const uint8_t*& data, size_t& remaining) {}
void DawnBatchProcessor::handleDispatch(const uint8_t*& data, size_t& remaining) {}
void DawnBatchProcessor::handleBatchStart(const uint8_t*& data, size_t& remaining) {}
void DawnBatchProcessor::handleBatchEnd(const uint8_t*& data, size_t& remaining) {}

} // namespace webgpu