/**
 * WebGPU批量渲染C++端单元测试
 * 测试Dawn批处理器和序列化系统
 */

#include <atomic>
#include <chrono>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <memory>
#include <numeric>
#include <string>
#include <thread>
#include <vector>

#include <gtest/gtest.h>

#include "../src/cpp/dawn_batch_processor.h"
#include "../src/bindings/webgpu_batch_bindings.h"

using namespace webgpu;

/// Test fixture: owns a DawnBatchProcessor configured with small, fast limits
/// and provides helpers that serialize commands into the binary batch format.
class WebGPUBatchTest : public ::testing::Test {
protected:
    void SetUp() override {
        // Small batch limits keep each test fast and deterministic.
        config_.maxBatchSize = 100;
        config_.maxBatchLatencyMs = 16;
        config_.workerThreads = 1;
        config_.enableValidation = true;
        config_.enableAsyncSubmission = true;
        
        processor_ = std::make_unique<DawnBatchProcessor>(config_);
        ASSERT_TRUE(processor_->initialize()) << "Failed to initialize batch processor";
    }
    
    void TearDown() override {
        processor_.reset();
    }
    
    /// Serializes a CREATE_BUFFER command:
    /// [CommandHeader][id:u32][size:u32][usage:u32][mappedAtCreation:u8]
    std::vector<uint8_t> createCreateBufferCommand(uint32_t id, uint32_t size, uint32_t usage) {
        CommandHeader header;
        header.type = static_cast<uint8_t>(CommandType::CREATE_BUFFER);
        header.flags = 0;
        header.id = id;
        header.size = sizeof(CommandHeader) + 4 + 4 + 4 + 1; // header + id + size + usage + mappedAtCreation
        
        std::vector<uint8_t> data(header.size);
        uint8_t* ptr = data.data();
        
        // Serialize with memcpy, not reinterpret_cast stores: payload offsets
        // after CommandHeader are not guaranteed to be 4-byte aligned, and
        // type-punned writes through uint32_t* violate strict aliasing.
        std::memcpy(ptr, &header, sizeof(CommandHeader));
        ptr += sizeof(CommandHeader);
        
        std::memcpy(ptr, &id, sizeof(uint32_t));
        ptr += sizeof(uint32_t);
        std::memcpy(ptr, &size, sizeof(uint32_t));
        ptr += sizeof(uint32_t);
        std::memcpy(ptr, &usage, sizeof(uint32_t));
        ptr += sizeof(uint32_t);
        *ptr = 0; // mappedAtCreation = false
        
        return data;
    }
    
    /// Serializes a WRITE_BUFFER command:
    /// [CommandHeader][bufferId:u32][offset:u32][dataSize:u32][payload bytes]
    std::vector<uint8_t> createWriteBufferCommand(uint32_t bufferId, uint32_t offset, const std::vector<uint8_t>& bufferData) {
        const uint32_t dataSize = static_cast<uint32_t>(bufferData.size());
        
        CommandHeader header;
        header.type = static_cast<uint8_t>(CommandType::WRITE_BUFFER);
        header.flags = 0;
        header.id = bufferId;
        // Explicit cast: size_t -> header.size would otherwise narrow silently.
        header.size = static_cast<decltype(header.size)>(
            sizeof(CommandHeader) + 4 + 4 + 4 + bufferData.size()); // header + bufferId + offset + dataSize + data
        
        std::vector<uint8_t> data(header.size);
        uint8_t* ptr = data.data();
        
        // Same alignment/aliasing-safe serialization as above.
        std::memcpy(ptr, &header, sizeof(CommandHeader));
        ptr += sizeof(CommandHeader);
        
        std::memcpy(ptr, &bufferId, sizeof(uint32_t));
        ptr += sizeof(uint32_t);
        std::memcpy(ptr, &offset, sizeof(uint32_t));
        ptr += sizeof(uint32_t);
        std::memcpy(ptr, &dataSize, sizeof(uint32_t));
        ptr += sizeof(uint32_t);
        
        // Guard: memcpy from a null source is UB even with a zero length,
        // and data() of an empty vector may be nullptr.
        if (!bufferData.empty()) {
            std::memcpy(ptr, bufferData.data(), bufferData.size());
        }
        
        return data;
    }
    
protected:
    DawnBatchProcessor::Config config_;              // configuration applied in SetUp()
    std::unique_ptr<DawnBatchProcessor> processor_;  // system under test, rebuilt per test
};

// A freshly initialized processor must expose a live device and zeroed stats.
TEST_F(WebGPUBatchTest, BasicInitialization) {
    EXPECT_NE(processor_->getDevice().Get(), nullptr);
    
    const auto initialStats = processor_->getStats();
    EXPECT_EQ(initialStats.totalBatches, 0);
    EXPECT_EQ(initialStats.totalCommands, 0);
    EXPECT_EQ(initialStats.totalBytes, 0);
}

// A single CREATE_BUFFER command submitted alone must show up in the stats.
TEST_F(WebGPUBatchTest, SingleCommandProcessing) {
    const auto payload =
        createCreateBufferCommand(1, 1024, static_cast<uint32_t>(wgpu::BufferUsage::Vertex));
    
    processor_->submitBatch(payload.data(), payload.size());
    
    // Give the async worker time to pick up the batch, then force completion.
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    processor_->flush();
    
    const auto stats = processor_->getStats();
    EXPECT_GE(stats.totalBatches, 1);
    EXPECT_GE(stats.totalCommands, 1);
    EXPECT_GT(stats.totalBytes, 0);
}

// Ten commands concatenated into one payload must all be counted.
TEST_F(WebGPUBatchTest, BatchCommandProcessing) {
    std::vector<uint8_t> combined;
    
    // Build one contiguous batch out of ten CREATE_BUFFER commands.
    for (uint32_t bufferId = 1; bufferId <= 10; ++bufferId) {
        const auto cmd = createCreateBufferCommand(
            bufferId, 1024 * bufferId, static_cast<uint32_t>(wgpu::BufferUsage::Vertex));
        combined.insert(combined.end(), cmd.begin(), cmd.end());
    }
    
    processor_->submitBatch(combined.data(), combined.size());
    
    // Allow the worker to drain, then force completion.
    std::this_thread::sleep_for(std::chrono::milliseconds(200));
    processor_->flush();
    
    const auto stats = processor_->getStats();
    EXPECT_GE(stats.totalCommands, 10);
    EXPECT_GT(stats.avgCommandsPerBatch, 0);
}

// Creating a CopyDst buffer and then writing to it must yield two processed commands.
TEST_F(WebGPUBatchTest, BufferWriteCommand) {
    // The buffer needs CopyDst so it can be written to.
    const auto createCmd = createCreateBufferCommand(
        1, 1024, static_cast<uint32_t>(wgpu::BufferUsage::Vertex | wgpu::BufferUsage::CopyDst));
    processor_->submitBatch(createCmd.data(), createCmd.size());
    
    // Let the buffer creation land before issuing the write.
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    
    const std::vector<uint8_t> payload = {1, 2, 3, 4, 5, 6, 7, 8};
    const auto writeCmd = createWriteBufferCommand(1, 0, payload);
    processor_->submitBatch(writeCmd.data(), writeCmd.size());
    
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    processor_->flush();
    
    const auto stats = processor_->getStats();
    EXPECT_GE(stats.totalCommands, 2); // create + write
}

// Benchmarks batch submission throughput and fails below 10,000 commands/sec.
TEST_F(WebGPUBatchTest, PerformanceBenchmark) {
    const int numCommands = 1000;
    const int numIterations = 10;
    
    // Build the payload once: every iteration submits identical data (the
    // original rebuilt the same bytes per iteration, wasting allocations).
    std::vector<uint8_t> batchData;
    for (int i = 0; i < numCommands; ++i) {
        const auto command = createCreateBufferCommand(
            i + 1, 1024, static_cast<uint32_t>(wgpu::BufferUsage::Vertex));
        batchData.insert(batchData.end(), command.begin(), command.end());
    }
    
    std::vector<double> batchTimes;
    batchTimes.reserve(numIterations);
    
    for (int iter = 0; iter < numIterations; ++iter) {
        // Only submit + flush are inside the measured region.
        const auto startTime = std::chrono::high_resolution_clock::now();
        processor_->submitBatch(batchData.data(), batchData.size());
        processor_->flush();
        const auto endTime = std::chrono::high_resolution_clock::now();
        
        const auto duration = std::chrono::duration<double, std::milli>(endTime - startTime);
        batchTimes.push_back(duration.count());
        
        std::cout << "Iteration " << iter + 1 << ": " << duration.count()
                  << "ms for " << numCommands << " commands" << '\n';
    }
    
    const double avgTime =
        std::accumulate(batchTimes.begin(), batchTimes.end(), 0.0) / numIterations;
    // Guard: a sub-resolution timer could report 0ms and make the division blow up.
    ASSERT_GT(avgTime, 0.0) << "Timer resolution too coarse to measure batches";
    const double commandsPerSecond = (numCommands * 1000.0) / avgTime;
    
    std::cout << "Performance Results:" << '\n';
    std::cout << "Average time per batch: " << avgTime << "ms" << '\n';
    std::cout << "Commands per second: " << commandsPerSecond << std::endl;
    
    // Minimum acceptable throughput: 10,000 commands per second.
    EXPECT_GT(commandsPerSecond, 10000.0) << "Batch processor performance is too slow";
}

// Garbage command bytes must route through the error callback, not crash.
TEST_F(WebGPUBatchTest, ErrorHandling) {
    const std::vector<uint8_t> garbage = {0xFF, 0xFF, 0xFF, 0xFF}; // bogus command type
    
    // Record whether the processor reports the malformed input.
    bool errorCaught = false;
    processor_->setErrorCallback([&errorCaught](const std::string& message) {
        errorCaught = true;
        std::cout << "Caught error: " << message << std::endl;
    });
    
    processor_->submitBatch(garbage.data(), garbage.size());
    
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    processor_->flush();
    
    EXPECT_TRUE(errorCaught);
}

// Concurrent submitBatch calls from several threads must all be accounted for.
TEST_F(WebGPUBatchTest, ThreadSafety) {
    constexpr int numThreads = 4;
    constexpr int commandsPerThread = 100;
    
    std::atomic<int> completedThreads{0};
    
    // Each worker submits its own disjoint range of buffer ids.
    const auto worker = [this, &completedThreads](int threadId) {
        for (int i = 0; i < commandsPerThread; ++i) {
            const uint32_t bufferId = threadId * commandsPerThread + i + 1;
            const auto cmd = createCreateBufferCommand(
                bufferId, 1024, static_cast<uint32_t>(wgpu::BufferUsage::Vertex));
            processor_->submitBatch(cmd.data(), cmd.size());
        }
        ++completedThreads;
    };
    
    std::vector<std::thread> workers;
    workers.reserve(numThreads);
    for (int i = 0; i < numThreads; ++i) {
        workers.emplace_back(worker, i);
    }
    for (auto& t : workers) {
        t.join();
    }
    
    // Drain anything still queued after all producers have finished.
    processor_->flush();
    
    const auto stats = processor_->getStats();
    EXPECT_EQ(completedThreads.load(), numThreads);
    EXPECT_GE(stats.totalCommands, numThreads * commandsPerThread);
}

// Test entry point: wraps the gtest run in start/end banners.
int main(int argc, char** argv) {
    ::testing::InitGoogleTest(&argc, argv);
    
    const char* const divider = "=========================================";
    std::cout << "Running WebGPU Batch Rendering Tests..." << std::endl;
    std::cout << divider << std::endl;
    
    const int result = RUN_ALL_TESTS();
    
    std::cout << divider << std::endl;
    std::cout << "WebGPU Batch Rendering Tests Complete" << std::endl;
    
    return result;
}