
import * as util from './util.js';
import * as primitive from './primitive.js';   



async function main() {
    // WebGPU is an asynchronous API, so the whole setup uses async/await.
    // Optional chaining makes both calls yield undefined when WebGPU is absent.
    const adapter = await navigator.gpu?.requestAdapter();
    const device = await adapter?.requestDevice();
    if (!device) {
        util.fail('need a browser that supports WebGPU');
        return;
    }

    const canvas = document.querySelector('canvas');    
    const context = canvas.getContext('webgpu');
    // Ask the system for its preferred canvas format (rgba8unorm or bgra8unorm).
    // Either works; using the preferred one lets the user's system run fastest.
    const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
    // configure() associates the canvas context with the device and tells it
    // which texture format to use for the textures it hands out.
    context.configure({
        device,
        format: presentationFormat,
    });

    const wgslCode = await util.loadWGSLSoureFile('./shaders/vertexbuffer.wgsl');

    // NOTE(review): labels still say "multiple uniforms" although this sample
    // uses storage + vertex buffers; labels are debug-only strings.
    const module = device.createShaderModule({
        label: 'multiple uniforms shaders',
        code: wgslCode,
    });

    const pipeline = device.createRenderPipeline({
        label: 'multiple uniforms pipeline',
        layout: 'auto',
        vertex: {
            module,
            // Describes how to pull data out of one or more vertex buffers.
            buffers: [
                {
                    // arrayStride: number of bytes from one vertex in the
                    // buffer to the next.
                    arrayStride: 2 * 4, // 2 floats, 4 bytes each
                    attributes: [
                        {
                            shaderLocation: 0, // corresponds to @location(0) in the vertex shader
                            offset: 0, // this attribute starts at byte 0 of each vertex
                            format: 'float32x2'
                        }, // position
                    ],
                }
            ],
        },
        fragment: {
            module,
            targets: [{ format: presentationFormat }],
        },
    });

    const {vertexData, numVertices} = primitive.createCircle({
        radius: 0.5,
        innerRadius: 0.25,
    });
    // Vertex buffer holding the circle geometry.
    // Usage is VERTEX (not STORAGE) since it is bound via setVertexBuffer.
    const vertexBuffer = device.createBuffer({
        label: 'vertex buffer',
        size: vertexData.byteLength,
        // COPY_DST so writeBuffer below can upload into it.
        usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
    });
    device.queue.writeBuffer(vertexBuffer, 0, vertexData);


    const kNumObjects = 100;
    const objectInfos = [];
    
    // Two storage buffers: one for per-object values written once (color,
    // offset) and one rewritten every frame (scale).
    const staticBufferSize =
        4 * 4 + // color is 4 32bit floats (4bytes each)
        2 * 4 + // offset is 2 32bit floats (4bytes each)
        2 * 4;  // padding so each per-object struct is 16-byte aligned
    const scaleSize = 2*4; // scale is 2 32bit floats (4bytes each)
    const staticStorageBufferSize = kNumObjects * staticBufferSize;
    const scaleStorageBufferSize = kNumObjects * scaleSize;

    const staticStorageBuffer = device.createBuffer({
            label: `static storage for objects`,
            size: staticStorageBufferSize,
            usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
        });

    const scaleStorageBuffer = device.createBuffer({
        label: `scale changing storage for objects`,
        size: scaleStorageBufferSize,
        usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
    });

    // Offsets to the various values within one object's struct,
    // expressed in float32 indices (not bytes).
    const kColorOffset = 0;
    const kOffsetOffset = 4;
    const kScaleOffset = 0;

    const staticStorageValues = new Float32Array(staticStorageBufferSize/4);

    for (let i = 0; i < kNumObjects; ++i) {
        const staticOffset = i * (staticBufferSize/4);
        staticStorageValues.set([util.rand(), util.rand(), util.rand(), 1], staticOffset + kColorOffset);        // set the color
        // Random clip-space offset in [-0.9, 0.9] on each axis
        // (clip space spans -1 to 1, so this keeps objects mostly on screen).
        staticStorageValues.set([util.rand(-0.9, 0.9), util.rand(-0.9, 0.9)], staticOffset + kOffsetOffset);      // set the offset

        objectInfos.push({
            scale: util.rand(0.2, 0.5),
        });

    }
    // Copy the static values to the GPU once; they never change afterwards.
    device.queue.writeBuffer(staticStorageBuffer, 0, staticStorageValues);


    // CPU-side typed array re-filled each frame with all objects' scales,
    // then uploaded to scaleStorageBuffer in a single writeBuffer call.
    const scaleValues = new Float32Array(scaleStorageBufferSize / 4);

    // One bind group for all objects: per-object data lives in storage
    // buffers indexed by instance_index in the shader, so a single bind
    // group (set once per pass via pass.setBindGroup) suffices.
    const bindGroup = device.createBindGroup({
        label: `bind group for objs`,
        // 0 corresponds to @group(0) in the shader.
        layout: pipeline.getBindGroupLayout(0),
        entries: [
            { binding: 0, resource: { buffer: staticStorageBuffer }},
            { binding: 1, resource: { buffer: scaleStorageBuffer }},
            // The vertex buffer was removed from the bind group; it is now
            // bound as a real vertex buffer via pass.setVertexBuffer.
            // { binding: 2, resource: { buffer: vertexBuffer }},
        ],
    });

    const renderPassDescriptor = {
        label: 'our basic canvas renderPass',
        colorAttachments: [
            {
                clearValue: [0.3, 0.3, 0.3, 1],
                // 'clear' wipes the texture to clearValue before drawing;
                // 'load' would instead load the texture's existing contents.
                loadOp: 'clear', // 指定在绘制前将纹理清除为clearValue
                // 'store' keeps the drawing results; 'discard' would throw
                // them away after the pass.
                storeOp: 'store', // 
            },
        ],
    };
    
    function render() {
        // The canvas texture changes every frame, so fetch a fresh view.
        renderPassDescriptor.colorAttachments[0].view =
            context.getCurrentTexture().createView();

        // Batch all draw commands into one command buffer for efficiency:
        // the encoder and pass objects only *encode* commands; nothing
        // executes until the buffer is submitted to the queue.
        const encoder = device.createCommandEncoder({ label: 'our encoder' });
        const pass = encoder.beginRenderPass(renderPassDescriptor);
        pass.setPipeline(pipeline);
        // Tell WebGPU which vertex buffer to use; slot 0 matches the first
        // element of the pipeline's vertex.buffers array above.
        pass.setVertexBuffer(0, vertexBuffer);

        // Fill the JavaScript-side Float32Array with per-object scales,
        // compensating for the canvas aspect ratio on the x axis.
        const aspect = canvas.width / canvas.height;

        objectInfos.forEach(({scale}, idx) => {
            const offset = idx * (scaleSize/4); 
            scaleValues.set([scale / aspect, scale], offset + kScaleOffset); // set the scale
        });
        // Upload all scales at once.
        device.queue.writeBuffer(scaleStorageBuffer, 0, scaleValues);

        pass.setBindGroup(0, bindGroup);
        // For each of kNumObjects instances, WebGPU calls the vertex shader
        // numVertices times, with vertex_index running 0..numVertices-1 and
        // instance_index running 0..kNumObjects-1.
        pass.draw(numVertices, kNumObjects); 

        pass.end();

        const commandBuffer = encoder.finish();
        device.queue.submit([commandBuffer]);
    }

    // ResizeObserver watches the canvas element's content-box size.
    // It avoids the infinite-callback loops that naive resize handling
    // (resizing inside a resize event) can create.
    const observer = new ResizeObserver(entries => {
        for (const entry of entries) {
            const canvas = entry.target;
            const width = entry.contentBoxSize[0].inlineSize;
            const height = entry.contentBoxSize[0].blockSize;
            // Clamp to [1, maxTextureDimension2D]: a canvas texture must be
            // at least 1px and no larger than the device limit.
            canvas.width = Math.max(1, Math.min(width, device.limits.maxTextureDimension2D));
            canvas.height = Math.max(1, Math.min(height, device.limits.maxTextureDimension2D));

            render();
        }
    });

    observer.observe(canvas);
}



main();