<template>
  <div>
    <canvas id="outputCanvas" width="512" height="512"></canvas>
    <!-- Label is explicitly associated with the file input for accessibility -->
    <label for="imageInput">上传文件</label>
    <input type="file" id="imageInput" accept="image/*" />
  </div>
</template>

<script lang="js" setup>

import { onMounted } from 'vue';

// Defer all DOM access until the component is mounted: while <script setup>
// runs, the template elements (#outputCanvas, #imageInput) are not yet in the
// document, so getElementById() here would return null. Moving the top-level
// awaits inside onMounted also avoids making this an async component, which
// would otherwise require wrapping it in <Suspense>.
onMounted(async () => {
  // Initialize WebGPU (adapter, device, configured canvas context).
  const { device, context, format } = await initWebGPU();

  // Build the render pipeline used for image processing.
  const pipeline = await createImageProcessingPipeline(device, format);

  // Re-process and re-render whenever the user picks a new file.
  document.getElementById('imageInput').addEventListener('change', async (e) => {
    const file = e.target.files[0];
    if (!file) {
      return; // user cancelled the file dialog — nothing to do
    }
    const imageBitmap = await createImageBitmap(file);

    // Upload the image and build the shader bind group.
    const bindGroup = await processImage(device, pipeline, imageBitmap);

    // Draw the processed result to the canvas.
    await renderFrame(device, context, pipeline, bindGroup);
  });
});

/**
 * Initialize WebGPU: request an adapter and device, then configure the
 * output canvas with the platform's preferred swap-chain format.
 * @returns {Promise<{context: *, format: *, device: *}>} the device, the
 *          configured canvas context, and the canvas texture format.
 * @throws {Error} if WebGPU is unsupported, no adapter is available, the
 *          canvas is missing, or a 'webgpu' context cannot be created.
 */
async function initWebGPU() {
  // Feature-detect WebGPU support in this browser.
  if (!navigator.gpu) {
    throw Error("WebGPU not supported!");
  }
  // Request a GPU adapter; may be null on unsupported hardware/drivers.
  const adapter = await navigator.gpu.requestAdapter();
  if (!adapter) {
    throw Error("无法请求 WebGPU 适配器。");
  }
  // Request the logical device used for all subsequent resource creation.
  const device = await adapter.requestDevice();

  // Obtain the WebGPU rendering context from the output canvas.
  const canvas = document.getElementById('outputCanvas');
  if (!canvas) {
    throw Error("Canvas element #outputCanvas not found.");
  }
  const context = canvas.getContext('webgpu');
  if (!context) {
    // getContext('webgpu') returns null when the context cannot be created.
    throw Error("Failed to get a 'webgpu' context from the canvas.");
  }

  // Configure the canvas with the preferred format for this platform.
  const format = navigator.gpu.getPreferredCanvasFormat();
  context.configure({
    device: device,
    format: format,
    alphaMode: 'opaque'
  });

  return { device, context, format };
}

/**
 * Create the image-processing render pipeline (full-screen quad + grayscale
 * fragment shader).
 *
 * UV coordinates are produced by the vertex shader as an interpolated
 * inter-stage variable instead of dividing fragCoord by a hard-coded
 * 512x512 resolution, so the pipeline works for any canvas size.
 *
 * @param device the GPUDevice used to create shader modules and the pipeline
 * @param format the color target format (the canvas's preferred format)
 * @returns {Promise<*>} the configured GPURenderPipeline
 */
async function createImageProcessingPipeline(device, format) {
  return device.createRenderPipeline({
    layout: 'auto',
    vertex: {
      module: device.createShaderModule({
        code: `
                    struct VSOut {
                        @builtin(position) pos: vec4<f32>,
                        @location(0) uv: vec2<f32>,
                    };

                    @vertex
                    fn vs_main(@builtin(vertex_index) vertex_index: u32) -> VSOut {
                        // Full-screen quad as a 4-vertex triangle strip in clip space.
                        const pos = array(
                            vec2(-1.0, -1.0),
                            vec2(1.0, -1.0),
                            vec2(-1.0, 1.0),
                            vec2(1.0, 1.0)
                        );
                        var out: VSOut;
                        let p = pos[vertex_index];
                        out.pos = vec4(p, 0.0, 1.0);
                        // Map clip space [-1, 1] to UV [0, 1]; flip Y because
                        // texture/framebuffer origin is top-left in WebGPU.
                        out.uv = vec2(p.x * 0.5 + 0.5, 1.0 - (p.y * 0.5 + 0.5));
                        return out;
                    }
                `
      }),
      entryPoint: 'vs_main'
    },
    fragment: {
      module: device.createShaderModule({
        code: `
                    @group(0) @binding(0) var inputTex: texture_2d<f32>;
                    @group(0) @binding(1) var samp: sampler;

                    @fragment
                    fn fs_main(@location(0) uv: vec2<f32>) -> @location(0) vec4<f32> {
                        // Grayscale conversion (Rec. 601 luma weights).
                        let color = textureSample(inputTex, samp, uv);
                        let gray = 0.299 * color.r + 0.587 * color.g + 0.114 * color.b;

                        return vec4(vec3(gray), 1.0);
                    }
                `
      }),
      entryPoint: 'fs_main',
      targets: [{format: format}]
    },
    primitive: {
      topology: 'triangle-strip'
    }
  });
}

/**
 * Upload an ImageBitmap into a GPU texture and build the bind group
 * consumed by the grayscale fragment shader.
 * @param device the GPUDevice used to create resources
 * @param pipeline the render pipeline whose layout the bind group targets
 * @param imageBitmap the decoded source image
 * @returns {Promise<*>} a GPUBindGroup with the texture view and sampler
 */
async function processImage(device, pipeline, imageBitmap) {
  // Create the input texture sized to the image.
  // RENDER_ATTACHMENT is mandatory here: the WebGPU spec requires the
  // destination of copyExternalImageToTexture() to have usage including
  // COPY_DST | RENDER_ATTACHMENT, otherwise the copy fails validation.
  const inputTexture = device.createTexture({
    size: [imageBitmap.width, imageBitmap.height],
    format: 'rgba8unorm',
    usage: GPUTextureUsage.TEXTURE_BINDING |
        GPUTextureUsage.COPY_DST |
        GPUTextureUsage.RENDER_ATTACHMENT
  });

  // Upload the image pixels into the texture.
  device.queue.copyExternalImageToTexture(
      { source: imageBitmap },
      { texture: inputTexture },
      [imageBitmap.width, imageBitmap.height]
  );

  // Bilinear sampler for the texture lookup in the fragment shader.
  const sampler = device.createSampler({
    magFilter: 'linear',
    minFilter: 'linear'
  });

  // Bind texture view and sampler to group 0, matching the shader bindings.
  return device.createBindGroup({
    layout: pipeline.getBindGroupLayout(0),
    entries: [
      {
        binding: 0,
        resource: inputTexture.createView()
      },
      {
        binding: 1,
        resource: sampler
      }
    ]
  });
}

/**
 * Record and submit one frame: draw the full-screen quad with the given
 * pipeline and bind group into the canvas's current swap-chain texture.
 * @param device the GPUDevice whose queue receives the command buffer
 * @param context the configured canvas GPUCanvasContext
 * @param pipeline the render pipeline to draw with
 * @param bindGroup group 0 resources (input texture + sampler)
 * @returns {Promise<void>}
 */
async function renderFrame(device, context, pipeline, bindGroup) {
  const encoder = device.createCommandEncoder();

  // Clear the target before drawing (transparent black is the default).
  const pass = encoder.beginRenderPass({
    colorAttachments: [{
      view: context.getCurrentTexture().createView(),
      loadOp: 'clear',
      storeOp: 'store',
    }]
  });

  pass.setPipeline(pipeline);
  pass.setBindGroup(0, bindGroup);
  // 4 vertices -> full-screen quad via the triangle-strip topology.
  pass.draw(4);
  pass.end();

  device.queue.submit([encoder.finish()]);
}

</script>