import { Engine } from "../../Engine";
import { Register } from "../../register/index";
import { ComponentType } from "../../component/index";
import { mat4, vec3 } from "../../core/math/wgpu-matrix.module";
import { Pass } from "../../component/post/Pass";
import { guid } from "../../core/utils";
import {any} from '../../core/logic';

/**
 * LineUtile drapes a polyline over scene geometry on the GPU.
 *
 * Pipeline:
 *  1. Render the scene depth from a top-down camera into an offscreen depth
 *     texture (`getDescript` builds the render-pass descriptor).
 *  2. Run a compute shader that subdivides every input line segment into
 *     `maxPointCount` points and, for each point, samples the depth texture
 *     to reconstruct the terrain height (world-space z) underneath it.
 *  3. Copy the interpolated vec4 points back to the CPU and return them.
 */
class LineUtile {
  /**
   * @param {Object} options
   * @param {number[]} options.rawPosition - flat [x, y, z, x, y, z, ...] input points
   * @param {number} [options.maxPointCount=100] - interpolated point count per segment
   * @param {number} [options.offsetZ=0.1] - offset added to every sampled height
   */
  constructor(options) {
    this.rawPosition = options.rawPosition;
    this.maxPointCount = any(options.maxPointCount, 100); // interpolation point count per segment
    this.offsetZ = any(options.offsetZ, 0.1);
    this.init();
  }
  /** Restore the scene camera uniform saved in update() before the top-down render. */
  restore() {
    const camera = Engine.instance.scene.getCamera();
    const cameraParam = camera.getParam("camera");
    cameraParam.buffer(this.rowCameraData);
  }
  /**
   * Lazily (re)build the offscreen render-pass descriptor plus its color /
   * depth / gbuffer / pick attachments, sized to the current engine size.
   * The descriptor is tagged with `size` so it is invalidated on resize.
   *
   * NOTE(review): on resize the previous textures are dropped without
   * destroy() (GPU memory is only reclaimed lazily), and the compute bind
   * group created in init() keeps referencing the OLD depth texture view —
   * confirm resize is not expected here, or rebuild the bind group too.
   *
   * @returns {GPURenderPassDescriptor} descriptor for the depth pre-pass
   */
  getDescript() {
    if (this._descript) {
      if (
        this._descript.size[0] !== Engine.instance.size[0] ||
        this._descript.size[1] !== Engine.instance.size[1]
      ) {
        this._descript = null;
      }
    }
    // With multisampling the resolved result lands on resolveTarget; when
    // single-sampled it lands directly on view.
    if (!this._descript) {
      this.colorTexture = Engine.instance.device.createTexture({
        label: "colorTexture",
        size: Engine.instance.size,
        format: Engine.instance.format,
        mipLevelCount: 1,
        sampleCount: Engine.instance.multisample ? 4 : 1,
        usage:
          GPUTextureUsage.RENDER_ATTACHMENT |
          GPUTextureUsage.TEXTURE_BINDING |
          GPUTextureUsage.COPY_SRC,
      });
      this.colorTexture.view = this.colorTexture.createView();
      this.depthTexture = Engine.instance.device.createTexture({
        label: "depthTexture",
        size: Engine.instance.size,
        format: "depth24plus",
        sampleCount: Engine.instance.multisample ? 4 : 1,
        usage:
          GPUTextureUsage.RENDER_ATTACHMENT |
          GPUTextureUsage.TEXTURE_BINDING |
          GPUTextureUsage.COPY_SRC,
      });
      this.depthTexture.view = this.depthTexture.createView();
      this._descript = {
        colorAttachments: [
          {
            view: this.colorTexture.view,
            // resolveTarget:Engine.instance.context.getCurrentTexture().createView(),
            loadOp: "clear",
            clearValue: { r: 1.0, g: 1.0, b: 1.0, a: 1.0 },
            storeOp: "store",
          },
        ],
        depthStencilAttachment: {
          view: this.depthTexture.view,
          depthClearValue: 1.0,
          depthLoadOp: "clear",
          depthStoreOp: "store",
        },
      };
      if (Engine.instance.enableGbuffer) {
        this.positionTexture = Engine.instance.device.createTexture({
          label: "position",
          size: Engine.instance.size,
          format: "rgba16float",
          mipLevelCount: 1,
          sampleCount: 1,
          usage:
            GPUTextureUsage.RENDER_ATTACHMENT |
            GPUTextureUsage.TEXTURE_BINDING |
            GPUTextureUsage.COPY_SRC,
        });
        this.positionTexture.view = this.positionTexture.createView();
        this.NomalTexture = Engine.instance.device.createTexture({
          label: "normal", // was mislabeled "position" (copy-paste)
          size: Engine.instance.size,
          format: "rgba16float",
          mipLevelCount: 1,
          sampleCount: 1,
          usage:
            GPUTextureUsage.RENDER_ATTACHMENT |
            GPUTextureUsage.TEXTURE_BINDING |
            GPUTextureUsage.COPY_SRC,
        });
        this.NomalTexture.view = this.NomalTexture.createView();
        if (Engine.instance.multisample) {
          this.positionTextureMulti = Engine.instance.device.createTexture({
            label: "position",
            size: Engine.instance.size,
            format: "rgba16float",
            mipLevelCount: 1,
            sampleCount: 4,
            usage:
              GPUTextureUsage.RENDER_ATTACHMENT |
              GPUTextureUsage.TEXTURE_BINDING |
              GPUTextureUsage.COPY_SRC,
          });
          this.positionTextureMulti.view =
            this.positionTextureMulti.createView();
          this.NomalTextureMulti = Engine.instance.device.createTexture({
            label: "normal", // was mislabeled "position" (copy-paste)
            size: Engine.instance.size,
            format: "rgba16float",
            mipLevelCount: 1,
            sampleCount: 4,
            usage:
              GPUTextureUsage.RENDER_ATTACHMENT |
              GPUTextureUsage.TEXTURE_BINDING |
              GPUTextureUsage.COPY_SRC,
          });
          this.NomalTextureMulti.view = this.NomalTextureMulti.createView();
        }
        this._descript.colorAttachments.push({
          label: "position",
          view: Engine.instance.multisample
            ? this.positionTextureMulti.view
            : this.positionTexture.view,
          resolveTarget: Engine.instance.multisample
            ? this.positionTexture.view
            : undefined,
          loadOp: "clear",
          clearValue: { r: 1.0, g: 1.0, b: 1.0, a: 0 },
          storeOp: "store",
        });
        this._descript.colorAttachments.push({
          label: "normal",
          view: Engine.instance.multisample
            ? this.NomalTextureMulti.view
            : this.NomalTexture.view,
          resolveTarget: Engine.instance.multisample
            ? this.NomalTexture.view
            : undefined,
          loadOp: "clear",
          clearValue: { r: 1.0, g: 1.0, b: 1.0, a: 0 },
          storeOp: "store",
        });
      }
      if (Engine.instance.enablePick) {
        this.pickTexture = Engine.instance.device.createTexture({
          label: "pick",
          size: Engine.instance.size,
          format: Engine.instance.format,
          mipLevelCount: 1,
          sampleCount: 1,
          usage:
            GPUTextureUsage.RENDER_ATTACHMENT |
            GPUTextureUsage.TEXTURE_BINDING |
            GPUTextureUsage.COPY_SRC,
        });
        this.pickTexture.view = this.pickTexture.createView();
        if (Engine.instance.multisample) {
          this.pickTextureMulti = Engine.instance.device.createTexture({
            label: "pick",
            size: Engine.instance.size,
            format: Engine.instance.format,
            mipLevelCount: 1,
            sampleCount: 4,
            usage:
              GPUTextureUsage.RENDER_ATTACHMENT |
              GPUTextureUsage.TEXTURE_BINDING |
              GPUTextureUsage.COPY_SRC,
          });
          this.pickTextureMulti.view = this.pickTextureMulti.createView();
        }
        this._descript.colorAttachments.push({
          label: "pick",
          view: Engine.instance.multisample
            ? this.pickTextureMulti.view
            : this.pickTexture.view,
          resolveTarget: Engine.instance.multisample
            ? this.pickTexture.view
            : undefined,
          loadOp: "clear",
          clearValue: { r: 1.0, g: 1.0, b: 1.0, a: 0 },
          storeOp: "store",
        });
      }
      this._descript.size = Engine.instance.size;
    }
    return this._descript;
  }
  /**
   * One-time GPU setup: buffers (params / input points / output points /
   * top-camera uniform), the IDW compute pipeline, and its bind group.
   */
  init() {
    this.twoPoints = this.createLineSegment();
    this.getDescript();
    // Uniform buffer for the compute parameters.
    this.paramsBuffer = Engine.instance.device.createBuffer({
      label: "parameters",
      size: 24, // 2 x f32 (segmentPoints, offsetZ) plus padding
      usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
    });
    // 1. Input buffer: one vec4 per raw point.
    const inputBuffer = Engine.instance.device.createBuffer({
      label: "inputBuffer",
      size: this.twoPoints.byteLength, // vec4 per point
      usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
    });
    // 2. Output buffer: maxPointCount interpolated vec4s per segment.
    this.pointsCount = this.rawPosition.length / 3;
    const maxOutputPoints = (this.pointsCount - 1) * this.maxPointCount;
    this.outputBuffer = Engine.instance.device.createBuffer({
      size: maxOutputPoints * 16,
      usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
    });
    // 3. Upload the compute parameters.
    const paramsData = new Float32Array([this.maxPointCount, this.offsetZ]);
    Engine.instance.device.queue.writeBuffer(this.paramsBuffer, 0, paramsData);
    // 4. Upload the input points.
    Engine.instance.device.queue.writeBuffer(inputBuffer, 0, this.twoPoints);
    // 5. Top-view camera uniform (matches the WGSL `Camera` struct below).
    this.topCamera = Engine.instance.device.createBuffer({
      label: "topCamera",
      size: (16 + 16 + 4 + 4) * 4, // viewProj + viewProjInverse + texSize + nearFar
      usage:
        GPUBufferUsage.UNIFORM |
        GPUBufferUsage.COPY_SRC |
        GPUBufferUsage.COPY_DST,
    });
    const IDWShader = `
    struct Point {
        pos: vec4<f32>  // xyz是位置，w可以用作标记
    }
       
    struct OutputPoint {
        pos: vec3<f32>  // 输出点改为 vec3 格式
    }
    struct Camera {
        viewProj: mat4x4<f32>,    // 顶视图的视图投影矩阵
        viewProjInverse: mat4x4<f32>,
        texSize: vec4<f32>,       // 深度图尺寸
        nearFar:  vec4<f32>               // 近平面 远平面
    }

    struct Params {
        segmentPoints: f32,   // 每段固定的插值点数
        offsetZ:f32
    }

    @group(0) @binding(0) var depthTexture: texture_depth_multisampled_2d;
    @group(0) @binding(1) var<uniform> topCamera: Camera;
    @group(0) @binding(2) var<storage, read> inputPoints: array<Point>;
    @group(0) @binding(3) var<storage, read_write> outputPoints: array<Point>;
    @group(0) @binding(4) var<uniform> params: Params;

    // 获取世界空间点的高度
    fn sampleHeight(worldPos: vec2<f32>) -> f32 {
        // 转换到裁剪空间
        let clipPos = topCamera.viewProj * vec4<f32>(worldPos.x, worldPos.y, 0.0, 1.0);
        
        // 透视除法，转换到NDC空间 [-1, 1]
        let ndcPos = clipPos.xy / clipPos.w;
        
        // NDC转换到UV空间 [0, 1]
        let uv = vec2<f32>(
            (ndcPos.x + 1.0) * 0.5,
            (1.0 - (ndcPos.y + 1.0) * 0.5)  // Y轴翻转
        );
        
        // 转换到纹理坐标
        let texCoord = vec2<u32>(
            u32(uv.x * topCamera.texSize.x),
            u32(uv.y * topCamera.texSize.y)
        );

        // 4次采样并平均
        var avgDepth: f32 = 0.0;
        for (var i: u32 = 0u; i < 4u; i = i + 1u) {
            avgDepth += textureLoad(depthTexture, texCoord, i);
        }
        avgDepth = avgDepth / 4.0;

        // 6. 从深度重建世界空间Z坐标
        let clipZ = avgDepth * 2.0 - 1.0;  // 深度值转回NDC空间
        
        // 从NDC空间重建世界空间坐标
        let viewPos = vec4<f32>(
            ndcPos.x ,
            ndcPos.y ,
            avgDepth ,
            1.0
        );
              // 使用投影矩阵的逆矩阵重建世界空间坐标
        let worldPos4 = topCamera.viewProjInverse * viewPos;
        return (worldPos4.z / worldPos4.w)+params.offsetZ;  // 返回世界空间的Z坐标
    }

  
   // 修改插值函数
     fn interpolatePoints(p1: Point, p2: Point, segmentIndex: u32) {
        let baseIndex = segmentIndex * u32(params.segmentPoints);
        
        // 检查p1和p2是否是同一个点
        let isSamePoint = all(p1.pos.xy == p2.pos.xy);
        
        // 对第一个点采样深度
        let p1Height = sampleHeight(p1.pos.xy);
        outputPoints[baseIndex].pos = vec4<f32>(p1.pos.x, p1.pos.y, p1Height, 1.0);
        
        // 如果是同一个点，所有插值点都使用相同的采样高度
        if (isSamePoint) {
            for (var i = 1u; i < u32(params.segmentPoints); i = i + 1u) {
                outputPoints[baseIndex + i].pos = vec4<f32>(p1.pos.x, p1.pos.y, p1Height, 1.0);
            }
            return;
        }
        
        // 不是同一个点时的插值逻辑
        let dir = p2.pos.xy - p1.pos.xy;
        let segmentLength = length(dir);
        
        // 中间点的插值
        for (var i = 1u; i < u32(params.segmentPoints) - 1u; i = i + 1u) {
            let step = (segmentLength * f32(i)) / f32(u32(params.segmentPoints) - 1u);
            let t = step / segmentLength;
            let currentPos = p1.pos.xy + dir * t;
            let height = sampleHeight(currentPos);
            outputPoints[baseIndex + i].pos = vec4<f32>(currentPos.x, currentPos.y, height, 1.0);
        }
        
        // 对最后一个点采样深度
        if (segmentIndex < arrayLength(&inputPoints) - 2u) {
            let p2Height = sampleHeight(p2.pos.xy);
            outputPoints[baseIndex + u32(params.segmentPoints) - 1u].pos = 
                vec4<f32>(p2.pos.x, p2.pos.y, p2Height, 1.0);
        }
    }

    @compute @workgroup_size(256)
    fn main(@builtin(global_invocation_id) global_id: vec3<u32>) {
        let pointIndex = global_id.x;
        
        // 确保不是最后一个点
        if (pointIndex >= arrayLength(&inputPoints) - 1u) {
            return;
        }

        // 获取当前点和下一个点
        let currentPoint = inputPoints[pointIndex];
        let nextPoint = inputPoints[pointIndex + 1];
        
        // 计算插值点
        interpolatePoints(currentPoint, nextPoint, pointIndex);     
        // let height =sampleHeight(currentPoint.pos.xy);
        //  outputPoints[pointIndex].pos = vec4<f32>(
        //     currentPoint.pos.x,
        //     currentPoint.pos.y,
        //     height,
        //     1.0
        // );  
    }
`;
    this.computePipelineIDW = Engine.instance.device.createComputePipeline({
      label: "ComputePipelineIDW",
      layout: "auto",
      compute: {
        module: Engine.instance.device.createShaderModule({
          code: IDWShader,
        }),
        entryPoint: "main",
      },
    });
    // Bind group wiring the depth texture, cameras, point buffers, and params.
    this.IDWBindGroup = Engine.instance.device.createBindGroup({
      layout: this.computePipelineIDW.getBindGroupLayout(0),
      entries: [
        {
          binding: 0,
          resource: this.depthTexture.createView(),
        },
        {
          binding: 1,
          resource: {
            buffer: this.topCamera,
          },
        },
        {
          binding: 2,
          resource: {
            buffer: inputBuffer,
          },
        },
        {
          binding: 3,
          resource: {
            buffer: this.outputBuffer,
          },
        },
        {
          binding: 4,
          resource: {
            buffer: this.paramsBuffer,
          },
        },
      ],
    });
  }
  /**
   * Expand the flat [x, y, z, ...] raw positions into a tightly packed
   * Float32Array of vec4s (w = 1.0), matching the WGSL `Point` struct.
   * @returns {Float32Array} packed vec4 point data
   */
  createLineSegment() {
    const lineSegmentData = [];
    for (let i = 0; i < this.rawPosition.length; i += 3) {
      const x = this.rawPosition[i];
      const y = this.rawPosition[i + 1];
      const z = this.rawPosition[i + 2];
      lineSegmentData.push(x, y, z, 1.0);
    }
    return new Float32Array(lineSegmentData);
  }
  /**
   * Render the top-down depth pass, dispatch the interpolation compute pass,
   * and read the interpolated points back to the CPU.
   * @returns {Promise<Float32Array>} interpolated vec4 points (stride 4,
   *          exposed on the result as `out.step`)
   */
  async update() {
    // Build a top-down camera looking straight down at the scene center.
    const camera = Engine.instance.scene.getCamera();
    const eye = vec3.fromValues(camera.at.x, camera.at.y, camera.distance);
    const up = vec3.fromValues(0, 1, 0);
    const target = vec3.fromValues(camera.at.x, camera.at.y, 0);
    this.viewMatrix = mat4.lookAt(eye, target, up);
    this.projMatrix = camera.project.elements;
    this.viewProjMatrix = mat4.multiply(this.projMatrix, this.viewMatrix);
    this.viewProjInverseMatrix = mat4.inverse(this.viewProjMatrix);
    const cameraParam = camera.getParam("camera");
    // Snapshot the current camera uniform so restore() can put it back.
    this.rowCameraData = new Float32Array([
      ...camera.vp.elements,
      ...camera.vpInvert.elements,
      ...camera.view.elements,
      ...camera.project.elements,
      ...camera.rotation.elements,
      ...camera.trans.matrixWorld.elements,
      camera.trans.position.x,
      camera.trans.position.y,
      camera.trans.position.z,
      camera.distance,
      ...camera.size,
      camera.near,
      camera.far,
      ...camera.at,
    ]);
    this.newCameraData = new Float32Array([
      ...this.viewProjMatrix,
      ...camera.vpInvert.elements,
      ...this.viewMatrix,
      ...this.projMatrix,
      ...camera.rotation.elements,
      ...camera.trans.matrixWorld.elements,
      ...eye,
      camera.distance,
      ...camera.size,
      camera.near,
      camera.far,
      ...target,
    ]);
    // Upload the top-down camera and render the depth pre-pass.
    cameraParam.buffer(this.newCameraData);
    const meshRednerCom = Register.instance
      .manager(ComponentType.MeshRender)
      .get("x");
    const bundles = meshRednerCom.bundles;
    const commandEncoder = Engine.instance.device.createCommandEncoder();
    this.renderPassDescriptor = this.getDescript();
    const renderPass = commandEncoder.beginRenderPass(
      this.renderPassDescriptor,
    );
    renderPass.executeBundles(bundles);
    renderPass.end();
    const cbf = commandEncoder.finish();
    Engine.instance.queue.submit([cbf]);
    // Put the original camera uniform back for normal rendering.
    this.restore();
    const topCameraData = new Float32Array([
      ...this.viewProjMatrix,
      ...this.viewProjInverseMatrix,
      ...Engine.instance.size, 0, 0,
      camera.near, camera.far, 0, 0,
    ]);
    Engine.instance.queue.writeBuffer(this.topCamera, 0, topCameraData);
    // Dispatch one invocation per line segment (workgroup size 256).
    const IDwCommandEncoder = Engine.instance.device.createCommandEncoder();
    const computePass = IDwCommandEncoder.beginComputePass();
    computePass.setPipeline(this.computePipelineIDW);
    computePass.setBindGroup(0, this.IDWBindGroup);
    const workgroupCount = Math.ceil((this.pointsCount - 1) / 256);
    computePass.dispatchWorkgroups(workgroupCount);
    computePass.end();
    // Staging buffer for readback. (Fixed: previously referenced the
    // undefined global `engine` instead of `Engine.instance`.)
    const outputBuffer1 = Engine.instance.device.createBuffer({
      size: this.outputBuffer.size,
      usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
    });
    IDwCommandEncoder.copyBufferToBuffer(
      this.outputBuffer,
      0,
      outputBuffer1,
      0,
      this.outputBuffer.size,
    );
    Engine.instance.device.queue.submit([IDwCommandEncoder.finish()]);
    await outputBuffer1.mapAsync(GPUMapMode.READ);
    // Copy the mapped range before unmapping: the mapped ArrayBuffer is
    // detached on unmap(), so returning a view over it would go stale.
    const out = new Float32Array(outputBuffer1.getMappedRange().slice(0));
    outputBuffer1.unmap();
    outputBuffer1.destroy();
    out.step = 4; // components per point (vec4)
    console.log(out, "out");
    return out;
  }
  // TODO: not implemented yet — intended to extrude a line into a volume.
  lineToVolume(rowPosition) {

  }
}
export { LineUtile };
