// deferred_compute.go
package compute

import (
	"fmt"
	"log"
	"wgmat/gpucore"

	"github.com/cogentcore/webgpu/wgpu"
)

// DeferredCompute batches multiple GPU compute operations into one command
// encoder so they can be submitted to the queue in a single Submit call.
type DeferredCompute struct {
	ctx     gpucore.GPUContext   // device / kernel registry; owned by the caller
	encoder *wgpu.CommandEncoder // records dispatches; nil after Submit/Close

	// Temporary resources (e.g. uniform buffers) referenced by encoded
	// commands; kept alive until Submit and released there (or in Close).
	tempUniforms []*wgpu.Buffer

	// submitted is set once Submit succeeds; further encoding is rejected.
	submitted bool
}

// NewDeferredCompute creates a deferred-compute context backed by a fresh
// command encoder on ctx's device.
func NewDeferredCompute(ctx gpucore.GPUContext) (*DeferredCompute, error) {
	enc, err := ctx.Device().CreateCommandEncoder(nil)
	if err != nil {
		return nil, fmt.Errorf("create command encoder: %w", err)
	}
	// submitted defaults to its zero value (false).
	return &DeferredCompute{ctx: ctx, encoder: enc}, nil
}

// Add schedules an element-wise addition a+b on the GPU and returns a
// reference to a freshly allocated output matrix. The dispatch is only
// recorded into the command encoder; nothing executes until Submit.
func (dc *DeferredCompute) Add(a, b MatrixRef) (MatrixRef, error) {
	// Guard against encoding after Submit / into a nil encoder, consistent
	// with LUFactorize and SolveGPU (this check was previously missing here).
	if err := dc.ensureEncoder(); err != nil {
		return MatrixRef{}, err
	}
	if a.Rows != b.Rows || a.Cols != b.Cols {
		return MatrixRef{}, fmt.Errorf("matrix dimension mismatch")
	}
	n := a.Rows * a.Cols
	outBuf, err := dc.ctx.CreateBuffer(make([]float32, n), gpucore.BufferUsageStorage|gpucore.BufferUsageCopySrc)
	if err != nil {
		return MatrixRef{}, err
	}

	kernel, ok := dc.ctx.GetKernel("add")
	if !ok {
		outBuf.Release()
		return MatrixRef{}, fmt.Errorf("kernel 'add' not registered")
	}

	// One workgroup covers 256 elements; round up so every element is touched.
	gridX := (n + 255) / 256 // Ceil(n/256)
	err = kernel.EncodeDispatch(dc.encoder, [3]uint32{gridX, 1, 1}, []gpucore.GPUBuffer{a.Buffer, b.Buffer, outBuf}, nil)
	if err != nil {
		outBuf.Release()
		return MatrixRef{}, err
	}

	return MatrixRef{Buffer: outBuf, Rows: a.Rows, Cols: a.Cols}, nil
}

// GEMM schedules the matrix multiply C = A·B (M×K by K×N) on the GPU and
// returns an M×N output reference. The dispatch is only recorded; nothing
// executes until Submit.
func (dc *DeferredCompute) GEMM(a, b MatrixRef, params GemmParams) (MatrixRef, error) {
	// Guard against encoding after Submit / into a nil encoder, consistent
	// with LUFactorize and SolveGPU (this check was previously missing here).
	if err := dc.ensureEncoder(); err != nil {
		return MatrixRef{}, err
	}
	if a.Cols != b.Rows || a.Rows != params.M || a.Cols != params.K || b.Cols != params.N {
		return MatrixRef{}, fmt.Errorf("invalid GEMM dimensions")
	}
	outBuf, err := dc.ctx.CreateBuffer(make([]float32, params.M*params.N), gpucore.BufferUsageStorage|gpucore.BufferUsageCopySrc)
	if err != nil {
		return MatrixRef{}, err
	}
	// Uniform layout: [M, N, K, pad] — the trailing 0 pads to a 16-byte multiple.
	uniformContent := []uint32{params.M, params.N, params.K, 0}
	uniformBytes := wgpu.ToBytes(uniformContent)
	alignedUniform := gpucore.AlignUniform(uniformBytes)
	uniformBuf, err := dc.ctx.Device().CreateBufferInit(&wgpu.BufferInitDescriptor{
		Contents: alignedUniform,
		Usage:    wgpu.BufferUsageUniform,
	})
	if err != nil {
		outBuf.Release()
		return MatrixRef{}, fmt.Errorf("create uniform buffer: %w", err)
	}

	kernel, ok := dc.ctx.GetKernel("gemm")
	if !ok {
		outBuf.Release()
		uniformBuf.Release()
		return MatrixRef{}, fmt.Errorf("kernel 'gemm' not registered")
	}

	// 16×16 workgroup tiles; round up so partial tiles are covered.
	gridX := (params.N + 15) / 16 // Ceil(N/16)
	gridY := (params.M + 15) / 16 // Ceil(M/16)
	err = kernel.EncodeDispatch(dc.encoder, [3]uint32{gridX, gridY, 1}, []gpucore.GPUBuffer{a.Buffer, b.Buffer, outBuf}, []*wgpu.Buffer{uniformBuf})
	if err != nil {
		outBuf.Release()
		uniformBuf.Release()
		return MatrixRef{}, err
	}
	// Transfer ownership of the uniform to dc only AFTER the dispatch has been
	// encoded successfully. Appending earlier and then Release()-ing on an
	// error path left the released buffer in tempUniforms, causing a double
	// release in Submit/Close.
	dc.tempUniforms = append(dc.tempUniforms, uniformBuf)
	return MatrixRef{Buffer: outBuf, Rows: params.M, Cols: params.N}, nil
}

// LUFactorize schedules an in-place LU factorization (with pivoting) of the
// square matrix a. It only encodes the dispatch — nothing is submitted here —
// so singularity cannot be detected yet: call Submit, then CheckSingularity
// on the returned pivot buffer.
//
// Returns a reference to the (owned) LU output buffer and the pivot buffer.
func (dc *DeferredCompute) LUFactorize(a MatrixRef) (luRef MatrixRef, pivBuf gpucore.GPUBuffer, err error) {
	if err := dc.ensureEncoder(); err != nil {
		return MatrixRef{}, nil, err
	}

	if a.Rows != a.Cols {
		return MatrixRef{}, nil, fmt.Errorf("LU requires square matrix")
	}
	n := a.Rows

	log.Printf("LU: 开始分解 %dx%d 矩阵", n, n)

	// Pivot buffer. NOTE(review): this is zero-filled, NOT the identity
	// permutation a previous comment claimed; presumably the lu_full kernel
	// initializes/overwrites it — confirm against the shader.
	pivBuf, err = dc.ctx.CreateBuffer(make([]uint32, n), gpucore.BufferUsageStorage|gpucore.BufferUsageCopySrc)
	if err != nil {
		return MatrixRef{}, nil, fmt.Errorf("create pivot buffer: %w", err)
	}

	// Copy the input matrix into a fresh LU buffer so a's data is not clobbered.
	// NOTE(review): Read() is a synchronous readback; if a was produced by
	// earlier deferred (not yet submitted) dispatches, this copy may be stale
	// — confirm the intended call ordering with callers.
	aData, err := a.Buffer.Read()
	if err != nil {
		pivBuf.Release()
		return MatrixRef{}, nil, fmt.Errorf("read input matrix: %w", err)
	}
	luBuf, err := dc.ctx.CreateBuffer(aData, gpucore.BufferUsageStorage|gpucore.BufferUsageCopySrc)
	if err != nil {
		pivBuf.Release()
		return MatrixRef{}, nil, fmt.Errorf("create LU buffer: %w", err)
	}

	// Look up the LU kernel before encoding anything with it.
	kernel, ok := dc.ctx.GetKernel("lu_full")
	if !ok {
		luBuf.Release()
		pivBuf.Release()
		return MatrixRef{}, nil, fmt.Errorf("lu_full kernel not registered")
	}

	// Uniform: [n, 0, 0, 0] — trailing zeros pad to a 16-byte multiple.
	uniform := gpucore.AlignUniform(wgpu.ToBytes([]uint32{n, 0, 0, 0}))
	ub, err := dc.ctx.Device().CreateBufferInit(&wgpu.BufferInitDescriptor{
		Contents: uniform,
		Usage:    wgpu.BufferUsageUniform,
	})
	if err != nil {
		luBuf.Release()
		pivBuf.Release()
		return MatrixRef{}, nil, fmt.Errorf("uniform buffer: %w", err)
	}
	// Ownership of the uniform passes to dc; it is released in Submit/Close.
	dc.tempUniforms = append(dc.tempUniforms, ub)

	log.Printf("LU: 调度 kernel，grid=(1,1,1)")
	// Grid (1,1,1): a single workgroup handles the entire factorization.
	err = kernel.EncodeDispatch(dc.encoder, [3]uint32{1, 1, 1}, []gpucore.GPUBuffer{luBuf, pivBuf}, []*wgpu.Buffer{ub})
	if err != nil {
		luBuf.Release()
		pivBuf.Release()
		return MatrixRef{}, nil, fmt.Errorf("encode lu_full: %w", err)
	}

	// Deliberately NOT submitting here: the encoder stays alive so later
	// operations (e.g. SolveGPU) batch into the same submission.
	luRef = MatrixRef{Buffer: luBuf, Rows: n, Cols: n, owns: true}
	luRef.PivotBuf = pivBuf
	return luRef, pivBuf, nil
}

// CheckSingularity must be called after Submit. It reads back the pivot
// vector and reports an error when the first slot carries the singular-matrix
// sentinel (0xFFFFFFFF, presumably written by the LU kernel on a zero pivot).
func (dc *DeferredCompute) CheckSingularity(pivBuf gpucore.GPUBuffer) error {
	if pivBuf == nil {
		return fmt.Errorf("pivot buffer is nil")
	}

	// Block until all submitted GPU work has completed before reading back.
	dc.ctx.Wait()

	pivots, err := pivBuf.ReadAsUint32()
	if err != nil {
		return fmt.Errorf("read pivot buffer: %w", err)
	}
	if len(pivots) == 0 {
		return fmt.Errorf("pivot buffer is empty")
	}

	head := pivots[:min(3, len(pivots))]
	log.Printf("CheckSingularity: pivot = %v (first=0x%X)", head, pivots[0])

	if pivots[0] == 0xFFFFFFFF {
		return fmt.Errorf("matrix is singular")
	}
	return nil
}

// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}

// SolveGPU solves Ax = b using a precomputed LU factorization plus pivoting,
// entirely on the GPU. All three stages are only encoded — nothing executes
// until Submit.
//   - lu:     in-place LU matrix (f32[n*n], n×n)
//   - pivBuf: pivot vector (u32[n], GPU buffer)
//   - b:      RHS vector (f32[n], n×1)
//
// Returns x (f32[n]) as an owned n×1 MatrixRef.
func (dc *DeferredCompute) SolveGPU(lu, b MatrixRef, pivBuf gpucore.GPUBuffer) (MatrixRef, error) {
	if err := dc.ensureEncoder(); err != nil {
		return MatrixRef{}, err
	}

	n := lu.Rows
	// Validate that lu is actually square; previously only b was checked and a
	// non-square lu would silently index out of shape on the GPU.
	if lu.Cols != n {
		return MatrixRef{}, fmt.Errorf("lu must be square, got %dx%d", lu.Rows, lu.Cols)
	}
	if b.Rows != n || b.Cols != 1 {
		return MatrixRef{}, fmt.Errorf("b must be %dx1", n)
	}

	if pivBuf == nil {
		return MatrixRef{}, fmt.Errorf("pivot buffer is nil")
	}

	log.Printf("Solve: 解 %dx%d 方程组", n, n)

	// Step 1: apply the row permutation to b, producing Pb.
	pbBuf, err := dc.ctx.CreateBuffer(make([]float32, n), gpucore.BufferUsageStorage)
	if err != nil {
		return MatrixRef{}, fmt.Errorf("create Pb buffer: %w", err)
	}

	kernel, ok := dc.ctx.GetKernel("apply_pivot")
	if !ok {
		pbBuf.Release()
		return MatrixRef{}, fmt.Errorf("apply_pivot kernel not registered")
	}
	// Uniform: [n, 0, 0, 0] — trailing zeros pad to a 16-byte multiple.
	uniform := gpucore.AlignUniform(wgpu.ToBytes([]uint32{n, 0, 0, 0}))
	ub, err := dc.ctx.Device().CreateBufferInit(&wgpu.BufferInitDescriptor{
		Contents: uniform,
		Usage:    wgpu.BufferUsageUniform,
	})
	if err != nil {
		pbBuf.Release()
		return MatrixRef{}, fmt.Errorf("uniform for apply_pivot: %w", err)
	}
	dc.tempUniforms = append(dc.tempUniforms, ub)

	// One workgroup per 256 rows, rounded up.
	gridX := (n + 255) / 256
	err = kernel.EncodeDispatch(dc.encoder, [3]uint32{gridX, 1, 1},
		[]gpucore.GPUBuffer{b.Buffer, pivBuf, pbBuf}, []*wgpu.Buffer{ub})
	if err != nil {
		pbBuf.Release()
		return MatrixRef{}, fmt.Errorf("encode apply_pivot: %w", err)
	}

	// Step 2: forward substitution — solve L·y = Pb.
	yBuf, err := dc.ctx.CreateBuffer(make([]float32, n), gpucore.BufferUsageStorage)
	if err != nil {
		pbBuf.Release()
		return MatrixRef{}, fmt.Errorf("create y buffer: %w", err)
	}

	kernel, ok = dc.ctx.GetKernel("forward_sub")
	if !ok {
		pbBuf.Release()
		yBuf.Release()
		return MatrixRef{}, fmt.Errorf("forward_sub kernel not registered")
	}
	uniform = gpucore.AlignUniform(wgpu.ToBytes([]uint32{n, 0, 0, 0}))
	ub, err = dc.ctx.Device().CreateBufferInit(&wgpu.BufferInitDescriptor{
		Contents: uniform,
		Usage:    wgpu.BufferUsageUniform,
	})
	if err != nil {
		pbBuf.Release()
		yBuf.Release()
		return MatrixRef{}, fmt.Errorf("uniform for forward_sub: %w", err)
	}
	dc.tempUniforms = append(dc.tempUniforms, ub)

	// Grid (1,1,1): substitution is inherently sequential per row.
	err = kernel.EncodeDispatch(dc.encoder, [3]uint32{1, 1, 1},
		[]gpucore.GPUBuffer{lu.Buffer, pbBuf, yBuf}, []*wgpu.Buffer{ub})
	if err != nil {
		pbBuf.Release()
		yBuf.Release()
		return MatrixRef{}, fmt.Errorf("encode forward_sub: %w", err)
	}

	// Step 3: backward substitution — solve U·x = y.
	xBuf, err := dc.ctx.CreateBuffer(make([]float32, n), gpucore.BufferUsageStorage|gpucore.BufferUsageCopySrc)
	if err != nil {
		pbBuf.Release()
		yBuf.Release()
		return MatrixRef{}, fmt.Errorf("create x buffer: %w", err)
	}

	kernel, ok = dc.ctx.GetKernel("backward_sub")
	if !ok {
		pbBuf.Release()
		yBuf.Release()
		xBuf.Release()
		return MatrixRef{}, fmt.Errorf("backward_sub kernel not registered")
	}
	uniform = gpucore.AlignUniform(wgpu.ToBytes([]uint32{n, 0, 0, 0}))
	ub, err = dc.ctx.Device().CreateBufferInit(&wgpu.BufferInitDescriptor{
		Contents: uniform,
		Usage:    wgpu.BufferUsageUniform,
	})
	if err != nil {
		pbBuf.Release()
		yBuf.Release()
		xBuf.Release()
		return MatrixRef{}, fmt.Errorf("uniform for backward_sub: %w", err)
	}
	dc.tempUniforms = append(dc.tempUniforms, ub)

	err = kernel.EncodeDispatch(dc.encoder, [3]uint32{1, 1, 1},
		[]gpucore.GPUBuffer{lu.Buffer, yBuf, xBuf}, []*wgpu.Buffer{ub})
	if err != nil {
		pbBuf.Release()
		yBuf.Release()
		xBuf.Release()
		return MatrixRef{}, fmt.Errorf("encode backward_sub: %w", err)
	}

	// Deliberately NOT submitting: the encoder stays alive for batching.
	// NOTE(review): pbBuf/yBuf are released before the encoded work is
	// submitted; presumably the encoded bind groups hold their own references
	// keeping the buffers alive — confirm against the wgpu binding's
	// ownership rules.
	pbBuf.Release()
	yBuf.Release()

	return MatrixRef{Buffer: xBuf, Rows: n, Cols: 1, owns: true}, nil
}

// Submit finishes the command encoder and submits all recorded work to the
// GPU queue. It may be called at most once; afterwards only readback helpers
// (CheckSingularity) and Close are valid.
func (dc *DeferredCompute) Submit() error {
	if dc.submitted {
		return fmt.Errorf("already submitted")
	}
	if dc.encoder == nil {
		return fmt.Errorf("encoder is nil")
	}

	// Finish consumes the encoder into an executable command buffer.
	cmd, err := dc.encoder.Finish(nil)
	if err != nil {
		return fmt.Errorf("finish command encoder: %w", err)
	}
	// Drop our encoder reference before submitting; it is no longer usable.
	dc.encoder.Release()
	dc.encoder = nil
	dc.submitted = true

	defer cmd.Release()
	dc.ctx.Device().GetQueue().Submit(cmd)

	// Release temporary uniform buffers. NOTE(review): done without waiting
	// for GPU completion; presumably the queue keeps submitted resources
	// alive internally — confirm against the wgpu binding's ownership rules.
	for _, ub := range dc.tempUniforms {
		ub.Release()
	}
	dc.tempUniforms = nil

	return nil
}

// Close releases the encoder and any temporary buffers, whether or not
// Submit was ever called. It is safe to call more than once.
func (dc *DeferredCompute) Close() {
	if enc := dc.encoder; enc != nil {
		enc.Release()
		dc.encoder = nil
	}
	for _, buf := range dc.tempUniforms {
		buf.Release()
	}
	dc.tempUniforms = nil
}
// Device exposes the underlying wgpu device for callers needing direct access.
func (dc *DeferredCompute) Device() *wgpu.Device {
	return dc.ctx.Device()
}

// ensureEncoder guarantees a live command encoder, lazily recreating one when
// needed. It fails once Submit has been called.
func (dc *DeferredCompute) ensureEncoder() error {
	if dc.submitted {
		return fmt.Errorf("cannot add operations after Submit")
	}
	if dc.encoder != nil {
		return nil
	}
	enc, err := dc.ctx.Device().CreateCommandEncoder(nil)
	if err != nil {
		return fmt.Errorf("recreate encoder: %w", err)
	}
	dc.encoder = enc
	return nil
}
