package api

import (
	"fmt"
	"wgmat/pkg/core"

	"github.com/cogentcore/webgpu/wgpu"
)

// getOrCreateAddKernel returns the cached element-wise addition kernel,
// compiling and caching it on first use.
func getOrCreateAddKernel(ctx *core.Context) (*core.Kernel, error) {
	const kernelKey = "matrix_add"
	kernelCache := ctx.KernelCache()

	// Fast path: kernel already compiled and cached.
	if cached, ok := kernelCache.Load(kernelKey); ok {
		return cached.(*core.Kernel), nil
	}

	kernel, err := createAddKernel(ctx)
	if err != nil {
		return nil, err
	}

	// LoadOrStore instead of Store: if another goroutine compiled the same
	// kernel concurrently, all callers end up sharing one instance rather
	// than the last writer silently replacing earlier ones.
	// NOTE(review): assumes KernelCache() returns a *sync.Map (its
	// Load/Store signatures match) — confirm LoadOrStore is available.
	actual, _ := kernelCache.LoadOrStore(kernelKey, kernel)
	return actual.(*core.Kernel), nil
}

// getOrCreateMulKernel returns the cached naive matrix-multiplication kernel,
// compiling and caching it on first use.
func getOrCreateMulKernel(ctx *core.Context) (*core.Kernel, error) {
	const kernelKey = "matrix_mul"
	kernelCache := ctx.KernelCache()

	// Fast path: kernel already compiled and cached.
	if cached, ok := kernelCache.Load(kernelKey); ok {
		return cached.(*core.Kernel), nil
	}

	kernel, err := createMulKernel(ctx)
	if err != nil {
		return nil, err
	}

	// LoadOrStore instead of Store: if another goroutine compiled the same
	// kernel concurrently, all callers end up sharing one instance rather
	// than the last writer silently replacing earlier ones.
	// NOTE(review): assumes KernelCache() returns a *sync.Map (its
	// Load/Store signatures match) — confirm LoadOrStore is available.
	actual, _ := kernelCache.LoadOrStore(kernelKey, kernel)
	return actual.(*core.Kernel), nil
}

// createAddKernel builds the element-wise matrix addition kernel:
// one invocation per (row, col) element, C = A + B.
//
// The Params struct layout (4 x u32) must match the host-side uniform
// buffer; total_elements is kept for layout compatibility even though the
// shader no longer needs it for its bounds check.
func createAddKernel(ctx *core.Context) (*core.Kernel, error) {
	wgslCode := `struct Params {
	rows: u32,
	cols: u32,
	total_elements: u32,
	padding: u32,
}

@group(0) @binding(0) var<storage, read> A: array<f32>;
@group(0) @binding(1) var<storage, read> B: array<f32>;
@group(0) @binding(2) var<storage, read_write> C: array<f32>;
@group(0) @binding(3) var<uniform> params: Params;

@compute @workgroup_size(16, 16, 1)
fn main(@builtin(global_invocation_id) global_id: vec3<u32>) {
	let row = global_id.y;
	let col = global_id.x;

	// Bounds-check row and col individually. The previous guard
	// (idx >= total_elements) let threads with col >= cols alias into the
	// next row, producing duplicate concurrent writes to the same C element.
	if row >= params.rows || col >= params.cols {
		return;
	}

	let idx = row * params.cols + col;
	C[idx] = A[idx] + B[idx];
}`

	return createKernelFromWGSL(ctx, "add_shader", wgslCode, []wgpu.BindGroupLayoutEntry{
		{Binding: 0, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeReadOnlyStorage}},
		{Binding: 1, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeReadOnlyStorage}},
		{Binding: 2, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeStorage}},
		{Binding: 3, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeUniform}},
	})
}

// createMulKernel builds the naive matrix-multiplication kernel: one
// invocation per output element of C (rowsA x colsB), each accumulating a
// dot product of length colsA.
func createMulKernel(ctx *core.Context) (*core.Kernel, error) {
	const shaderSource = `struct Params {
	rowsA: u32,
	colsA: u32,
	colsB: u32,
	padding: u32,
}

@group(0) @binding(0) var<storage, read> A: array<f32>;
@group(0) @binding(1) var<storage, read> B: array<f32>;
@group(0) @binding(2) var<storage, read_write> C: array<f32>;
@group(0) @binding(3) var<uniform> params: Params;

@compute @workgroup_size(16, 16, 1)
fn main(@builtin(global_invocation_id) global_id: vec3<u32>) {
	let row = global_id.y;
	let col = global_id.x;
	
	if row >= params.rowsA || col >= params.colsB {
		return;
	}
	
	var sum = 0.0;
	for (var k = 0u; k < params.colsA; k = k + 1) {
		let idxA = row * params.colsA + k;
		let idxB = k * params.colsB + col;
		sum = sum + A[idxA] * B[idxB];
	}
	
	let idxC = row * params.colsB + col;
	C[idxC] = sum;
}`

	// Bindings 0/1 are the read-only inputs, 2 the writable output,
	// 3 the dimensions uniform.
	layoutEntries := []wgpu.BindGroupLayoutEntry{
		{Binding: 0, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeReadOnlyStorage}},
		{Binding: 1, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeReadOnlyStorage}},
		{Binding: 2, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeStorage}},
		{Binding: 3, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeUniform}},
	}

	return createKernelFromWGSL(ctx, "mul_shader", shaderSource, layoutEntries)
}

// getOrCreateTiledMulKernel returns the cached tiled matrix-multiplication
// kernel for the given tile dimensions, compiling and caching it on first
// use. Each (tileX, tileY) pair gets its own cache entry because the tile
// sizes are baked into the WGSL source.
func getOrCreateTiledMulKernel(ctx *core.Context, tileX, tileY int) (*core.Kernel, error) {
	kernelKey := fmt.Sprintf("matrix_mul_tiled_%d_%d", tileX, tileY)
	kernelCache := ctx.KernelCache()

	// Fast path: kernel already compiled and cached.
	if cached, ok := kernelCache.Load(kernelKey); ok {
		return cached.(*core.Kernel), nil
	}

	kernel, err := createTiledMulKernel(ctx, tileX, tileY)
	if err != nil {
		return nil, err
	}

	// LoadOrStore instead of Store: if another goroutine compiled the same
	// kernel concurrently, all callers end up sharing one instance rather
	// than the last writer silently replacing earlier ones.
	// NOTE(review): assumes KernelCache() returns a *sync.Map (its
	// Load/Store signatures match) — confirm LoadOrStore is available.
	actual, _ := kernelCache.LoadOrStore(kernelKey, kernel)
	return actual.(*core.Kernel), nil
}
// createTiledMulKernel builds a workgroup-shared-memory tiled
// matrix-multiplication kernel with a tileX x tileY workgroup.
//
// The k (inner) dimension is tiled by TILE_X, matching how the A tile and
// the kTiles count are laid out. The B tile is therefore TILE_X rows x
// TILE_X cols and is loaded cooperatively: each thread strides over rows in
// steps of TILE_Y. This fixes a bug in the previous version, which loaded
// the B tile with a TILE_Y row stride but consumed it as
// Bs[k * TILE_X + x] with k < TILE_X — reading uninitialized workgroup
// memory whenever tileX != tileY (square tiles happened to work).
func createTiledMulKernel(ctx *core.Context, tileX, tileY int) (*core.Kernel, error) {
	if tileX <= 0 || tileY <= 0 {
		return nil, fmt.Errorf("invalid tile size %dx%d: both dimensions must be positive", tileX, tileY)
	}

	// Workgroup-array lengths must be compile-time constants in WGSL, so
	// they are precomputed here and formatted into the shader source.
	aTileSize := tileX * tileY // A tile: TILE_Y rows x TILE_X cols
	bTileSize := tileX * tileX // B tile: TILE_X rows (k) x TILE_X cols

	wgslCode := fmt.Sprintf(`
struct Params {
    rowsA: u32,
    colsA: u32,
    colsB: u32,
    padding: u32,
}

@group(0) @binding(0) var<storage, read> A: array<f32>;
@group(0) @binding(1) var<storage, read> B: array<f32>;
@group(0) @binding(2) var<storage, read_write> C: array<f32>;
@group(0) @binding(3) var<uniform> params: Params;

const TILE_X = %du;
const TILE_Y = %du;

var<workgroup> As: array<f32, %du>; // TILE_Y x TILE_X
var<workgroup> Bs: array<f32, %du>; // TILE_X x TILE_X

@compute @workgroup_size(%du, %du, 1u)
fn main(
    @builtin(global_invocation_id) global_id: vec3<u32>,
    @builtin(local_invocation_id) local_id: vec3<u32>,
    @builtin(workgroup_id) group_id: vec3<u32>
) {
    let row = group_id.y * TILE_Y + local_id.y;
    let col = group_id.x * TILE_X + local_id.x;

    var sum = 0.0;
    let kTiles = (params.colsA + TILE_X - 1u) / TILE_X;

    // No early returns: every thread must reach both barriers below,
    // so out-of-range threads simply load zeros and skip the final store.
    for (var t = 0u; t < kTiles; t++) {
        // Load A tile: one element per thread.
        let aCol = t * TILE_X + local_id.x;
        if row < params.rowsA && aCol < params.colsA {
            As[local_id.y * TILE_X + local_id.x] = A[row * params.colsA + aCol];
        } else {
            As[local_id.y * TILE_X + local_id.x] = 0.0;
        }

        // Load B tile cooperatively: TILE_X rows covered by TILE_Y threads.
        for (var r = local_id.y; r < TILE_X; r += TILE_Y) {
            let bRow = t * TILE_X + r;
            if bRow < params.colsA && col < params.colsB {
                Bs[r * TILE_X + local_id.x] = B[bRow * params.colsB + col];
            } else {
                Bs[r * TILE_X + local_id.x] = 0.0;
            }
        }

        workgroupBarrier();

        // Partial dot product over this k-tile.
        for (var k = 0u; k < TILE_X; k++) {
            sum += As[local_id.y * TILE_X + k] * Bs[k * TILE_X + local_id.x];
        }

        workgroupBarrier();
    }

    if row < params.rowsA && col < params.colsB {
        C[row * params.colsB + col] = sum;
    }
}
`, tileX, tileY, aTileSize, bTileSize, tileX, tileY)

	return createKernelFromWGSL(ctx, fmt.Sprintf("tiled_mul_%d_%d", tileX, tileY), wgslCode, []wgpu.BindGroupLayoutEntry{
		{Binding: 0, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeReadOnlyStorage}},
		{Binding: 1, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeReadOnlyStorage}},
		{Binding: 2, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeStorage}},
		{Binding: 3, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeUniform}},
	})
}

// createKernelFromWGSL compiles a WGSL compute shader and assembles it into
// a ready-to-dispatch core.Kernel (shared helper for all kernel builders).
//
// Ownership / release rules:
//   - shader and pipelineLayout are only needed while the pipeline is being
//     created, so both are released via defer before returning.
//   - bindGroupLayout is handed to the returned Kernel (callers need it to
//     build bind groups), so it is released here only on error paths.
func createKernelFromWGSL(ctx *core.Context, label string, code string, entries []wgpu.BindGroupLayoutEntry) (*core.Kernel, error) {
	device := ctx.Device()

	// Compile the WGSL source into a shader module.
	shader, err := device.CreateShaderModule(&wgpu.ShaderModuleDescriptor{
		Label:          label,
		WGSLDescriptor: &wgpu.ShaderModuleWGSLDescriptor{Code: code},
	})
	if err != nil {
		return nil, err
	}
	defer shader.Release()

	// Describe the shader's binding interface (buffers at @group(0)).
	bindGroupLayout, err := device.CreateBindGroupLayout(&wgpu.BindGroupLayoutDescriptor{
		Label:   label + "_layout",
		Entries: entries,
	})
	if err != nil {
		return nil, err
	}

	pipelineLayout, err := device.CreatePipelineLayout(&wgpu.PipelineLayoutDescriptor{
		BindGroupLayouts: []*wgpu.BindGroupLayout{bindGroupLayout},
	})
	if err != nil {
		// The layout will not be handed to a Kernel; free it now.
		bindGroupLayout.Release()
		return nil, err
	}
	defer pipelineLayout.Release()

	// Entry point is fixed to "main" by convention across all kernels here.
	pipeline, err := device.CreateComputePipeline(&wgpu.ComputePipelineDescriptor{
		Label:  label + "_pipeline",
		Layout: pipelineLayout,
		Compute: wgpu.ProgrammableStageDescriptor{
			Module:     shader,
			EntryPoint: "main",
		},
	})
	if err != nil {
		bindGroupLayout.Release()
		return nil, err
	}

	// Kernel takes ownership of pipeline and bindGroupLayout.
	return &core.Kernel{Pipeline: pipeline, Layout: bindGroupLayout}, nil
}

// getOrCreateQRKernel returns the cached Householder QR-decomposition
// kernel, compiling and caching it on first use.
func getOrCreateQRKernel(ctx *core.Context) (*core.Kernel, error) {
	const kernelKey = "matrix_qr_householder"
	kernelCache := ctx.KernelCache()

	// Fast path: kernel already compiled and cached.
	if cached, ok := kernelCache.Load(kernelKey); ok {
		return cached.(*core.Kernel), nil
	}

	kernel, err := createQRKernel(ctx)
	if err != nil {
		return nil, err
	}

	// LoadOrStore instead of Store: if another goroutine compiled the same
	// kernel concurrently, all callers end up sharing one instance rather
	// than the last writer silently replacing earlier ones.
	// NOTE(review): assumes KernelCache() returns a *sync.Map (its
	// Load/Store signatures match) — confirm LoadOrStore is available.
	actual, _ := kernelCache.LoadOrStore(kernelKey, kernel)
	return actual.(*core.Kernel), nil
}

// createQRKernel builds the WGSL kernel for one Householder step of a QR
// decomposition. Per dispatch (params.currentCol selects the column) the
// shader runs three phases: 1) invocation 0 builds the Householder vector v
// for the current column and stores its squared norm at v[m - col],
// 2) every in-range row applies the reflector to R's trailing submatrix,
// 3) the same reflector is accumulated into Q.
//
// Bindings: 0 = Q (read_write), 1 = R (read_write), 2 = Params uniform,
// 3 = v scratch buffer (read_write). v[0] == 0.0 doubles as a "skip this
// degenerate column" marker set in phase 1 and read in phase 2.
//
// NOTE(review): several hazards in the shader should be confirmed against
// the host-side dispatch code (not visible in this file):
//   - `if (row >= m) { return; }` and the early `return` on the degenerate
//     column both exit before workgroupBarrier(); a barrier reached by only
//     some invocations of a workgroup is non-uniform control flow, which
//     WGSL forbids — this may fail validation or hang, depending on the
//     implementation.
//   - workgroupBarrier() synchronizes only one workgroup. With
//     @workgroup_size(128), a matrix with m > 128 spans several workgroups:
//     lid.x == 0 of each workgroup races to write v, and the barrier gives
//     no cross-workgroup ordering. Presumably the host dispatches a single
//     workgroup per column — verify.
//   - The R-update loop reads R[i * n + j] for all i while sibling
//     invocations concurrently write R[row * n + j]; no barrier orders
//     those reads against the writes even within one workgroup.
func createQRKernel(ctx *core.Context) (*core.Kernel, error) {
	wgslCode := `
// Kernel: qr_householder_optimized
struct Params {
    m: u32,
    n: u32,
    k: u32,
    currentCol: u32,
}

@group(0) @binding(0) var<storage, read_write> Q: array<f32>;
@group(0) @binding(1) var<storage, read_write> R: array<f32>;
@group(0) @binding(2) var<uniform> params: Params;
@group(0) @binding(3) var<storage, read_write> v: array<f32>;

// ✅ 改进1：使用更小、更灵活的workgroup
@compute @workgroup_size(128)
fn main(
    @builtin(global_invocation_id) gid: vec3<u32>,
    @builtin(local_invocation_id) lid: vec3<u32>,
) {
    let col = params.currentCol;
    let m = params.m;
    let n = params.n;
    let k = params.k;
    let row = gid.x;

    // ✅ 改进2：提前边界检查，避免无效计算
    if (row >= m) {
        return;
    }

    // === 阶段1：由线程0计算Householder向量 ===
    if (lid.x == 0u) {
        var x_norm_sq = 0.0;
        for (var i = col; i < m; i++) {
            let val = R[i * n + col];
            x_norm_sq += val * val;
        }
        
        let x_norm = sqrt(x_norm_sq);
        let x0 = R[col * n + col];

        // ✅ 改进3：增强数值稳定性检查
        if (x_norm < 1e-20 || abs(x0) < 1e-20) {
            v[0] = 0.0; // 标记：跳过此列
            v[m - col] = 1.0; // vTv设为1避免除零
            return;
        }

        let alpha = -sign(x0) * x_norm;
        
        // 构造向量v
        for (var i = col; i < m; i++) {
            let idx = i - col;
            if (i == col) {
                v[idx] = x0 - alpha; // v[0] = x0 - alpha
            } else {
                v[idx] = R[i * n + col];
            }
        }

        // 计算vTv并存储
        var vTv = 0.0;
        for (var i = col; i < m; i++) {
            let idx = i - col;
            vTv += v[idx] * v[idx];
        }
        v[m - col] = vTv;
    }

    // ✅ 改进4：添加工作组同步屏障（关键修复！）
    // 确保所有线程都能看到v和vTv的计算结果
    workgroupBarrier();

    // === 阶段2：所有线程读取共享数据 ===
    let skip = (v[0] == 0.0);
    let vTv = v[m - col];
    let vIdx = row - col; // 当前行在子向量中的索引

    // 跳过无效列
    if (skip || row < col) {
        return;
    }

    // ✅ 改进5：预计算公共因子避免重复除法
    let two_over_vTv = 2.0 / vTv;
    let v_row = select(0.0, v[vIdx], row >= col); // 安全读取

    // === 更新R矩阵（右下子矩阵）===
    for (var j = col; j < n; j++) {
        var dot = 0.0;
        // ✅ 改进6：使用更高效的循环（虽仍是O(m)，但减少重复计算）
        for (var i = col; i < m; i++) {
            let vecIdx = i - col;
            dot += v[vecIdx] * R[i * n + j];
        }
        // ✅ 改进7：使用fma（融合乘加）提高精度
        R[row * n + j] = fma(-v_row * two_over_vTv, dot, R[row * n + j]);
    }

    // === 更新Q矩阵（累积正交变换）===
    for (var j = 0u; j < k; j++) {
        var dot = 0.0;
        for (var i = col; i < m; i++) {
            let vecIdx = i - col;
            dot += v[vecIdx] * Q[i * k + j];
        }
        Q[row * k + j] = fma(-v_row * two_over_vTv, dot, Q[row * k + j]);
    }
}
`
	return createKernelFromWGSL(ctx, "qr_householder", wgslCode, []wgpu.BindGroupLayoutEntry{
		{Binding: 0, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeStorage}},
		{Binding: 1, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeStorage}},
		{Binding: 2, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeUniform}},
		{Binding: 3, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeStorage}},
	})
}
