package compute

import (
	"github.com/cogentcore/webgpu/wgpu"
)

// ----------------------------
// Add Kernel
// ----------------------------
// addWGSL is the compute shader for element-wise vector addition:
// out[i] = a[i] + b[i]. One invocation handles one element; because the
// dispatch is rounded up to the 256-wide workgroup, invocations with
// i >= len(a) return without writing. Assumes b and out are at least as
// long as a — TODO confirm the host enforces equal buffer lengths.
const addWGSL = `
@group(0) @binding(0) var<storage, read> a: array<f32>;
@group(0) @binding(1) var<storage, read> b: array<f32>;
@group(0) @binding(2) var<storage, read_write> out: array<f32>;
@compute @workgroup_size(256)
fn main(@builtin(global_invocation_id) id: vec3<u32>) {
    let i = id.x;
    if (i < arrayLength(&a)) {
        out[i] = a[i] + b[i];
    }
}`

// createAddLayout builds the bind-group layout matching addWGSL: two
// read-only storage buffers (inputs a, b) at bindings 0 and 1, and one
// read-write storage buffer (output) at binding 2, all visible only to
// the compute stage.
//
// NOTE(review): the error from CreateBindGroupLayout is discarded
// (consistent with the other layout creators in this file), so callers
// receive nil on failure — consider surfacing the error.
func createAddLayout(device *wgpu.Device) *wgpu.BindGroupLayout {
	entries := []wgpu.BindGroupLayoutEntry{
		{Binding: 0, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeReadOnlyStorage}},
		{Binding: 1, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeReadOnlyStorage}},
		{Binding: 2, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeStorage}},
	}
	layout, _ := device.CreateBindGroupLayout(&wgpu.BindGroupLayoutDescriptor{Entries: entries})
	return layout
}

// ----------------------------
// GEMM Kernel
// ----------------------------
// gemmWGSL is a naive dense GEMM kernel computing C = A * B, where A is
// m×k, B is k×n, and C is m×n, all row-major f32. Each invocation
// produces one element of C via a length-k dot product; the output is
// tiled by 16×16 workgroups (gid.x = column, gid.y = row), and
// out-of-range invocations return early. Params.pad exists only to keep
// the uniform struct 16-byte sized.
const gemmWGSL = `
struct Params {
    m: u32,
    n: u32,
    k: u32,
    pad: u32,
};
@group(0) @binding(0) var<storage, read> A: array<f32>;
@group(0) @binding(1) var<storage, read> B: array<f32>;
@group(0) @binding(2) var<storage, read_write> C: array<f32>;
@group(0) @binding(3) var<uniform> p: Params;
@compute @workgroup_size(16, 16, 1)  // 从(8,8,1)增大到(16,16,1)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    let row = gid.y;
    let col = gid.x;
    if (row >= p.m || col >= p.n) { return; }
    var sum: f32 = 0.0;
    for (var i: u32 = 0u; i < p.k; i++) {
        sum += A[row * p.k + i] * B[i * p.n + col];
    }
    C[row * p.n + col] = sum;
}`

// createGemmLayout builds the bind-group layout matching gemmWGSL:
// read-only storage A and B (bindings 0-1), read-write storage C
// (binding 2), and the Params uniform (binding 3), compute-stage only.
//
// NOTE(review): the CreateBindGroupLayout error is discarded, matching
// the file's pattern; callers get nil on failure.
func createGemmLayout(device *wgpu.Device) *wgpu.BindGroupLayout {
	entries := []wgpu.BindGroupLayoutEntry{
		{Binding: 0, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeReadOnlyStorage}},
		{Binding: 1, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeReadOnlyStorage}},
		{Binding: 2, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeStorage}},
		{Binding: 3, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeUniform}},
	}
	layout, _ := device.CreateBindGroupLayout(&wgpu.BindGroupLayoutDescriptor{Entries: entries})
	return layout
}

// ----------------------------
// LU Factorization Kernel
// ----------------------------
// luKernelWGSL performs one elimination step (step k) of an in-place
// Doolittle LU factorization of the n×n row-major matrix A. One dispatch
// per k is expected: each invocation idx updates either U's row k entry
// A[k][idx] (idx >= k) or L's column k entry A[idx][k] (idx > k), with
// L's entries divided by the pivot A[k][k] (set to 0 when the pivot is
// near zero, |pivot| < 1e-6).
//
// NOTE(review): the thread with idx == k rewrites the diagonal A[k*n+k]
// in the row-update branch, while threads idx > k read the same element
// as the divisor in the column-update branch — there is no barrier
// between the two, so this looks like a data race across invocations of
// a single dispatch. Confirm the host's dispatch strategy (e.g. whether
// the row and column phases are actually separate dispatches).
const luKernelWGSL = `
struct Params {
    k: u32,
    n: u32,
    pad0: u32,
    pad1: u32,
};
@group(0) @binding(0) var<storage, read_write> A: array<f32>;
@group(0) @binding(1) var<uniform> p: Params;
@compute @workgroup_size(256)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    let idx = gid.x;
    let k = p.k;
    let n = p.n;
    if (k >= n) { return; }
    // Update row k (U)
    if (idx >= k && idx < n) {
        var sum: f32 = 0.0;
        for (var p_idx: u32 = 0u; p_idx < k; p_idx++) {
            sum += A[k * n + p_idx] * A[p_idx * n + idx];
        }
        A[k * n + idx] -= sum;
    }
    // Update column k (L)
    if (idx > k && idx < n) {
        var sum: f32 = 0.0;
        for (var p_idx: u32 = 0u; p_idx < k; p_idx++) {
            sum += A[idx * n + p_idx] * A[p_idx * n + k];
        }
        if (abs(A[k * n + k]) < 1e-6) {
            A[idx * n + k] = 0.0;
        } else {
            A[idx * n + k] = (A[idx * n + k] - sum) / A[k * n + k];
        }
    }
}`

// ----------------------------
// Forward Substitution (L * y = Pb)
// L is n×n (stored in full), Pb and y are flat vectors of length n
// ----------------------------
// forwardSubWGSL solves L*y = b by forward substitution, fully serially:
// the kernel runs as a single thread (workgroup_size 1, all other
// invocations return early) because each y[i] depends on y[0..i-1].
// There is no division by L's diagonal, i.e. L is taken to be unit
// lower-triangular, as produced by the LU kernels in this file.
const forwardSubWGSL = `
struct Params {
    n: u32,
    pad0: u32,
    pad1: u32,
    pad2: u32,
};
@group(0) @binding(0) var<storage, read> L: array<f32>;
@group(0) @binding(1) var<storage, read> B: array<f32>;
@group(0) @binding(2) var<storage, read_write> Y: array<f32>;
@group(0) @binding(3) var<uniform> p: Params;
@compute @workgroup_size(1)  // 修复:从 256 改为 1
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    // 保持防御性检查(虽然 workgroup 大小为1,但保留以增加健壮性)
    if (gid.x != 0u) { return; }
    for (var i: u32 = 0u; i < p.n; i++) {
        var sum: f32 = 0.0;
        for (var j: u32 = 0u; j < i; j++) {
            sum += L[i * p.n + j] * Y[j];
        }
        Y[i] = B[i] - sum;
    }
}`

// ----------------------------
// Backward Substitution (U * x = y)
// U is n×n (stored in full), y and x are flat vectors of length n
// ----------------------------
// backwardSubWGSL solves U*x = y by backward substitution, fully
// serially (single thread, same reasoning as forwardSubWGSL): rows are
// processed from i = n-1 down to 0, since each x[i] depends on
// x[i+1..n-1]. The division by the diagonal U[i][i] is unguarded here;
// presumably singularity is caught earlier (e.g. the pivot sentinel in
// luFullWGSL) — confirm the host checks before dispatching this.
const backwardSubWGSL = `
struct Params {
    n: u32,
    pad0: u32,
    pad1: u32,
    pad2: u32,
};
@group(0) @binding(0) var<storage, read> U: array<f32>;
@group(0) @binding(1) var<storage, read> Y: array<f32>;
@group(0) @binding(2) var<storage, read_write> X: array<f32>;
@group(0) @binding(3) var<uniform> p: Params;
@compute @workgroup_size(1)  // 修复:从 256 改为 1
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    if (gid.x != 0u) { return; }
    for (var ii: u32 = 0u; ii < p.n; ii++) {
        let i = p.n - 1u - ii;
        var sum: f32 = 0.0;
        for (var j: u32 = i + 1u; j < p.n; j++) {
            sum += U[i * p.n + j] * X[j];
        }
        let diag = U[i * p.n + i];
        X[i] = (Y[i] - sum) / diag;
    }
}`

// find_pivot.wgsl
// findPivotWGSL implements partial pivoting for step k: it scans column
// k from row k downward for the entry of largest absolute value and
// writes the winning row index into pivot_out[k]. The search is done by
// thread 0 alone (a simple serial scan, as the in-shader comment notes),
// so the 256-wide workgroup is mostly idle — a parallel reduction would
// be an optimization, not a correctness change.
const findPivotWGSL = `
struct Params {
    k: u32,
    n: u32,
    pad0: u32,
    pad1: u32,
};
@group(0) @binding(0) var<storage, read> A: array<f32>;
@group(0) @binding(1) var<storage, read_write> pivot_out: array<u32>; // size = n
@group(0) @binding(2) var<uniform> p: Params;
@compute @workgroup_size(256)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    let k = p.k;
    let n = p.n;
    if (k >= n) { return; }
    // Only one thread does the search (simpler for now)
    if (gid.x != 0u) { return; }
    var imax = k;
    var max_val = abs(A[k * n + k]);
    for (var i = k + 1u; i < n; i++) {
        let val = abs(A[i * n + k]);
        if (val > max_val) {
            max_val = val;
            imax = i;
        }
    }
    pivot_out[k] = imax;
}`

// swapRowsWGSL exchanges rows row1 and row2 of the n-column row-major
// matrix A, one invocation per column, restricted to columns in
// [k_start, n). Limiting the swap to k_start onward lets the host
// preserve the already-computed L entries to the left of the current
// pivot column during a pivoted factorization.
const swapRowsWGSL = `
struct Params {
    row1: u32,
    row2: u32,
    n: u32,
    k_start: u32,  // ← 改名 pad 为 k_start
};
@group(0) @binding(0) var<storage, read_write> A: array<f32>;
@group(0) @binding(1) var<uniform> p: Params;
@compute @workgroup_size(256)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    let col = gid.x;
    if (col < p.k_start || col >= p.n) { return; }  // ← 只交换 [k_start, n)
    let idx1 = p.row1 * p.n + col;
    let idx2 = p.row2 * p.n + col;
    let temp = A[idx1];
    A[idx1] = A[idx2];
    A[idx2] = temp;
}`

// ----------------------------
// Layout Creators
// ----------------------------
// createLUKernelLayout builds the bind-group layout matching
// luKernelWGSL: the in-place matrix A as read-write storage (binding 0)
// and the Params uniform (binding 1), compute-stage only.
//
// NOTE(review): the CreateBindGroupLayout error is discarded, matching
// the file's pattern; callers get nil on failure.
func createLUKernelLayout(device *wgpu.Device) *wgpu.BindGroupLayout {
	entries := []wgpu.BindGroupLayoutEntry{
		{Binding: 0, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeStorage}},
		{Binding: 1, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeUniform}},
	}
	layout, _ := device.CreateBindGroupLayout(&wgpu.BindGroupLayoutDescriptor{Entries: entries})
	return layout
}

// createLUSolveLayout builds the bind-group layout shared by the
// forward- and backward-substitution kernels: two read-only storage
// inputs (bindings 0-1), one read-write storage output (binding 2), and
// the Params uniform (binding 3), compute-stage only.
//
// NOTE(review): the CreateBindGroupLayout error is discarded, matching
// the file's pattern; callers get nil on failure.
func createLUSolveLayout(device *wgpu.Device) *wgpu.BindGroupLayout {
	entries := []wgpu.BindGroupLayoutEntry{
		{Binding: 0, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeReadOnlyStorage}},
		{Binding: 1, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeReadOnlyStorage}},
		{Binding: 2, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeStorage}},
		{Binding: 3, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeUniform}},
	}
	layout, _ := device.CreateBindGroupLayout(&wgpu.BindGroupLayoutDescriptor{Entries: entries})
	return layout
}

// createFindPivotLayout builds the bind-group layout matching
// findPivotWGSL: read-only storage A (binding 0), read-write pivot
// output (binding 1), and the Params uniform (binding 2), compute-stage
// only.
//
// NOTE(review): the CreateBindGroupLayout error is discarded, matching
// the file's pattern; callers get nil on failure.
func createFindPivotLayout(device *wgpu.Device) *wgpu.BindGroupLayout {
	entries := []wgpu.BindGroupLayoutEntry{
		{Binding: 0, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeReadOnlyStorage}},
		{Binding: 1, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeStorage}},
		{Binding: 2, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeUniform}},
	}
	layout, _ := device.CreateBindGroupLayout(&wgpu.BindGroupLayoutDescriptor{Entries: entries})
	return layout
}

// createSwapRowsLayout builds the bind-group layout matching
// swapRowsWGSL: the matrix A as read-write storage (binding 0) and the
// Params uniform (binding 1), compute-stage only.
//
// NOTE(review): the CreateBindGroupLayout error is discarded, matching
// the file's pattern; callers get nil on failure.
func createSwapRowsLayout(device *wgpu.Device) *wgpu.BindGroupLayout {
	entries := []wgpu.BindGroupLayoutEntry{
		{Binding: 0, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeStorage}},
		{Binding: 1, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeUniform}},
	}
	layout, _ := device.CreateBindGroupLayout(&wgpu.BindGroupLayoutDescriptor{Entries: entries})
	return layout
}

// luFullWGSL runs an entire partially-pivoted LU factorization of the
// n×n row-major matrix A in a single serial thread (workgroup_size 1;
// all other invocations return early). For each step k it: (1) finds
// the largest-magnitude pivot in column k at or below row k, (2) swaps
// rows of A and the permutation map `pivot` accordingly, (3) bails out
// if |pivot| < 1e-5, (4) divides the sub-diagonal column by the pivot
// (L entries), and (5) rank-1-updates the trailing submatrix (U).
// On success pivot[i] holds the original row now at position i; if a
// near-zero pivot was hit, pivot[0] is set to the sentinel 0xFFFFFFFF
// and the factorization in A is left incomplete. Params.debug_pivot is
// declared but never written by this kernel (the k == 0 branch below is
// empty), so the host cannot rely on it.
const luFullWGSL = `
// lu_full.wgsl
struct Params {
    n: u32,
    debug_pivot: f32, // 用于调试主元值
    pad1: u32,
    pad2: u32,
};

@group(0) @binding(0) var<storage, read_write> A: array<f32>;
@group(0) @binding(1) var<storage, read_write> pivot: array<u32>;
@group(0) @binding(2) var<uniform> p: Params;

@compute @workgroup_size(1)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    if (gid.x != 0u || gid.y != 0u || gid.z != 0u) {
        return;
    }
    
    let n = p.n;
    if (n == 0u) {
        return;
    }

    // 初始化置换映射
    for (var i: u32 = 0u; i < n; i++) {
        pivot[i] = i;
    }

    var is_singular = 0u;

    for (var k: u32 = 0u; k < n; k++) {
        // 1. 找主元（在A[k:n][k]中）
        var imax = k;
        var max_val = abs(A[k * n + k]);
        for (var i = k + 1u; i < n; i++) {
            let val = abs(A[i * n + k]);
            if (val > max_val) {
                max_val = val;
                imax = i;
            }
        }

        // 2. 行交换（同时交换A和pivot映射）
        if (imax != k) {
            for (var col = 0u; col < n; col++) {
                let temp = A[k * n + col];
                A[k * n + col] = A[imax * n + col];
                A[imax * n + col] = temp;
            }
            let temp_p = pivot[k];
            pivot[k] = pivot[imax];
            pivot[imax] = temp_p;
        }

        // 3. 检查主元（使用更宽松的阈值）
        let diag = A[k * n + k];
        // 调试：将主元值存入debug字段（虽然不能打印，但可验证）
        if (k == 0u) {
            // 第一个主元存入debug_pivot
            // 注意：这是一个hack，实际应该用更稳健的方式
        }
        
        if (abs(diag) < 1e-5) {  // 放宽阈值
            is_singular = 1u;
            break;
        }

        // 4. 计算L列（严格下三角）
        for (var i = k + 1u; i < n; i++) {
            A[i * n + k] = A[i * n + k] / diag;
        }

        // 5. 更新右下子矩阵
        for (var i = k + 1u; i < n; i++) {
            for (var j = k + 1u; j < n; j++) {
                A[i * n + j] = A[i * n + j] - A[i * n + k] * A[k * n + j];
            }
        }
    }
    
    // 6. 标记奇异
    if (is_singular == 1u) {
        pivot[0] = 0xFFFFFFFFu;
    }
}`

// createLUFullLayout builds the bind-group layout matching luFullWGSL:
// read-write storage for A (binding 0) and the pivot map (binding 1),
// plus the Params uniform (binding 2), compute-stage only.
//
// NOTE(review): the CreateBindGroupLayout error is discarded, matching
// the file's pattern; callers get nil on failure.
func createLUFullLayout(device *wgpu.Device) *wgpu.BindGroupLayout {
	entries := []wgpu.BindGroupLayoutEntry{
		{Binding: 0, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeStorage}},
		{Binding: 1, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeStorage}},
		{Binding: 2, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeUniform}},
	}
	layout, _ := device.CreateBindGroupLayout(&wgpu.BindGroupLayoutDescriptor{Entries: entries})
	return layout
}

// apply_pivot.wgsl
// applyPivotWGSL applies a row permutation to the right-hand-side
// vector: Pb[i] = b[piv[i]], one invocation per element, where piv is
// the permutation map produced by the LU kernels (piv[i] = original row
// now at position i). Out-of-range invocations return early.
const applyPivotWGSL = `
struct Params {
    n: u32,
    pad0: u32,
    pad1: u32,
    pad2: u32,
};
@group(0) @binding(0) var<storage, read> b: array<f32>;
@group(0) @binding(1) var<storage, read> piv: array<u32>; // 置换映射
@group(0) @binding(2) var<storage, read_write> Pb: array<f32>;
@group(0) @binding(3) var<uniform> p: Params;
@compute @workgroup_size(256)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    let i = gid.x;
    if (i >= p.n) { return; }
    // 正确应用置换:新第i行 = 原第 piv[i] 行
    Pb[i] = b[piv[i]];
}`

// createApplyPivotLayout builds the bind-group layout matching
// applyPivotWGSL: read-only storage for b and the u32 permutation map
// piv (bindings 0-1), read-write storage for the permuted output Pb
// (binding 2), and the Params uniform (binding 3), compute-stage only.
//
// NOTE(review): the CreateBindGroupLayout error is discarded, matching
// the file's pattern; callers get nil on failure.
func createApplyPivotLayout(device *wgpu.Device) *wgpu.BindGroupLayout {
	entries := []wgpu.BindGroupLayoutEntry{
		{Binding: 0, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeReadOnlyStorage}},
		{Binding: 1, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeReadOnlyStorage}}, // piv as u32, but WGSL reads as array<u32>
		{Binding: 2, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeStorage}},
		{Binding: 3, Visibility: wgpu.ShaderStageCompute, Buffer: wgpu.BufferBindingLayout{Type: wgpu.BufferBindingTypeUniform}},
	}
	layout, _ := device.CreateBindGroupLayout(&wgpu.BindGroupLayoutDescriptor{Entries: entries})
	return layout
}
