use std::io;
use std::sync::Mutex;

use half::f16;
use once_cell::sync::{Lazy, OnceCell};

use crate::tensor::Tensor;

// NPU hardware constants.
const NPU_ALIGN_K: usize = 64; // K dimension must be aligned to 64 for FP16 matmul
const NPU_MIN_M: usize = 1; // Minimum rows of A for the NPU path (effectively "non-empty")
const NPU_MIN_K: usize = 32; // Minimum inner (K) dimension considered worth offloading
const NPU_MIN_N: usize = 8; // Minimum rows of B (= columns of C) considered worth offloading

// Memory pool for NPU operations.
// Wraps the opened NPU device handle; intended to be created once (via the
// NPU_POOL global) and kept alive for the remainder of the process.
struct NpuMemoryPool {
    // Open handle to the RK3588 NPU character device.
    device: rk3588_rs::NpuDevice,
}

impl NpuMemoryPool {
    /// Open the NPU device and reset it to a known-clean state.
    ///
    /// Fails if the device node cannot be opened or the reset ioctl fails.
    fn new() -> io::Result<Self> {
        let device = rk3588_rs::NpuDevice::open()?;
        device.reset()?;
        Ok(Self { device })
    }

    /// Borrow the underlying NPU device handle.
    fn get_device(&self) -> &rk3588_rs::NpuDevice {
        &self.device
    }
}

// Global NPU memory pool with lazy initialization
static NPU_POOL: Lazy<Mutex<Option<NpuMemoryPool>>> = Lazy::new(|| Mutex::new(None));

/// Get or initialize the global NPU device
fn get_npu_device() -> io::Result<&'static rk3588_rs::NpuDevice> {
    let mut pool = NPU_POOL.lock().unwrap();

    if pool.is_none() {
        *pool = Some(NpuMemoryPool::new()?);
    }

    // This is safe because we never remove the NpuMemoryPool once created
    // and we're holding the lock
    let pool_ref = pool.as_ref().unwrap();
    let device_ptr = pool_ref.get_device() as *const rk3588_rs::NpuDevice;

    Ok(unsafe { &*device_ptr })
}

/// Check if dimensions are suitable for NPU acceleration.
///
/// Small matrices are dominated by setup and DMA overhead, so anything below
/// the per-dimension minimums or the total-work threshold stays on the CPU.
fn should_use_npu(m: usize, k: usize, n: usize) -> bool {
    // Total-operations threshold below which offloading doesn't pay off.
    const MIN_TOTAL_OPS: usize = 4096;

    let dims_ok = m >= NPU_MIN_M && k >= NPU_MIN_K && n >= NPU_MIN_N;
    dims_ok && m * k * n >= MIN_TOTAL_OPS
}

/// Round `k` up to the next multiple of `NPU_ALIGN_K` (identity for 0 and
/// for values already aligned).
fn align_k(k: usize) -> usize {
    match k % NPU_ALIGN_K {
        0 => k,
        rem => k + (NPU_ALIGN_K - rem),
    }
}

/// NPU-accelerated matrix multiplication: C = beta * C + alpha * A @ B^T
/// A: (m x k), B: (n x k), C: (m x n)
pub fn npu_matmul_transb<T: Copy + Default + num_traits::Float>(
    c: &mut Tensor<T>,
    beta: T,
    a: &Tensor<T>,
    b: &Tensor<T>,
    alpha: T,
) -> io::Result<()> {
    let m = a.shape()[0];
    let k = a.shape()[1];
    let n = b.shape()[0];

    // Align K dimension
    let k_aligned = align_k(k);

    // Memory size requirements
    let regcmd_size = 1024;
    let tasks_size = 1024;
    let input_size = (m * k_aligned * 2).max(4096); // FP16, minimum 4KB
    let weight_size = (n * k_aligned * 2).max(4096); // FP16, minimum 4KB
    let output_size = (m * n * 4).max(4096); // FP32, minimum 4KB

    // Get global NPU device (lazy initialized)
    let device = get_npu_device()?;

    // Allocate memory for this operation
    let mut regcmd_mem = device.mem_allocate(regcmd_size, 0)?;
    let mut tasks_mem = device.mem_allocate(tasks_size, rk3588_rs::RKNPU_MEM_KERNEL_MAPPING)?;
    let mut input_mem = device.mem_allocate(input_size, 0)?;
    let mut weights_mem = device.mem_allocate(weight_size, 0)?;
    let mut output_mem = device.mem_allocate(output_size, 0)?;

    // Prepare NPU task
    let mut npu_regs = [0u64; 112];
    let mut params = rk3588_rs::MatmulParams {
        m: m as u16,
        k: k_aligned as u16,
        n: n as u16,
        input_dma: input_mem.dma_addr() as u32,
        weights_dma: weights_mem.dma_addr() as u32,
        output_dma: output_mem.dma_addr() as u32,
        tasks: npu_regs.as_mut_ptr(),
        fp32tofp16: 0, // Output as FP32
    };

    rk3588_rs::gen_matmul_fp16(&mut params).map_err(|e| {
        io::Error::new(
            io::ErrorKind::Other,
            format!("gen_matmul_fp16 failed: {}", e),
        )
    })?;

    // Copy register commands to NPU memory
    let regcmd_slice = regcmd_mem.as_slice_mut();
    unsafe {
        std::ptr::copy_nonoverlapping(
            npu_regs.as_ptr() as *const u8,
            regcmd_slice.as_mut_ptr(),
            std::mem::size_of_val(&npu_regs),
        );
    }

    // Convert matrix A to FP16 and fill input memory with proper layout
    let input_slice = input_mem.as_slice_mut();
    let input_fp16 = unsafe {
        std::slice::from_raw_parts_mut(input_slice.as_mut_ptr() as *mut f16, m * k_aligned)
    };

    // Zero out padding
    input_fp16.fill(f16::from_f32(0.0));

    let data_a = a.data();
    for row in 0..m {
        for col in 0..k {
            // feature_data parameters: (c, h, w, c2, c_val, h_val, w_val)
            // h should be calculated based on m to ensure proper plane calculation
            let h = ((m + 3) / 4).max(1) as i32; // Round up to nearest multiple of 4, minimum 1
            let idx = rk3588_rs::feature_data(
                k_aligned as i32,
                h,
                1,
                8,
                (col + 1) as i32,
                (row + 1) as i32,
                1,
            ) as usize;

            // Bounds check to prevent panic
            if idx >= input_fp16.len() {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!(
                        "Index out of bounds in input: idx={}, len={}, row={}, col={}, k_aligned={}, m={}, h={}",
                        idx,
                        input_fp16.len(),
                        row,
                        col,
                        k_aligned,
                        m,
                        h
                    ),
                ));
            }

            let value = data_a[row * k + col];
            input_fp16[idx] = f16::from_f32(value.to_f32().unwrap());
        }
    }

    // Convert matrix B to FP16 and fill weights memory with proper layout
    let weights_slice = weights_mem.as_slice_mut();
    let weights_fp16 = unsafe {
        std::slice::from_raw_parts_mut(weights_slice.as_mut_ptr() as *mut f16, n * k_aligned)
    };

    // Zero out padding
    weights_fp16.fill(f16::from_f32(0.0));

    let data_b = b.data();
    for row in 0..n {
        for col in 0..k {
            let idx = rk3588_rs::weight_fp16(k_aligned as i32, (row + 1) as i32, (col + 1) as i32)
                as usize;

            // Bounds check to prevent panic
            if idx >= weights_fp16.len() {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!(
                        "Index out of bounds in weights: idx={}, len={}, row={}, col={}, k_aligned={}, n={}",
                        idx,
                        weights_fp16.len(),
                        row,
                        col,
                        k_aligned,
                        n
                    ),
                ));
            }

            let value = data_b[row * k + col];
            weights_fp16[idx] = f16::from_f32(value.to_f32().unwrap());
        }
    }

    // Clear output memory
    let output_slice = output_mem.as_slice_mut();
    output_slice[..(m * n * 4)].fill(0);

    // Setup task structure
    let tasks_slice = tasks_mem.as_slice_mut();
    let tasks = unsafe { &mut *(tasks_slice.as_mut_ptr() as *mut rk3588_rs::RknpuTask) };
    tasks.flags = 0;
    tasks.op_idx = 0;
    tasks.enable_mask = 0xd;
    tasks.int_mask = 0x300;
    tasks.int_clear = 0x1ffff;
    tasks.int_status = 0;
    tasks.regcfg_amount = (npu_regs.len() as u32) - (rk3588_rs::RKNPU_PC_DATA_EXTRA_AMOUNT + 4);
    tasks.regcfg_offset = 0;
    tasks.regcmd_addr = regcmd_mem.dma_addr();

    // Submit task to NPU
    let mut submit = rk3588_rs::RknpuSubmit {
        flags: rk3588_rs::RKNPU_JOB_PC | rk3588_rs::RKNPU_JOB_BLOCK | rk3588_rs::RKNPU_JOB_PINGPONG,
        timeout: 6000,
        task_start: 0,
        task_number: 1,
        task_counter: 0,
        priority: 0,
        task_obj_addr: tasks_mem.obj_addr(),
        regcfg_obj_addr: 0,
        task_base_addr: 0,
        user_data: 0,
        core_mask: 1,
        fence_fd: -1,
        subcore_task: [
            rk3588_rs::RknpuSubcoreTask {
                task_start: 0,
                task_number: 1,
            },
            rk3588_rs::RknpuSubcoreTask {
                task_start: 1,
                task_number: 0,
            },
            rk3588_rs::RknpuSubcoreTask {
                task_start: 2,
                task_number: 0,
            },
            rk3588_rs::RknpuSubcoreTask {
                task_start: 0,
                task_number: 0,
            },
            rk3588_rs::RknpuSubcoreTask {
                task_start: 0,
                task_number: 0,
            },
        ],
    };

    device.submit(&mut submit)?;

    // Read back results and apply alpha/beta scaling
    let output_f32 =
        unsafe { std::slice::from_raw_parts(output_slice.as_ptr() as *const f32, m * n) };

    let data_c = c.data_mut();
    // Calculate h for output based on m dimension
    let h_output = ((m + 3) / 4).max(1) as i32; // Round up to nearest multiple of 4, minimum 1

    for row in 0..m {
        for col in 0..n {
            // Output layout: feature_data(n, h, w, c2, col+1, row+1, 1)
            // where h is calculated based on m
            let idx = rk3588_rs::feature_data(
                n as i32,
                h_output,
                1,
                4,
                (col + 1) as i32,
                (row + 1) as i32,
                1,
            ) as usize;

            // Bounds check for output
            if idx >= output_f32.len() {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!(
                        "Index out of bounds in output: idx={}, len={}, row={}, col={}, n={}, m={}, h_output={}",
                        idx,
                        output_f32.len(),
                        row,
                        col,
                        n,
                        m,
                        h_output
                    ),
                ));
            }

            let npu_result = output_f32[idx];

            // C = beta * C + alpha * (A @ B^T)
            let old_value = data_c[row * n + col];
            let new_value = T::from(
                beta.to_f32().unwrap() * old_value.to_f32().unwrap()
                    + alpha.to_f32().unwrap() * npu_result,
            )
            .unwrap();
            data_c[row * n + col] = new_value;
        }
    }

    // 释放内存
    device.mem_destroy(regcmd_mem.handle(), regcmd_mem.obj_addr())?;
    device.mem_destroy(tasks_mem.handle(), tasks_mem.obj_addr())?;
    device.mem_destroy(input_mem.handle(), input_mem.obj_addr())?;
    device.mem_destroy(weights_mem.handle(), weights_mem.obj_addr())?;
    device.mem_destroy(output_mem.handle(), output_mem.obj_addr())?;
    Ok(())
}

/// CPU fallback for small matrices or when NPU fails.
///
/// Computes C = beta * C + alpha * A @ B^T entirely on the CPU.
/// A: (m x k), B: (n x k), C: (m x n).
pub fn cpu_matmul_transb<
    T: Copy + Default + num_traits::Float + std::iter::Sum + std::ops::MulAssign + std::ops::AddAssign,
>(
    c: &mut Tensor<T>,
    beta: T,
    a: &Tensor<T>,
    b: &Tensor<T>,
    alpha: T,
) {
    let k = a.shape()[1];
    let n = b.shape()[0];

    let data_a = a.data();
    let data_b = b.data();

    // Derive B's row stride from its actual buffer length rather than trusting
    // possibly-inaccurate shape metadata, and clamp the reduction length so
    // neither operand is indexed out of bounds.
    let k_b = data_b.len() / n;
    let actual_k = k.min(k_b);

    // Single pass over C in row-major order: element (i, j) sits at flat
    // index i * n + j, so recover (i, j) from the flat index directly instead
    // of materializing the full A @ B^T product first.
    for (idx, c_ele) in c.data_mut().iter_mut().enumerate() {
        let (i, j) = (idx / n, idx % n);
        let dot: T = (0..actual_k)
            .map(|x| data_a[i * k + x] * data_b[j * k_b + x])
            .sum();
        *c_ele *= beta;
        *c_ele += alpha * dot;
    }
}

/// Smart matrix multiplication that chooses between NPU and CPU.
///
/// Shapes that pass `should_use_npu` are dispatched to the NPU; any NPU
/// failure is logged to stderr and the computation transparently falls back
/// to the CPU implementation, so callers always get a result.
pub fn smart_matmul_transb<
    T: Copy + Default + num_traits::Float + std::iter::Sum + std::ops::MulAssign + std::ops::AddAssign,
>(
    c: &mut Tensor<T>,
    beta: T,
    a: &Tensor<T>,
    b: &Tensor<T>,
    alpha: T,
) {
    let (m, k) = (a.shape()[0], a.shape()[1]);
    let n = b.shape()[0];

    if !should_use_npu(m, k, n) {
        // Too small for the NPU to pay off — go straight to the CPU path.
        cpu_matmul_transb(c, beta, a, b, alpha);
        return;
    }

    if let Err(e) = npu_matmul_transb(c, beta, a, b, alpha) {
        // Fall back to CPU if NPU fails.
        eprintln!(
            "NPU matmul failed (M={}, K={}, N={}): {}, falling back to CPU",
            m, k, n, e
        );
        cpu_matmul_transb(c, beta, a, b, alpha);
    }
}
