use std::sync::Arc;

use cudarc::cublas::Gemm;
use cudarc::cublas::{sys::cublasOperation_t, CudaBlas, GemmConfig};
use cudarc::driver::{CudaDevice, DeviceSlice};
use tensor::Tensor;

mod tensor;

/// Computes `C = α * A * B^T + β * C` on the GPU via cuBLAS.
///
/// Shapes (row-major): `a` is `(m, k)`, `b` is `(n, k)` (used transposed),
/// `c` is `(m, n)` and serves as both input (scaled by `beta`) and output.
///
/// cuBLAS works on column-major data, while our tensors are row-major. A
/// row-major `(m, n)` matrix is byte-identical to a column-major `(n, m)`
/// matrix, so instead of `C = A * B^T` we ask cuBLAS for
/// `C^T = (B^T)^T... `— concretely: pass `b` first with `OP_T`, `a` second
/// with `OP_N`, and dimensions `(n, m, k)`. The result lands in `c` already
/// in row-major order with no explicit transposes or copies.
///
/// # Panics
/// Panics if the inner dimensions of `a`/`b` disagree, if `c` has the wrong
/// shape, or if any CUDA/cuBLAS call fails (`unwrap` on device init,
/// transfers, and the kernel launch).
pub fn matmul_transb(c: &mut Tensor<f32>, beta: f32, a: &Tensor<f32>, b: &Tensor<f32>, alpha: f32) {
    let (m, k) = (a.shape()[0], a.shape()[1]);
    let (n, k2) = (b.shape()[0], b.shape()[1]);
    assert_eq!(
        k, k2,
        "Inner dimensions must match for matrix multiplication"
    );
    assert_eq!(c.shape(), &vec![m, n], "Output matrix shape mismatch");

    // NOTE(review): creating a device handle + cuBLAS context per call is
    // expensive; callers doing many multiplications should cache these.
    let dev = CudaDevice::new(0).unwrap();
    let blas = CudaBlas::new(dev.clone()).unwrap();

    // Tensor<f32> already holds f32 data — no unsafe pointer casts needed.
    let a_host: &[f32] = a.data();
    let b_host: &[f32] = b.data();
    let c_host: &mut [f32] = c.data_mut();

    // Transfer operands (and the initial C, needed when beta != 0) to the GPU.
    let a_dev = dev.htod_sync_copy(a_host).unwrap();
    let b_dev = dev.htod_sync_copy(b_host).unwrap();
    let mut c_dev = dev.htod_sync_copy(c_host).unwrap();
    let m = m as i32;
    let n = n as i32;
    let k = k as i32;

    // Column-major view of the row-major computation (see doc comment above):
    // compute the (n, m) column-major product, which is C row-major.
    let gemm_cfg = GemmConfig {
        transa: cublasOperation_t::CUBLAS_OP_T,
        transb: cublasOperation_t::CUBLAS_OP_N,
        m: n,
        n: m,
        k: k,
        alpha: alpha,
        lda: k,
        ldb: k,
        beta: beta,
        ldc: n,
    };

    // SAFETY: the device buffers were sized from the host slices whose
    // lengths match the dimensions and leading dimensions in `gemm_cfg`,
    // so cuBLAS stays within bounds.
    unsafe {
        blas.gemm(gemm_cfg, &b_dev, &a_dev, &mut c_dev).unwrap();
    }

    // Copy the result back into the caller's tensor.
    dev.dtoh_sync_copy_into(&c_dev, c_host).unwrap();
}

// Empty entry point: this binary exists only so the file builds; all
// functionality is exercised through the #[test] functions below.
fn main() {}

#[test]
fn test_matmul_transb() {
    // alpha = beta = 1: the product A * B^T is accumulated onto the initial C.
    let a = Tensor::<f32>::new(vec![1., 2., 3., 4., 5., 6.], &vec![2, 3]);
    let b = Tensor::<f32>::new(vec![1., 2., 3., 4., 5., 6.], &vec![2, 3]);
    let mut c = Tensor::<f32>::new(vec![1., 2., 3., 4.], &vec![2, 2]);

    matmul_transb(&mut c, 1., &a, &b, 1.);

    // A * B^T = [[14, 32], [32, 77]]; adding the initial C gives the values below.
    let expected = Tensor::<f32>::new(vec![15., 34., 35., 81.], &vec![2, 2]);
    assert!(c.close_to(&expected, 1e-3));
}

#[test]
fn test3() {
    // beta = 0: the initial contents of C are ignored entirely.
    let data: Vec<f32> = (1..=6).map(|x| x as f32).collect();
    let a = Tensor::new(data.clone(), &vec![2, 3]);
    let b = Tensor::new(data, &vec![2, 3]);
    let mut c = Tensor::new(vec![0.0; 4], &vec![2, 2]);

    matmul_transb(&mut c, 0.0, &a, &b, 1.0);

    // Pure A * B^T for A = B = [[1,2,3],[4,5,6]].
    assert!(c.close_to(&Tensor::new(vec![14.0, 32.0, 32.0, 77.0], &vec![2, 2]), 1e-5));
}

#[test]
fn test_mulmat_transb_comprehensive() {
    // Small helper to cut down on Tensor-construction noise.
    let t = |data: Vec<f32>, shape: Vec<usize>| Tensor::<f32>::new(data, &shape);

    // Case 1: 1x1 matrices with non-trivial scaling — 2*3*4 + 0.5*2 = 25.
    let mut c = t(vec![2.], vec![1, 1]);
    matmul_transb(&mut c, 0.5, &t(vec![3.], vec![1, 1]), &t(vec![4.], vec![1, 1]), 2.0);
    assert!(c.close_to(&t(vec![25.], vec![1, 1]), 1e-3));

    // Case 2: A = 0 reduces the call to C = beta * C; beta = 1 leaves C alone.
    let mut c = t(vec![1., 2., 3., 4.], vec![2, 2]);
    let a = t(vec![0.; 6], vec![2, 3]);
    let b = t(vec![1., 2., 3., 4., 5., 6.], vec![2, 3]);
    matmul_transb(&mut c, 1.0, &a, &b, 1.0);
    assert!(c.close_to(&t(vec![1., 2., 3., 4.], vec![2, 2]), 1e-3));

    // Case 3: I * I^T = I.
    let eye = vec![1., 0., 0., 1.];
    let mut c = t(vec![0.; 4], vec![2, 2]);
    matmul_transb(&mut c, 0.0, &t(eye.clone(), vec![2, 2]), &t(eye.clone(), vec![2, 2]), 1.0);
    assert!(c.close_to(&t(eye, vec![2, 2]), 1e-3));

    // Case 4: larger 4x8 operands with sequential values 0..32.
    let seq: Vec<f32> = (0..32).map(|x| x as f32).collect();
    let mut c = t(vec![0.; 16], vec![4, 4]);
    matmul_transb(&mut c, 0.0, &t(seq.clone(), vec![4, 8]), &t(seq, vec![4, 8]), 1.0);
    let expected = t(
        vec![
            140., 364., 588., 812., 364., 1100., 1836., 2572., 588., 1836., 3084., 4332., 812.,
            2572., 4332., 6092.,
        ],
        vec![4, 4],
    );
    assert!(c.close_to(&expected, 1e-3));

    // Case 5: both scalars non-trivial — 3 * (A * B^T) + 2 * C.
    let sq = vec![1., 2., 3., 4.];
    let mut c = t(sq.clone(), vec![2, 2]);
    matmul_transb(&mut c, 2.0, &t(sq.clone(), vec![2, 2]), &t(sq, vec![2, 2]), 3.0);
    assert!(c.close_to(&t(vec![17., 37., 39., 83.], vec![2, 2]), 1e-3));
}

#[test]
pub fn test_mulmat_transb2() {
    // Every row of `a` is the same near-one pair, so every row of the result
    // is b's rows dotted with (~1, ~1): roughly (0.3, 0.7, 1.1).
    let a = Tensor::<f32>::new(vec![0.9999995; 8], &vec![4, 2]);
    let b = Tensor::<f32>::new(vec![0.1, 0.2, 0.3, 0.4, 0.5, 0.6], &vec![3, 2]);
    let mut c = Tensor::<f32>::default(&vec![4, 3]);

    matmul_transb(&mut c, 0., &a, &b, 1.);

    // The same 3-element output row repeated for each of the 4 input rows.
    let expected = vec![0.29999986_f32, 0.6999997, 1.0999994].repeat(4);
    assert!(c.close_to(&Tensor::<f32>::new(expected, &vec![4, 3]), 1e-3));
}
