use std::sync::{Arc, Mutex,atomic::{AtomicPtr,AtomicIsize,}};

/**
 * #define  mi_atomic(name)        std::atomic_##name
 * #define  mi_memory_order(name)  std::memory_order_##name
 * #define MI_ATOMIC_VAR_INIT(x)  ATOMIC_VAR_INIT(x)
 */

 
 use std::sync::atomic::{AtomicBool, AtomicU8, AtomicUsize, Ordering};

 // Equivalent to `std::atomic_*` in C
 // In Rust, the atomic types are `AtomicBool`, `AtomicU8`, `AtomicUsize`, etc.
 pub type MiAtomicBool = AtomicBool;   // C: _Atomic(bool)
 pub type MiAtomicU8 = AtomicU8;       // C: _Atomic(uint8_t)
 pub type MiAtomicUsize = AtomicUsize; // C: _Atomic(size_t / uintptr_t)
 
 // Equivalent to `std::memory_order_*` in C
 // Rust uses the `Ordering` enum for memory ordering
 pub mod mi_memory_order {
     //! C `memory_order_*` names mapped onto Rust's [`Ordering`] variants,
     //! mirroring the `mi_memory_order(name)` macro from mimalloc's atomic.h.
     use std::sync::atomic::Ordering;

     pub const RELAXED: Ordering = Ordering::Relaxed;
     pub const ACQUIRE: Ordering = Ordering::Acquire;
     pub const RELEASE: Ordering = Ordering::Release;
     pub const ACQ_REL: Ordering = Ordering::AcqRel;
     pub const SEQ_CST: Ordering = Ordering::SeqCst;
 }
 
 // Equivalent to `ATOMIC_VAR_INIT(x)` in C
 // In Rust, atomic variables are initialized using `Atomic*::new(x)`
 /// Equivalent of C's `ATOMIC_VAR_INIT(x)`.
 ///
 /// `mi_atomic_var_init!(x)` yields an `AtomicUsize` (the historical behavior).
 /// `mi_atomic_var_init!(T, x)` is a backward-compatible generalization that
 /// constructs any atomic type `T` — C's macro is type-generic, so the
 /// usize-only form was needlessly narrow.
 #[macro_export]
 macro_rules! mi_atomic_var_init {
     ($x:expr) => {
         std::sync::atomic::AtomicUsize::new($x)
     };
     ($t:ty, $x:expr) => {
         <$t>::new($x)
     };
 }

 /// Weak compare-and-swap (C11 `atomic_compare_exchange_weak_explicit`).
///
/// On failure the value actually observed in `p` is written back into
/// `expected`, matching C11 semantics, so callers can retry in a loop
/// without reloading. May fail spuriously even when `*expected` matches.
///
/// Returns `Ok(previous)` on success, `Err(observed)` on failure.
pub fn mi_atomic_cas_weak(
    p: &AtomicUsize,
    expected: &mut usize,
    desired: usize,
    mem_success: Ordering,
    mem_fail: Ordering,
) -> Result<usize, usize> {
    let result = p.compare_exchange_weak(*expected, desired, mem_success, mem_fail);
    if let Err(observed) = result {
        // C11 contract: a failed CAS stores the observed value into `expected`.
        // The previous version took `&mut` but never wrote it back.
        *expected = observed;
    }
    result
}

// Atomic CAS strong (compare_exchange_strong)
/// Strong compare-and-swap (C11 `atomic_compare_exchange_strong_explicit`).
///
/// On failure the value actually observed in `p` is written back into
/// `expected`, matching C11 semantics. Unlike the weak variant, never
/// fails spuriously.
///
/// Returns `Ok(previous)` on success, `Err(observed)` on failure.
pub fn mi_atomic_cas_strong(
    p: &AtomicUsize,
    expected: &mut usize,
    desired: usize,
    mem_success: Ordering,
    mem_fail: Ordering,
) -> Result<usize, usize> {
    let result = p.compare_exchange(*expected, desired, mem_success, mem_fail);
    if let Err(observed) = result {
        // C11 contract: a failed CAS stores the observed value into `expected`.
        // The previous version took `&mut` but never wrote it back.
        *expected = observed;
    }
    result
}

// Load with acquire memory order
/// Acquire-ordered load of `p` (C `mi_atomic_load_acquire`).
pub fn mi_atomic_load_acquire(p: &AtomicUsize) -> usize {
    AtomicUsize::load(p, Ordering::Acquire)
}

// Load with relaxed memory order
/// Relaxed load of `p`; imposes no ordering (C `mi_atomic_load_relaxed`).
pub fn mi_atomic_load_relaxed(p: &AtomicUsize) -> usize {
    AtomicUsize::load(p, Ordering::Relaxed)
}

// Store with release memory order
/// Release-ordered store of `x` into `p` (C `mi_atomic_store_release`).
pub fn mi_atomic_store_release(p: &AtomicUsize, x: usize) {
    AtomicUsize::store(p, x, Ordering::Release);
}

// Store with relaxed memory order
/// Relaxed store of `x` into `p`; no ordering (C `mi_atomic_store_relaxed`).
pub fn mi_atomic_store_relaxed(p: &AtomicUsize, x: usize) {
    AtomicUsize::store(p, x, Ordering::Relaxed);
}

// Exchange with release memory order
/// Atomically replace the value in `p` with `x` using release ordering,
/// returning the previously stored value.
pub fn mi_atomic_exchange_release(p: &AtomicUsize, x: usize) -> usize {
    AtomicUsize::swap(p, x, Ordering::Release)
}

// Exchange with acquire-release memory order
/// Atomically replace the value in `p` with `x` using acquire-release
/// ordering, returning the previously stored value.
pub fn mi_atomic_exchange_acq_rel(p: &AtomicUsize, x: usize) -> usize {
    AtomicUsize::swap(p, x, Ordering::AcqRel)
}

// CAS weak with release and relaxed memory order
/// Weak CAS using release ordering on success and relaxed on failure
/// (C `mi_atomic_cas_weak_release`).
pub fn mi_atomic_cas_weak_release(
    p: &AtomicUsize,
    expected: &mut usize,
    desired: usize,
) -> Result<usize, usize> {
    let (success, failure) = (Ordering::Release, Ordering::Relaxed);
    mi_atomic_cas_weak(p, expected, desired, success, failure)
}

// CAS weak with acquire-release and acquire memory order
/// Weak CAS using acquire-release ordering on success and acquire on
/// failure (C `mi_atomic_cas_weak_acq_rel`).
pub fn mi_atomic_cas_weak_acq_rel(
    p: &AtomicUsize,
    expected: &mut usize,
    desired: usize,
) -> Result<usize, usize> {
    let (success, failure) = (Ordering::AcqRel, Ordering::Acquire);
    mi_atomic_cas_weak(p, expected, desired, success, failure)
}

// CAS strong with release and relaxed memory order
/// Strong CAS using release ordering on success and relaxed on failure
/// (C `mi_atomic_cas_strong_release`).
pub fn mi_atomic_cas_strong_release(
    p: &AtomicUsize,
    expected: &mut usize,
    desired: usize,
) -> Result<usize, usize> {
    let (success, failure) = (Ordering::Release, Ordering::Relaxed);
    mi_atomic_cas_strong(p, expected, desired, success, failure)
}

// CAS strong with acquire-release and acquire memory order
/// Strong CAS using acquire-release ordering on success and acquire on
/// failure (C `mi_atomic_cas_strong_acq_rel`).
pub fn mi_atomic_cas_strong_acq_rel(
    p: &AtomicUsize,
    expected: &mut usize,
    desired: usize,
) -> Result<usize, usize> {
    let (success, failure) = (Ordering::AcqRel, Ordering::Acquire);
    mi_atomic_cas_strong(p, expected, desired, success, failure)
}

// Atomic addition with relaxed memory order
/// Relaxed fetch-and-add: adds `x` to `p` and returns the prior value.
pub fn mi_atomic_add_relaxed(p: &AtomicUsize, x: usize) -> usize {
    AtomicUsize::fetch_add(p, x, Ordering::Relaxed)
}

// Atomic subtraction with relaxed memory order
/// Relaxed fetch-and-subtract: subtracts `x` from `p`, returning the prior value.
pub fn mi_atomic_sub_relaxed(p: &AtomicUsize, x: usize) -> usize {
    AtomicUsize::fetch_sub(p, x, Ordering::Relaxed)
}

// Atomic addition with acquire-release memory order
/// Acquire-release fetch-and-add: adds `x` to `p`, returning the prior value.
pub fn mi_atomic_add_acq_rel(p: &AtomicUsize, x: usize) -> usize {
    AtomicUsize::fetch_add(p, x, Ordering::AcqRel)
}

// Atomic subtraction with acquire-release memory order
/// Acquire-release fetch-and-subtract: subtracts `x` from `p`, returning the prior value.
pub fn mi_atomic_sub_acq_rel(p: &AtomicUsize, x: usize) -> usize {
    AtomicUsize::fetch_sub(p, x, Ordering::AcqRel)
}

// Atomic AND with acquire-release memory order
/// Acquire-release fetch-and-AND: bitwise-ANDs `x` into `p`, returning the prior value.
pub fn mi_atomic_and_acq_rel(p: &AtomicUsize, x: usize) -> usize {
    AtomicUsize::fetch_and(p, x, Ordering::AcqRel)
}

// Atomic OR with acquire-release memory order
/// Acquire-release fetch-and-OR: bitwise-ORs `x` into `p`, returning the prior value.
pub fn mi_atomic_or_acq_rel(p: &AtomicUsize, x: usize) -> usize {
    AtomicUsize::fetch_or(p, x, Ordering::AcqRel)
}

// Increment with relaxed memory order
/// Relaxed increment by one; returns the pre-increment value.
pub fn mi_atomic_increment_relaxed(p: &AtomicUsize) -> usize {
    p.fetch_add(1, Ordering::Relaxed)
}

// Decrement with relaxed memory order
/// Relaxed decrement by one; returns the pre-decrement value.
pub fn mi_atomic_decrement_relaxed(p: &AtomicUsize) -> usize {
    p.fetch_sub(1, Ordering::Relaxed)
}

// Increment with acquire-release memory order
/// Acquire-release increment by one; returns the pre-increment value.
pub fn mi_atomic_increment_acq_rel(p: &AtomicUsize) -> usize {
    p.fetch_add(1, Ordering::AcqRel)
}

// Decrement with acquire-release memory order
/// Acquire-release decrement by one; returns the pre-decrement value.
pub fn mi_atomic_decrement_acq_rel(p: &AtomicUsize) -> usize {
    p.fetch_sub(1, Ordering::AcqRel)
}

// Atomic Yield (no equivalent in Rust, would need to be handled differently in Rust)
/// Cooperative yield (C `mi_atomic_yield`): hint the OS scheduler to run
/// another thread. Rust's `std::thread::yield_now` covers all platforms.
pub fn mi_atomic_yield() {
    std::thread::yield_now()
}

// Atomic addition for intptr_t
/// SeqCst fetch-and-add for signed (`intptr_t`-like) counters; returns the prior value.
pub fn mi_atomic_addi(p: &AtomicIsize, add: isize) -> isize {
    AtomicIsize::fetch_add(p, add, Ordering::SeqCst)
}

// Atomic subtraction for intptr_t
/// SeqCst fetch-and-subtract for signed (`intptr_t`-like) counters; returns the prior value.
pub fn mi_atomic_subi(p: &AtomicIsize, sub: isize) -> isize {
    AtomicIsize::fetch_sub(p, sub, Ordering::SeqCst)
}
 
// TODO: platform/compiler-specific C intrinsics for acceleration

pub type MiAtomicOnce = AtomicUsize; 

/// Returns `true` exactly once: for the caller that transitions `once`
/// from 0 to 1. Every later (or concurrently losing) caller gets `false`.
///
/// Fixes two bugs in the previous version: the winning CAS returned the
/// *old* value 0, so `unwrap() != 0` gave the winner `false`; and a lost
/// race produced `Err`, making `unwrap()` panic instead of returning `false`.
pub fn mi_atomic_once(once: &MiAtomicOnce) -> bool {
    // Fast path: already initialized, no write needed.
    if once.load(Ordering::Relaxed) != 0 {
        return false;
    }
    // Race for the 0 -> 1 transition; only the winner observes Ok.
    once.compare_exchange(0, 1, Ordering::AcqRel, Ordering::Acquire)
        .is_ok()
}

pub type MiAtomicGuard = AtomicUsize;

/// Mirror of mimalloc's `mi_atomic_guard` macro: try to take the guard
/// (0 -> 1); if acquired, run the guarded critical section once, then
/// release it (store 0). If another thread already holds the guard, the
/// section is skipped entirely — there is no spinning.
///
/// Fixes the previous version, which (a) panicked via `unwrap()` when the
/// guard was contended and (b) on a successful CAS compared the *old*
/// value (`0 != 0`), so the loop body never ran and the guard was left
/// locked forever.
pub fn mi_atomic_guard(guard: &AtomicUsize) {
    // Single 0 -> 1 acquisition attempt, matching the C macro's
    // "skip if already guarded" semantics.
    if guard
        .compare_exchange(0, 1, Ordering::AcqRel, Ordering::Acquire)
        .is_ok()
    {
        // Critical section guarded by `guard` goes here.

        // Release the guard so a later caller may enter.
        guard.store(0, Ordering::Release);
    }
}
// yield
// TODO: platform-specific yield implementations (e.g. pause/wfe instructions)
