use std::{
    future::Future,
    ops::Deref,
    sync::{
        atomic::{AtomicUsize, Ordering},
        Arc, Mutex,
    },
};
use tokio::task::AbortHandle;

/// Returns `true` when the most significant bit of `val` is set — the
/// convention used throughout this file to mark "not a valid slot index"
/// sentinel values (e.g. the end of the free list).
fn usize_msb_marked(val: usize) -> bool {
    // Shifting the MSB down into bit 0 and comparing is equivalent to
    // masking against `1 << (usize::BITS - 1)`.
    val >> (usize::BITS - 1) == 1
}
/// Builds the canonical MSB-marked sentinel: a `usize` whose only set bit
/// is the most significant one. Used as the "end of free list" marker.
fn new_msb_marked_usize() -> usize {
    // `usize::MAX >> 1` clears exactly the MSB; negating leaves only it.
    !(usize::MAX >> 1)
}

/// One entry in a chunk: either occupied by a live task's abort handle or
/// linked into the intrusive free list.
struct Slot {
    // `Some(handle)` while a spawned task occupies this slot, `None` when
    // the slot is free (see `SlotHandle::give_back`, which clears it).
    data: Option<AbortHandle>,
    // Global index of the next free slot; an MSB-marked value is the
    // end-of-list sentinel. Presumably only meaningful while this slot is
    // on the free list — it goes stale once the slot is handed out.
    next_free: usize,
}

/// State shared (via `Arc`) between the `AutoAbort` owner and every
/// spawned task's `SlotHandle`.
struct Coordination {
    // Set to `true` in `AutoAbort::drop`. A task that observes `true` in
    // `SlotHandle::give_back` must not touch its slot anymore, since the
    // slot storage is about to be (or already being) torn down.
    abort_all_started: Mutex<bool>,
    // Head of the lock-free free list: the global index of the first free
    // slot, or an MSB-marked sentinel when the list is empty.
    next_free: AtomicUsize,
}

/// Tracks spawned tokio tasks and aborts every still-running one when
/// dropped. `C` is the chunk size — the number of slots allocated at a
/// time; `new` asserts it is a nonzero power of two whose largest index
/// does not collide with the MSB sentinel marker.
pub(crate) struct AutoAbort<const C: usize> {
    // Shared with every `SlotHandle` handed to a spawned task.
    coordination: Arc<Coordination>,
    // Slot storage. Boxed fixed-size arrays keep individual slot addresses
    // stable even when this `Vec` of pointers reallocates; the list only
    // ever grows (see the safety comments relying on that).
    chunks: Vec<Box<[Slot; C]>>,
}

impl<const C: usize> AutoAbort<C> {
    fn new_free_chunk(after_n_chunks: usize) -> (usize, Box<[Slot; C]>) {
        let Some(new_size) = C.checked_mul(after_n_chunks + 1) else {
            panic!("index number would exceed type limit!")
        };
        if usize_msb_marked(new_size - 1) {
            panic!("index number would be considered invalid!")
        }
        let first_idx = new_size - C;
        let second_idx = first_idx + 1;
        let mut new_chunk = std::array::from_fn(|i| Slot {
            data: None,
            next_free: second_idx + i,
        });
        new_chunk[C - 1].next_free = new_msb_marked_usize();
        (first_idx, Box::new(new_chunk))
    }

    pub(crate) fn new() -> Self {
        assert!(C > 0 && C.count_ones() == 1 && !usize_msb_marked(C - 1));
        Self {
            coordination: Arc::new(Coordination {
                abort_all_started: Mutex::new(false),
                next_free: AtomicUsize::new(0),
            }),
            chunks: vec![Self::new_free_chunk(0).1],
        }
    }

    /// # Safety
    ///
    /// The caller must ensure that `idx < self.chunks.len() * C`
    unsafe fn idx_to_slot_ref(&self, idx: usize) -> &Slot {
        self.chunks.get_unchecked(idx / C).get_unchecked(idx % C)
    }

    /// # Safety
    ///
    /// The caller must ensure that `idx < self.chunks.len() * C`
    unsafe fn idx_to_slot_mut(&mut self, idx: usize) -> &mut Slot {
        self.chunks
            .get_unchecked_mut(idx / C)
            .get_unchecked_mut(idx % C)
    }

    pub(crate) fn spawn<F: Future + Send + 'static>(&mut self, f: F) {
        let next_free = 'blk: {
            // try to get the next free slot in the free list first:
            let mut cur = self.coordination.next_free.load(Ordering::Relaxed);
            while !usize_msb_marked(cur) {
                // the above check tell us that there is a valid free slot at the
                // head of our free list, we set the second slot as the new head
                // so we can use the first one.
                let new = unsafe { self.idx_to_slot_ref(cur) }.next_free;
                match self.coordination.next_free.compare_exchange_weak(
                    cur,
                    new,
                    Ordering::Relaxed,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => break 'blk new,
                    Err(updated) => cur = updated,
                }
            }

            // the check above tells us that there is no free slot, we allocate
            // a new free chunk, use the first slot in the new chunk, and link
            // the rest to the front of our free list:
            let (start_idx, chunk) = Self::new_free_chunk(self.chunks.len());
            self.chunks.push(chunk);
            let chunk = self.chunks.last_mut().unwrap();
            let _ = self.coordination.next_free.fetch_update(
                Ordering::Relaxed,
                Ordering::Relaxed,
                |it| {
                    chunk[C - 1].next_free = it;
                    Some(start_idx + 1)
                },
            );
            start_idx
        };

        let coordination = Arc::clone(&self.coordination);
        let slot = unsafe { self.idx_to_slot_mut(next_free) };
        let slot_handle = SlotHandle {
            coordination,
            slot_idx: next_free,
            slot_ptr: slot,
        };
        slot.data = Some(
            tokio::spawn(async move {
                f.await;
                slot_handle.give_back();
            })
            .abort_handle(),
        );
    }
}

impl<const C: usize> Drop for AutoAbort<C> {
    fn drop(&mut self) {
        *self.coordination.abort_all_started.lock().unwrap() = true;
        // from this point on, any task grabbing the lock would know that
        // the abort all process has started, thus will immediately exit,
        // thus we have exclusive access to everything
        self.chunks
            .iter()
            .flat_map(Deref::deref)
            .filter_map(|slot| slot.data.as_ref())
            .for_each(AbortHandle::abort);
    }
}

/// Moved into each spawned task; when the task's future completes it calls
/// `give_back` to return its slot to the free list.
struct SlotHandle {
    // Shared coordination state; keeping the `Arc` alive means the
    // `abort_all_started` lock and free-list head outlive the task.
    coordination: Arc<Coordination>,
    // Points into one of the boxed chunks of the owning `AutoAbort`.
    slot_ptr: *mut Slot,
    // Global index of the slot `slot_ptr` points at, pushed onto the free
    // list when the slot is given back.
    slot_idx: usize,
}

// SAFETY: `slot_ptr` points to a heap allocated memory region, and as per
// the safety discussion in `SlotHandle::give_back`, the use of this ptr
// is safe.
unsafe impl Send for SlotHandle {}

impl SlotHandle {
    /// Called by the spawned task on completion: clears the slot's abort
    /// handle and pushes the slot back onto the free list so `spawn` can
    /// recycle it. If the abort-all process (i.e. dropping the owning
    /// `AutoAbort`) has already started, the slot is deliberately left
    /// untouched — the whole structure is being torn down anyway.
    fn give_back(self) {
        // this wait should be very short when the abort-all process
        // hasn't started.
        let abort_all_started = self.coordination.abort_all_started.lock().unwrap();
        if *abort_all_started {
            return;
        }

        // SAFETY: the lock above guarantees that the `AutoAbort` (and thus the
        // chunks) will not be dropped while we are holding the lock, and since
        // the chunk list also never shrinks, the slot ptr is guaranteed to be
        // valid.
        let slot = unsafe { &mut *self.slot_ptr };
        slot.data = None;
        // BUGFIX: push with Release on success so the non-atomic
        // `slot.next_free` write inside the closure happens-before the
        // popper in `spawn` (which Acquire-loads the head and then
        // dereferences it). With Relaxed there was no synchronization
        // edge at all, so the popper could read a stale `next_free`.
        // The fetch side stays Relaxed: we never dereference the fetched
        // old head, we only store it into our own link field (and the
        // closure is simply re-run with a fresh value on CAS failure).
        let _ = self
            .coordination
            .next_free
            .fetch_update(Ordering::Release, Ordering::Relaxed, |it| {
                slot.next_free = it;
                Some(self.slot_idx)
            });

        drop(abort_all_started);
    }
}
