
#![allow(unreachable_patterns)]
#![allow(unused_variables)]

use std::ops::{DerefMut, RangeFrom};
use std::ptr;
use std::time::Duration;
use std::{cell::RefCell, ffi::CString};

use windows::core::Error as WinError;
use windows::Win32::System::IO::OVERLAPPED;

use crate::win32_trace;

use super::aliased_cell::AliasedCell;
use super::share_memory::OsIpcSharedMemory;

use super::enums::{BlockingMode, WinIpcError};
use super::ipc_bridge::{OsOpaqueIpcChannel, OsIpcSelectionResult};
use super::{message::MessageReader, win_handle::WinHandle};

use windows::Win32::Storage::FileSystem;
use windows::Win32::System::Pipes;


/// The receiving end of an IPC channel: a pipe handle together with the
/// buffering / async-read state needed to reassemble complete messages
/// from the raw byte stream.
#[derive(Debug)]
pub struct OsIpcReceiver {
    /// The receive handle and its associated state.
    ///
    /// We can't just deal with raw handles like in the other platform back-ends,
    /// since this implementation -- using plain pipes with no native packet handling --
    /// requires keeping track of various bits of receiver state,
    /// which must not be separated from the handle itself.
    ///
    /// Note: Inner mutability is necessary,
    /// since the `consume()` method needs to move out the reader
    /// despite only getting a shared reference to `self`.
    pub(super) reader: RefCell<MessageReader>,
}

#[cfg(feature = "windows-shared-memory-equality")]
impl PartialEq for OsIpcReceiver {
    /// Two receivers are considered equal when they wrap the same pipe handle.
    fn eq(&self, other: &OsIpcReceiver) -> bool {
        let ours = self.reader.borrow();
        let theirs = other.reader.borrow();
        ours.handle == theirs.handle
    }
}

impl OsIpcReceiver {
    pub(super) fn from_handle(handle: WinHandle) -> OsIpcReceiver {
        OsIpcReceiver {
            reader: RefCell::new(MessageReader::new(handle)),
        }
    }

    pub(super) fn new_named(pipe_name: &CString) -> Result<OsIpcReceiver, WinError> {
        unsafe {
            // create the pipe server
            let handle = windows::Win32::System::Pipes::CreateNamedPipeA(
                windows::core::PCSTR::from_raw(pipe_name.as_ptr() as *const u8),
                FileSystem::PIPE_ACCESS_INBOUND | FileSystem::FILE_FLAG_OVERLAPPED,
                Pipes::PIPE_TYPE_BYTE | Pipes::PIPE_READMODE_BYTE | Pipes::PIPE_REJECT_REMOTE_CLIENTS,
                // 1 max instance of this pipe
                1,
                // out/in buffer sizes
                0,
                super::utils::PIPE_BUFFER_SIZE as u32,
                0, // default timeout for WaitNamedPipe (0 == 50ms as default)
                None,
            )?;

            Ok(OsIpcReceiver {
                reader: RefCell::new(MessageReader::new(WinHandle::new(handle))),
            })
        }
    }

    pub(super) fn prepare_for_transfer(&self) -> Result<bool, WinError> {
        let mut reader = self.reader.borrow_mut();
        // cancel any outstanding IO request
        reader.cancel_io();
        // this is only okay if we have nothing in the read buf
        Ok(reader.read_buf.is_empty())
    }

    pub fn consume(&self) -> OsIpcReceiver {
        let mut reader = self.reader.borrow_mut();
        assert!(reader.r#async.is_none());
        OsIpcReceiver::from_handle(reader.handle.take())
    }

    // This is only used for recv/try_recv/try_recv_timeout.  When this is added to an IpcReceiverSet, then
    // the implementation in select() is used.  It does much the same thing, but across multiple
    // channels.
    fn receive_message(
        &self,
        mut blocking_mode: BlockingMode,
    ) -> Result<(Vec<u8>, Vec<OsOpaqueIpcChannel>, Vec<OsIpcSharedMemory>), WinIpcError> {
        let mut reader = self.reader.borrow_mut();
        assert!(
            reader.entry_id.is_none(),
            "receive_message is only valid before this OsIpcReceiver was added to a Set"
        );
        // This function loops, because in the case of a blocking read, we may need to
        // read multiple sets of bytes from the pipe to receive a complete message.
        loop {
            // First, try to fetch a message, in case we have one pending
            // in the reader's receive buffer
            if let Some((data, channels, shmems)) = reader.get_message()? {
                return Ok((data, channels, shmems));
            }

            // Then, issue a read if we don't have one already in flight.
            reader.start_read()?;

            // Attempt to complete the read.
            //
            // May return `WinError::NoData` in non-blocking mode.
            // The async read remains in flight in that case;
            // and another attempt at getting a result
            // can be done the next time we are called.
            reader.fetch_async_result(blocking_mode)?;

            // If we're not blocking, pretend that we are blocking, since we got part of
            // a message already.  Keep reading until we get a complete message.
            blocking_mode = BlockingMode::Blocking;
        }
    }

    pub fn recv(
        &self,
    ) -> Result<(Vec<u8>, Vec<OsOpaqueIpcChannel>, Vec<OsIpcSharedMemory>), WinIpcError> {
        win32_trace!("recv");
        self.receive_message(BlockingMode::Blocking)
    }

    pub fn try_recv(
        &self,
    ) -> Result<(Vec<u8>, Vec<OsOpaqueIpcChannel>, Vec<OsIpcSharedMemory>), WinIpcError> {
        win32_trace!("try_recv");
        self.receive_message(BlockingMode::Nonblocking)
    }

    pub fn try_recv_timeout(
        &self,
        duration: Duration,
    ) -> Result<(Vec<u8>, Vec<OsOpaqueIpcChannel>, Vec<OsIpcSharedMemory>), WinIpcError> {
        win32_trace!("try_recv_timeout");
        self.receive_message(BlockingMode::Timeout(duration))
    }

    /// Do a pipe connect.
    ///
    /// Only used for one-shot servers.
    pub(super) fn accept(&self) -> Result<(), WinError> {
        unsafe {
            let reader_borrow = self.reader.borrow();
            let handle = &reader_borrow.handle;
            // Boxing this to get a stable address is not strictly necesssary here,
            // since we are not moving the local variable around -- but better safe than sorry...
            let mut ov = AliasedCell::new(Box::new(std::mem::zeroed::<OVERLAPPED>()));
            let ok = Pipes::ConnectNamedPipe(handle.as_raw(), Some(ov.alias_mut().deref_mut()));

            // we should always get false with async IO
            assert_eq!(ok, false);
            let result = match windows::Win32::Foundation::GetLastError() {
                // did we successfully connect? (it's reported as an error [ok==false])
                ERROR_PIPE_CONNECTED => {
                    win32_trace!("[$ {:?}] accept (PIPE_CONNECTED)", handle.as_raw());
                    Ok(())
                },

                // This is a weird one -- if we create a named pipe (like we do
                // in new() ), the client connects, sends data, then drops its handle,
                // a Connect here will get ERROR_NO_DATA -- but there may be data in
                // the pipe that we'll be able to read.  So we need to go do some reads
                // like normal and wait until ReadFile gives us ERROR_NO_DATA.
                ERROR_NO_DATA => {
                    win32_trace!("[$ {:?}] accept (ERROR_NO_DATA)", handle.as_raw());
                    Ok(())
                },

                // the connect is pending; wait for it to complete
                ERROR_IO_PENDING => {
                    let mut nbytes: u32 = 0;
                    let ok = windows::Win32::System::IO::GetOverlappedResult(
                        handle.as_raw(),
                        ov.alias_mut().deref_mut(),
                        &mut nbytes,
                        true,
                    );
                    if ok == false {
                        return Err(WinError::from_win32());
                    }
                    Ok(())
                },

                // Anything else signifies some actual I/O error.
                err => {
                    win32_trace!("[$ {:?}] accept error -> {:?}", handle.as_raw(), err);
                    Err(WinError::new(
                        err.to_hresult(),
                        windows::core::HSTRING::from("ConnectNamedPipe"),
                    ))
                },
            };

            ov.into_inner();
            result
        }
    }

    /// Does a single explicitly-sized recv from the handle,
    /// consuming the receiver in the process.
    ///
    /// This is used for receiving data from the out-of-band big data buffer.
    pub(super) fn recv_raw(self, size: usize) -> Result<Vec<u8>, WinIpcError> {
        self.reader.into_inner().read_raw_sized(size)
    }
}



// We need to explicitly declare this, because of the raw pointer
// contained in the `OVERLAPPED` structure.
//
// Note: the `Send` claim is only really fulfilled
// as long as nothing can ever alias the aforementioned raw pointer.
// As explained in the documentation of the `async` field,
// this is a tricky condition (because of kernel aliasing),
// which we however need to uphold regardless of the `Send` property --
// so claiming `Send` should not introduce any additional issues.
//
// SAFETY: relies on the invariant above -- the `OVERLAPPED` pointer inside
// `MessageReader` must never be aliased from another thread.
unsafe impl Send for OsIpcReceiver {}




/// A set of receivers that are polled together through a single
/// I/O completion port (IOCP), as used by `select()`.
pub struct OsIpcReceiverSet {
    /// Our incrementor, for unique handle IDs.
    incrementor: RangeFrom<u64>,

    /// The IOCP that we select on.
    iocp: WinHandle,

    /// The set of receivers, stored as MessageReaders.
    readers: Vec<MessageReader>,

    /// Readers that got closed before adding them to the set.
    ///
    /// These need to report a "closed" event on the next `select()` call.
    ///
    /// Only the `entry_id` is necessary for that.
    closed_readers: Vec<u64>,
}

impl Drop for OsIpcReceiverSet {
    fn drop(&mut self) {
        // We need to cancel any in-flight read operations before we drop the receivers,
        // since otherwise the receivers' `Drop` implementation would try to cancel them --
        // but the implementation there doesn't work for receivers in a set...
        for reader in &mut self.readers {
            reader.issue_async_cancel();
        }

        // Wait for any reads still in flight to complete,
        // thus freeing the associated async data.
        //
        // Readers with no operation in flight can be dropped right away;
        // only those still awaiting a (now cancelled) read must stay around
        // until the IOCP reports their completion.
        self.readers.retain(|r| r.r#async.is_some());
        while !self.readers.is_empty() {
            // We unwrap the outer result (can't deal with the IOCP call failing here),
            // but don't care about the actual results of the completed read operations.
            //
            // Note: `fetch_iocp_result()` removes the matching reader from
            // `self.readers`, so this loop terminates once every pending
            // operation has been accounted for.
            let _ = self.fetch_iocp_result().unwrap();
        }
    }
}

impl OsIpcReceiverSet {
    /// Create an empty receiver set, backed by a freshly-created IOCP.
    pub fn new() -> Result<OsIpcReceiverSet, WinError> {
        unsafe {
            // Passing INVALID_HANDLE_VALUE with no existing port asks the
            // system to create a brand-new completion port.
            let iocp = windows::Win32::System::IO::CreateIoCompletionPort(windows::Win32::Foundation::INVALID_HANDLE_VALUE, None, 0, 0)?;

            Ok(OsIpcReceiverSet {
                incrementor: 0..,
                iocp: WinHandle::new(iocp),
                readers: vec![],
                closed_readers: vec![],
            })
        }
    }

    /// Add a receiver to the set, returning the unique `entry_id` that will
    /// identify it in `select()` results.
    ///
    /// Consumes the receiver; its reader is owned by the set from here on.
    /// If the sender side is already closed, the reader is not registered,
    /// and the next `select()` will report a `ChannelClosed` event instead.
    pub fn add(&mut self, receiver: OsIpcReceiver) -> Result<u64, WinIpcError> {
        // consume the receiver, and take the reader out
        let mut reader = receiver.reader.into_inner();

        let entry_id = self.incrementor.next().unwrap();

        match reader.add_to_iocp(&self.iocp, entry_id) {
            Ok(()) => {
                win32_trace!(
                    "[# {:?}] ReceiverSet add {:?}, id {}",
                    self.iocp.as_raw(),
                    reader.get_raw_handle(),
                    entry_id
                );
                self.readers.push(reader);
            },
            Err(WinIpcError::ChannelClosed) => {
                // If the sender has already been closed, we need to stash this information,
                // so we can report the corresponding event in the next `select()` call.
                win32_trace!(
                    "[# {:?}] ReceiverSet add {:?} (closed), id {}",
                    self.iocp.as_raw(),
                    reader.get_raw_handle(),
                    entry_id
                );
                self.closed_readers.push(entry_id);
            },
            Err(err) => return Err(err),
        };

        Ok(entry_id)
    }

    /// Conclude an async read operation on any of the receivers in the set.
    ///
    /// This fetches a completion event from the set's IOCP;
    /// finds the matching `MessageReader`;
    /// removes it from the list of active readers
    /// (since no operation is in flight on this reader at this point);
    /// and notifies the reader of the completion event.
    ///
    /// If the IOCP call is successful, this returns the respective reader,
    /// along with an inner status describing the type of event received.
    /// This can be a success status, indicating data has been received,
    /// and is ready to be picked up with `get_message()` on the reader;
    /// an error status indicating that the sender connected to this receiver
    /// has closed the connection;
    /// or some other I/O error status.
    ///
    /// Unless a "closed" status is returned,
    /// the respective reader remains a member of the set,
    /// and the caller should add it back to the list of active readers
    /// after kicking off a new read operation on it.
    fn fetch_iocp_result(&mut self) -> Result<(MessageReader, Result<(), WinIpcError>), WinError> {
        unsafe {
            let mut nbytes: u32 = 0;
            let mut completion_key = 0;
            let mut ov_ptr: *mut OVERLAPPED = ptr::null_mut();
            // XXX use GetQueuedCompletionStatusEx to dequeue multiple CP at once!
            let ok = windows::Win32::System::IO::GetQueuedCompletionStatus(
                self.iocp.as_raw(),
                &mut nbytes,
                &mut completion_key,
                &mut ov_ptr,
                windows::Win32::System::Threading::INFINITE,
            );
            win32_trace!(
                "[# {:?}] GetQueuedCS -> ok:{} nbytes:{} key:{:?}",
                self.iocp.as_raw(),
                bool::from(ok),
                nbytes,
                completion_key
            );
            let io_result = if ok == false {
                let err = WinError::from_win32();

                // If the OVERLAPPED result is NULL, then the
                // function call itself failed or timed out.
                // Otherwise, the async IO operation failed, and
                // we want to hand the error to notify_completion below.
                if ov_ptr.is_null() {
                    return Err(err);
                }

                Err(err)
            } else {
                Ok(())
            };

            assert!(!ov_ptr.is_null());
            assert!(completion_key != 0);

            // Find the matching receiver.
            //
            // The completion key is matched against the raw pipe handle value,
            // so `add_to_iocp()` is expected to have registered each handle
            // with its own value as the key.
            let (reader_index, _) = self
                .readers
                .iter()
                .enumerate()
                .find(|&(_, ref reader)| {
                    let raw_handle = reader.r#async.as_ref().unwrap().alias().handle.as_raw();
                    raw_handle.0 as usize == completion_key
                })
                .expect(
                    "Windows IPC ReceiverSet got notification for a receiver it doesn't know about",
                );

            // Remove the entry from the set for now -- we will re-add it later,
            // if we can successfully initiate another async read operation.
            let mut reader = self.readers.swap_remove(reader_index);

            win32_trace!(
                "[# {:?}] result for receiver {:?}",
                self.iocp.as_raw(),
                reader.get_raw_handle()
            );

            // tell it about the completed IO op
            let result = reader.notify_completion(io_result);

            Ok((reader, result))
        }
    }

    /// Block until at least one event is available on any receiver in the set,
    /// and return all events gathered in the process.
    ///
    /// Events are either `DataReceived` (one per complete message)
    /// or `ChannelClosed`; a closed reader leaves the set for good.
    ///
    /// Panics if called on an empty set.
    pub fn select(&mut self) -> Result<Vec<OsIpcSelectionResult>, WinIpcError> {
        assert!(
            self.readers.len() + self.closed_readers.len() > 0,
            "selecting with no objects?"
        );
        win32_trace!(
            "[# {:?}] select() with {} active and {} closed receivers",
            self.iocp.as_raw(),
            self.readers.len(),
            self.closed_readers.len()
        );

        // the ultimate results
        let mut selection_results = vec![];

        // Process any pending "closed" events
        // from channels that got closed before being added to the set,
        // and thus received "closed" notifications while being added.
        self.closed_readers.drain(..).for_each(|entry_id| {
            selection_results.push(OsIpcSelectionResult::ChannelClosed(entry_id))
        });

        // Do this in a loop, because we may need to dequeue multiple packets to
        // read a complete message.
        while selection_results.is_empty() {
            let (mut reader, result) = self.fetch_iocp_result()?;

            let mut closed = match result {
                Ok(()) => false,
                Err(WinIpcError::ChannelClosed) => true,
                Err(err) => return Err(err.into()),
            };

            if !closed {
                // Drain as many messages as we can.
                while let Some((data, channels, shmems)) = reader.get_message()? {
                    win32_trace!(
                        "[# {:?}] receiver {:?} ({}) got a message",
                        self.iocp.as_raw(),
                        reader.get_raw_handle(),
                        reader.entry_id.unwrap()
                    );
                    selection_results.push(OsIpcSelectionResult::DataReceived(
                        reader.entry_id.unwrap(),
                        data,
                        channels,
                        shmems,
                    ));
                }
                win32_trace!(
                    "[# {:?}] receiver {:?} ({}) -- no message",
                    self.iocp.as_raw(),
                    reader.get_raw_handle(),
                    reader.entry_id.unwrap()
                );

                // Now that we are done frobbing the buffer,
                // we can safely initiate the next async read operation.
                closed = match reader.start_read() {
                    Ok(()) => {
                        // We just successfully reinstated it as an active reader --
                        // so add it back to the list.
                        //
                        // Note: `take()` is a workaround for the compiler not seeing
                        // that we won't actually be using it anymore after this...
                        self.readers.push(reader.take());
                        false
                    },
                    Err(WinIpcError::ChannelClosed) => true,
                    Err(err) => return Err(err),
                };
            }

            // If we got a "sender closed" notification --
            // either instead of new data,
            // or while trying to re-initiate an async read after receiving data --
            // add an event to this effect to the result list.
            if closed {
                win32_trace!(
                    "[# {:?}] receiver {:?} ({}) -- now closed!",
                    self.iocp.as_raw(),
                    reader.get_raw_handle(),
                    reader.entry_id.unwrap()
                );
                selection_results.push(OsIpcSelectionResult::ChannelClosed(
                    reader.entry_id.unwrap(),
                ));
            }
        }

        win32_trace!("select() -> {} results", selection_results.len());
        Ok(selection_results)
    }
}
