use tokio::io::AsyncWriteExt;
use tokio::sync::oneshot;
use tracing::debug;

use std::collections::HashMap;

use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::Ordering;

use crate::error::ConnectError;
use std::mem;
use std::task::{Context, Poll};
use tokio::io;
use tokio::sync::mpsc;
use tokio::time::{interval, Duration, Interval, MissedTickBehavior};
pub type Error = Box<dyn std::error::Error + Send + Sync + 'static>;

use crate::connection::*;
use crate::connector::*;
use crate::{
    ClientOp, Command, Event, Message, Multiplexer, PublishMessage, ServerInfo, ServerOp, Subject,
    Subscription, MULTIPLEXER_SID,
};

/// A connection handler which facilitates communication from channels to a single shared connection.
pub(crate) struct ConnectionHandler {
    /// The active connection to the server; all reads and writes go through it.
    connection: Connection,
    /// Used to (re)establish connections, emit client [`Event`]s, and track stats.
    connector: Connector,
    /// Active subscriptions keyed by subscription id (SID).
    subscriptions: HashMap<u64, Subscription>,
    /// Request/reply multiplexer; `None` until the first `Command::Request`.
    multiplexer: Option<Multiplexer>,
    /// Number of PINGs sent that have not yet been answered by a PONG.
    pending_pings: usize,
    /// Broadcasts the most recent [`ServerInfo`] to watchers.
    info_sender: tokio::sync::watch::Sender<ServerInfo>,
    /// Timer driving periodic PINGs; reset on any send or receive activity.
    ping_interval: Interval,
    /// Set by `Command::Reconnect`; acted upon by the event loop after flushing.
    should_reconnect: bool,
    /// Completion channels fulfilled once the write buffer has been flushed.
    flush_observers: Vec<oneshot::Sender<()>>,
    /// Set while the whole client is draining; triggers a clean shutdown.
    is_draining: bool,
}

impl ConnectionHandler {
    /// Creates a handler over an already-established `connection`.
    ///
    /// `ping_period` controls how often a PING is sent to the server; missed
    /// ticks are delayed (not bursted) so a stalled task doesn't flood pings.
    pub(crate) fn new(
        connection: Connection,
        connector: Connector,
        info_sender: tokio::sync::watch::Sender<ServerInfo>,
        ping_period: Duration,
    ) -> ConnectionHandler {
        let ping_interval = {
            let mut timer = interval(ping_period);
            timer.set_missed_tick_behavior(MissedTickBehavior::Delay);
            timer
        };

        ConnectionHandler {
            connection,
            connector,
            info_sender,
            ping_interval,
            subscriptions: HashMap::new(),
            multiplexer: None,
            pending_pings: 0,
            should_reconnect: false,
            flush_observers: Vec::new(),
            is_draining: false,
        }
    }

    /// Runs the connection's main event loop until the client closes.
    ///
    /// Each iteration awaits a [`ProcessFut`], which multiplexes the ping
    /// timer, incoming server operations, outgoing commands read from
    /// `receiver`, and write/flush progress on the socket. Depending on the
    /// exit reason it reconnects, honors an explicit reconnect request, or
    /// emits [`Event::Closed`] and returns.
    pub(crate) async fn process<'a>(&'a mut self, receiver: &'a mut mpsc::Receiver<Command>) {
        // One "round" of the event loop; see the `poll` implementation below.
        struct ProcessFut<'a> {
            handler: &'a mut ConnectionHandler,
            receiver: &'a mut mpsc::Receiver<Command>,
            // Scratch buffer for batched command receives; reused across polls.
            recv_buf: &'a mut Vec<Command>,
        }

        enum ExitReason {
            /// The connection is considered dead (optionally carrying the I/O error).
            Disconnected(Option<io::Error>),
            /// A `Command::Reconnect` asked us to drop and re-establish the connection.
            ReconnectRequested,
            /// The command channel closed or a drain finished; exit the client.
            Closed,
        }

        impl ProcessFut<'_> {
            // Maximum number of commands pulled from the channel per batch.
            const RECV_CHUNK_SIZE: usize = 16;

            /// Called on a ping-interval tick: either enqueues a PING, or, if
            /// too many pings are already outstanding, reports a disconnect.
            #[cold]
            fn ping(&mut self) -> Poll<ExitReason> {
                self.handler.pending_pings += 1;

                if self.handler.pending_pings > crate::MAX_PENDING_PINGS {
                    debug!(
                        "pending pings {}, max pings {}. disconnecting",
                        self.handler.pending_pings,
                        crate::MAX_PENDING_PINGS
                    );

                    Poll::Ready(ExitReason::Disconnected(None))
                } else {
                    // Send a PING to the server.
                    self.handler.connection.enqueue_write_op(&ClientOp::Ping);
                    Poll::Pending
                }
            }
        }

        impl Future for ProcessFut<'_> {
            type Output = ExitReason;

            /// Drives the connection forward.
            ///
            /// Returns one of the following:
            ///
            /// * `Poll::Pending` means that the connection
            ///   is blocked on all fronts or there are
            ///   no commands to send or receive
            /// * `Poll::Ready(ExitReason::Disconnected(_))` means
            ///   that an I/O operation failed and the connection
            ///   is considered dead.
            /// * `Poll::Ready(ExitReason::ReconnectRequested)` means
            ///   that a `Command::Reconnect` was processed.
            /// * `Poll::Ready(ExitReason::Closed)` means that
            ///   [`Self::receiver`] was closed, so there's nothing
            ///   more for us to do than to exit the client.
            fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
                // If the ping interval has elapsed, send a PING.
                // We need to be sure the waker is registered, therefore we need to poll until we
                // get a `Poll::Pending`. With a sane interval delay, this means that the loop
                // breaks at the second iteration.
                while self.handler.ping_interval.poll_tick(cx).is_ready() {
                    if let Poll::Ready(exit) = self.ping() {
                        return Poll::Ready(exit);
                    }
                }

                // Handle operations arriving from the server.
                loop {
                    // The read is non-blocking, so `Pending` breaks out of the loop.
                    match self.handler.connection.poll_read_op(cx) {
                        Poll::Pending => break,
                        // The server sent us an operation.
                        Poll::Ready(Ok(Some(server_op))) => {
                            self.handler.recv_server_msg(server_op);
                        }
                        Poll::Ready(Ok(None)) => {
                            return Poll::Ready(ExitReason::Disconnected(None))
                        }
                        Poll::Ready(Err(err)) => {
                            return Poll::Ready(ExitReason::Disconnected(Some(err)))
                        }
                    }
                }

                // Before handling any commands, drop any subscriptions which are draining
                // Note: safe to assume subscription drain has completed at this point, as we would have flushed
                // all outgoing UNSUB messages in the previous call to this fn, and we would have processed and
                // delivered any remaining messages to the subscription in the loop above.
                self.handler.subscriptions.retain(|_, s| !s.is_draining);

                if self.handler.is_draining {
                    // The entire connection is draining. This means we flushed outgoing messages in the previous
                    // call to this fn, we handled any remaining messages from the server in the loop above, and
                    // all subs were drained, so drain is complete and we should exit instead of processing any
                    // further messages
                    return Poll::Ready(ExitReason::Closed);
                }

                // Forward queued client commands to the server.
                // WARNING: after the following loop `handle_command`,
                // or other functions which call `enqueue_write_op`,
                // cannot be called anymore. Runtime wakeups won't
                // trigger a call to `poll_write`
                let mut made_progress = true;
                loop {
                    while !self.handler.connection.is_write_buf_full() {
                        debug_assert!(self.recv_buf.is_empty());

                        let Self {
                            recv_buf,
                            handler,
                            receiver,
                        } = &mut *self;

                        match receiver.poll_recv_many(cx, recv_buf, Self::RECV_CHUNK_SIZE) {
                            Poll::Pending => break,
                            Poll::Ready(1..) => {
                                made_progress = true;
                                for cmd in recv_buf.drain(..) {
                                    handler.send_msg_to_server(cmd);
                                }
                            }
                            // TODO: replace `_` with `0` after bumping MSRV to 1.75
                            Poll::Ready(_) => return Poll::Ready(ExitReason::Closed),
                        }
                    }

                    // The first round will poll both from
                    // the `receiver` and the writer, giving
                    // them both a chance to make progress
                    // and register `Waker`s.
                    //
                    // If writing is `Poll::Pending` we exit.
                    //
                    // If writing is completed we can repeat the entire
                    // cycle as long as the `receiver` doesn't end-up
                    // `Poll::Pending` immediately.
                    if !mem::take(&mut made_progress) {
                        break;
                    }

                    match self.handler.connection.poll_write(cx) {
                        Poll::Pending => {
                            // Write buffer couldn't be fully emptied
                            break;
                        }
                        Poll::Ready(Ok(())) => {
                            // Write buffer is empty
                            continue;
                        }
                        Poll::Ready(Err(err)) => {
                            return Poll::Ready(ExitReason::Disconnected(Some(err)))
                        }
                    }
                }

                // Flush when the connection asks for it, or when observers are
                // waiting for a flush to complete.
                if let (ShouldFlush::Yes, _) | (ShouldFlush::No, false) = (
                    self.handler.connection.should_flush(),
                    self.handler.flush_observers.is_empty(),
                ) {
                    match self.handler.connection.poll_flush(cx) {
                        Poll::Pending => {}
                        Poll::Ready(Ok(())) => {
                            // Flush finished; notify everyone waiting on it.
                            for observer in self.handler.flush_observers.drain(..) {
                                let _ = observer.send(());
                            }
                        }
                        Poll::Ready(Err(err)) => {
                            return Poll::Ready(ExitReason::Disconnected(Some(err)))
                        }
                    }
                }

                if mem::take(&mut self.handler.should_reconnect) {
                    return Poll::Ready(ExitReason::ReconnectRequested);
                }

                Poll::Pending
            }
        }

        // Buffer shared between rounds so batched receives don't reallocate.
        let mut recv_buf = Vec::with_capacity(ProcessFut::RECV_CHUNK_SIZE);
        loop {
            let process = ProcessFut {
                handler: self,
                receiver,
                recv_buf: &mut recv_buf,
            };

            match process.await {
                ExitReason::Disconnected(err) => {
                    debug!(?err, "disconnected");
                    if self.handle_disconnect().await.is_err() {
                        break;
                    };
                    debug!("reconnected");
                }
                ExitReason::Closed => {
                    // Safe to ignore result as we're shutting down anyway
                    self.connector.events_tx.try_send(Event::Closed).ok();
                    break;
                }
                ExitReason::ReconnectRequested => {
                    debug!("reconnect requested");
                    // Should be ok to ignore error, as that means we are not in connected state.
                    self.connection.stream.shutdown().await.ok();
                    if self.handle_disconnect().await.is_err() {
                        break;
                    };
                }
            }
        }
    }

    /// Handles a single operation received from the server.
    ///
    /// Receiving anything counts as activity, so the ping timer is reset.
    /// Messages are routed either to a matching subscription by SID, or —
    /// for `MULTIPLEXER_SID` — to the pending request whose reply token
    /// matches the message subject.
    fn recv_server_msg(&mut self, server_op: ServerOp) {
        self.ping_interval.reset();

        match server_op {
            ServerOp::Ping => {
                // Answer the server's keep-alive probe immediately.
                self.connection.enqueue_write_op(&ClientOp::Pong);
            }
            ServerOp::Pong => {
                debug!("received PONG");
                self.pending_pings = self.pending_pings.saturating_sub(1);
            }
            ServerOp::Error(error) => {
                self.connector
                    .events_tx
                    .try_send(Event::ServerError(error))
                    .ok();
            }
            ServerOp::Message {
                sid,
                subject,
                reply,
                payload,
                headers,
                status,
                description,
                length,
            } => {
                self.connector
                    .connect_stats
                    .in_messages
                    .add(1, Ordering::Relaxed);

                if let Some(subscription) = self.subscriptions.get_mut(&sid) {
                    let message: Message = Message {
                        subject,
                        reply,
                        payload,
                        headers,
                        status,
                        description,
                        length,
                    };

                    // if the channel for subscription was dropped, remove the
                    // subscription from the map and unsubscribe.
                    match subscription.sender.try_send(message) {
                        Ok(_) => {
                            subscription.delivered += 1;
                            // if this `Subscription` has set `max` value, check if it
                            // was reached. If yes, remove the `Subscription` and in
                            // the result, `drop` the `sender` channel.
                            if let Some(max) = subscription.max {
                                if subscription.delivered >= max {
                                    self.subscriptions.remove(&sid);
                                }
                            }
                        }
                        Err(mpsc::error::TrySendError::Full(_)) => {
                            self.connector
                                .events_tx
                                .try_send(Event::SlowConsumer(sid))
                                .ok();
                        }
                        Err(mpsc::error::TrySendError::Closed(_)) => {
                            self.subscriptions.remove(&sid);
                            self.connection
                                .enqueue_write_op(&ClientOp::Unsubscribe { sid, max: None });
                        }
                    }
                } else if sid == MULTIPLEXER_SID {
                    if let Some(multiplexer) = self.multiplexer.as_mut() {
                        // `strip_prefix` already yields `Option<&str>`; no copy needed.
                        let maybe_token = subject.strip_prefix(multiplexer.prefix.as_ref());

                        if let Some(token) = maybe_token {
                            // Each token identifies one pending request; delivering
                            // the reply consumes (removes) its sender.
                            if let Some(sender) = multiplexer.senders.remove(token) {
                                let message = Message {
                                    subject,
                                    reply,
                                    payload,
                                    headers,
                                    status,
                                    description,
                                    length,
                                };

                                let _ = sender.send(message);
                            }
                        }
                    }
                }
            }
            // TODO: we should probably update advertised server list here too.
            ServerOp::Info(info) => {
                if info.lame_duck_mode {
                    self.connector.events_tx.try_send(Event::LameDuckMode).ok();
                }
            }

            _ => {
                // TODO: don't ignore.
            }
        }
    }
    /// Translates a client [`Command`] into wire operations on the connection
    /// and updates local bookkeeping (subscriptions, multiplexer, stats).
    ///
    /// Sending anything counts as activity, so the ping timer is reset.
    fn send_msg_to_server(&mut self, command: Command) {
        self.ping_interval.reset();

        match command {
            Command::Unsubscribe { sid, max } => {
                if let Some(subscription) = self.subscriptions.get_mut(&sid) {
                    subscription.max = max;
                    match subscription.max {
                        Some(n) => {
                            // Limit already reached: drop the subscription now.
                            if subscription.delivered >= n {
                                self.subscriptions.remove(&sid);
                            }
                        }
                        None => {
                            // No limit given means unsubscribe immediately.
                            self.subscriptions.remove(&sid);
                        }
                    }

                    self.connection
                        .enqueue_write_op(&ClientOp::Unsubscribe { sid, max });
                }
            }
            Command::Flush { observer } => {
                // Completed once the write buffer is flushed (see `process`).
                self.flush_observers.push(observer);
            }
            Command::Drain { sid } => {
                // Mark a subscription as draining and ask the server to stop
                // sending messages for it.
                let mut drain_sub = |sid: u64, sub: &mut Subscription| {
                    sub.is_draining = true;
                    self.connection
                        .enqueue_write_op(&ClientOp::Unsubscribe { sid, max: None });
                };

                if let Some(sid) = sid {
                    if let Some(sub) = self.subscriptions.get_mut(&sid) {
                        drain_sub(sid, sub);
                    }
                } else {
                    // sid isn't set, so drain the whole client
                    self.connector.events_tx.try_send(Event::Draining).ok();
                    self.is_draining = true;
                    for (&sid, sub) in self.subscriptions.iter_mut() {
                        drain_sub(sid, sub);
                    }
                }
            }
            Command::Subscribe {
                sid,
                subject,
                queue_group,
                sender,
            } => {
                let subscription = Subscription {
                    sender,
                    delivered: 0,
                    max: None,
                    subject: subject.to_owned(),
                    queue_group: queue_group.to_owned(),
                    is_draining: false,
                };

                self.subscriptions.insert(sid, subscription);

                self.connection.enqueue_write_op(&ClientOp::Subscribe {
                    sid,
                    subject,
                    queue_group,
                });
            }
            Command::Request {
                subject,
                payload,
                respond,
                headers,
                sender,
            } => {
                // `respond` is expected to look like "<prefix>.<token>";
                // this panics if it contains no '.' separator.
                let (prefix, token) = respond.rsplit_once('.').expect("malformed request subject");

                // Lazily create the multiplexer (and its wildcard subscription
                // under MULTIPLEXER_SID) on the first request.
                let multiplexer = if let Some(multiplexer) = self.multiplexer.as_mut() {
                    multiplexer
                } else {
                    let prefix = Subject::from(format!("{}.{}.", prefix, nuid::next()));
                    let subject = Subject::from(format!("{}*", prefix));

                    self.connection.enqueue_write_op(&ClientOp::Subscribe {
                        sid: MULTIPLEXER_SID,
                        subject: subject.clone(),
                        queue_group: None,
                    });

                    self.multiplexer.insert(Multiplexer {
                        subject,
                        prefix,
                        senders: HashMap::new(),
                    })
                };
                self.connector
                    .connect_stats
                    .out_messages
                    .add(1, Ordering::Relaxed);

                // Route the eventual reply for this token back to the requester.
                multiplexer.senders.insert(token.to_owned(), sender);

                let respond: Subject = format!("{}{}", multiplexer.prefix, token).into();

                let pub_op = ClientOp::Publish {
                    subject,
                    payload,
                    respond: Some(respond),
                    headers,
                };

                self.connection.enqueue_write_op(&pub_op);
            }

            Command::Publish(PublishMessage {
                subject,
                payload,
                reply: respond,
                headers,
            }) => {
                self.connector
                    .connect_stats
                    .out_messages
                    .add(1, Ordering::Relaxed);

                let header_len = headers
                    .as_ref()
                    .map(|headers| headers.len())
                    .unwrap_or_default();

                // Account outgoing bytes: payload + reply subject + subject + headers.
                self.connector.connect_stats.out_bytes.add(
                    (payload.len()
                        + respond.as_ref().map_or_else(|| 0, |r| r.len())
                        + subject.len()
                        + header_len) as u64,
                    Ordering::Relaxed,
                );

                self.connection.enqueue_write_op(&ClientOp::Publish {
                    subject,
                    payload,
                    respond,
                    headers,
                });
            }

            Command::Reconnect => {
                // Deferred: `process` acts on this flag after flushing writes.
                self.should_reconnect = true;
            }
        }
    }
    /// Handles a dropped connection: resets ping bookkeeping, notifies
    /// listeners of the disconnect, then attempts to reconnect.
    async fn handle_disconnect(&mut self) -> Result<(), ConnectError> {
        // Outstanding pings belonged to the dead connection; start fresh.
        self.pending_pings = 0;

        // Best-effort notifications; receivers may already be gone.
        let _ = self
            .connector
            .events_tx
            .try_send(crate::Event::Disconnected);
        let _ = self.connector.state_tx.send(State::Disconnected);

        self.handle_reconnect().await
    }
    /// Re-establishes the connection and restores client state on it:
    /// publishes the fresh [`ServerInfo`], prunes dead subscriptions, and
    /// re-sends SUB ops for every live subscription (plus the request/reply
    /// multiplexer, if one exists).
    async fn handle_reconnect(&mut self) -> Result<(), ConnectError> {
        let (info, connection) = self.connector.connect().await?;
        self.connection = connection;
        let _ = self.info_sender.send(info);

        // Forget subscriptions whose receivers were dropped while we were away.
        self.subscriptions
            .retain(|_, subscription| !subscription.sender.is_closed());

        // Re-subscribe every remaining subscription on the new connection.
        for (&sid, subscription) in self.subscriptions.iter() {
            self.connection.enqueue_write_op(&ClientOp::Subscribe {
                sid,
                subject: subscription.subject.to_owned(),
                queue_group: subscription.queue_group.to_owned(),
            });
        }

        // Restore the request/reply multiplexer subscription, if any.
        if let Some(multiplexer) = self.multiplexer.as_ref() {
            self.connection.enqueue_write_op(&ClientOp::Subscribe {
                sid: MULTIPLEXER_SID,
                subject: multiplexer.subject.to_owned(),
                queue_group: None,
            });
        }
        Ok(())
    }
}
