use std::collections::{HashMap, HashSet, VecDeque};
use std::ops::Range;

use crossbeam::channel::Sender;
use log::debug;

use super::log::{Index, Log};
use super::message::{Envelope, Message, ReadSequence, Request, RequestID, Response, Status};
use super::state::State;
use super::{ELECTION_TIMEOUT_RANGE, HEARTBEAT_INTERVAL, MAX_APPEND_ENTRIES};
use crate::error::Result;

/// A node ID, unique within a cluster. Assigned manually when started.
/// Stored as a u8, so a cluster can have at most 256 nodes.
pub type NodeID = u8;

/// A leader term number. Increases monotonically on elections.
pub type Term = u64;

/// A logical clock interval as number of ticks. The real-time duration of a
/// tick is determined by whoever drives the node, not by this module.
pub type Ticks = u8;

/// Raft node options. Defaults are taken from the module-level constants
/// (see `Options::default`).
#[derive(Clone, Debug, PartialEq)]
pub struct Options {
    /// The number of ticks between leader heartbeats.
    pub heartbeat_interval: Ticks,
    /// The range of randomized election timeouts for followers and candidates.
    /// Randomization reduces the chance of repeated split elections.
    pub election_timeout_range: Range<Ticks>,
    /// Maximum number of entries to send in a single Append message.
    pub max_append_entries: usize,
}

impl Default for Options {
    fn default() -> Self {
        Self {
            heartbeat_interval: HEARTBEAT_INTERVAL,
            election_timeout_range: ELECTION_TIMEOUT_RANGE,
            max_append_entries: MAX_APPEND_ENTRIES,
        }
    }
}

/// A Raft node with a dynamic role. This implements the Raft distributed
/// consensus protocol, see the `raft` module documentation for more info.
///
/// The node is driven synchronously by processing inbound messages via `step()`
/// and by advancing time via `tick()`. These methods consume the node and
/// return a new one with a possibly different role. Outbound messages are sent
/// via the given `tx` channel, and must be delivered to peers or clients.
///
/// This enum is the public interface to the node, with a closed set of roles.
/// It wraps the `RawNode<Role>` types, which implement the actual node logic.
/// The enum allows ergonomic use across role transitions since it can represent
/// all roles, e.g.: `node = node.step()?`.
///
/// NOTE(review): only the Leader role (and no `tick()` method) is visible in
/// this chunk; follower/candidate roles presumably live elsewhere — confirm.
pub enum Node {
    /// A leader processes client requests and replicates entries to followers.
    Leader(RawNode<Leader>),
}

impl Node {
    /// Creates a new Raft node. It starts as a leaderless follower, waiting to
    /// hear from a leader or otherwise transitioning to candidate and
    /// campaigning for leadership. In the case of a single-node cluster (no
    /// peers), the node immediately transitions to leader when created.
    pub fn new(
        id: NodeID,
        log: Log,
        state: Box<dyn State>,
        tx: Sender<Envelope>,
    ) -> Result<Self> {
        Ok(RawNode::new(id, log, state, tx)?.into())
    }

    /// Returns the node's ID.
    pub fn id(&self) -> NodeID {
        // The pattern is irrefutable since Leader is the only variant.
        let Self::Leader(node) = self;
        node.id
    }

    /// Returns the node's term.
    pub fn term(&self) -> Term {
        let Self::Leader(node) = self;
        node.term()
    }

    /// Processes an inbound message, dispatching to the role-specific logic.
    pub fn step(self, msg: Envelope) -> Result<Self> {
        assert_eq!(msg.to, self.id(), "message to other node: {msg:?}");
        debug!("Stepping {msg:?}");

        let Self::Leader(node) = self;
        node.step(msg)
    }
}

impl From<RawNode<Leader>> for Node {
    fn from(node: RawNode<Leader>) -> Self {
        Node::Leader(node)
    }
}

/// Marker trait for a Raft role: leader, follower, or candidate. Used as the
/// typestate parameter of `RawNode<R>`; it carries no methods of its own.
pub trait Role {}

/// A Raft node with role R.
///
/// This implements the typestate pattern, where individual node states (roles)
/// are encoded as RawNode<Role>. See http://cliffle.com/blog/rust-typestate/.
pub struct RawNode<R: Role> {
    /// The node ID. Must be unique in the cluster.
    id: NodeID,
    /// The Raft log, which stores client commands to be executed. Also the
    /// source of truth for the current term (see `RawNode::term`).
    log: Log,
    /// The Raft state machine, which executes client commands from the log.
    state: Box<dyn State>,
    /// Channel for sending outbound messages to other nodes.
    tx: Sender<Envelope>,
    /// Role-specific state (e.g. `Leader` replication progress).
    role: R,
}

impl<R: Role> RawNode<R> {
    /// Returns the node's current term, as recorded in the log.
    fn term(&self) -> Term {
        let (term, _vote) = self.log.get_term_vote();
        term
    }

    /// Returns the cluster size as number of nodes.
    fn cluster_size(&self) -> usize {
        1
    }

    /// Returns the cluster quorum size (strict majority).
    fn quorum_size(&self) -> usize {
        self.cluster_size() / 2 + 1
    }

    /// Returns the quorum value (i.e. median) of the given unsorted vector. It
    /// must have the same length as the cluster size.
    fn quorum_value<T: Ord + Copy>(&self, mut values: Vec<T>) -> T {
        assert_eq!(values.len(), self.cluster_size(), "vector size must match cluster size");
        // Partially sort in descending order: the element at position
        // quorum_size - 1 is then the value confirmed by a quorum.
        let (_, quorum, _) =
            values.select_nth_unstable_by(self.quorum_size() - 1, |a, b| b.cmp(a));
        *quorum
    }

    /// Sends a message to the given recipient.
    fn send(&self, to: NodeID, message: Message) -> Result<()> {
        let msg = Envelope { from: self.id, to, term: self.term(), message };
        Self::send_via(&self.tx, msg)
    }

    /// Sends a message via the given channel. This avoids borrowing self, to
    /// allow sending while holding partial borrows of self.
    fn send_via(tx: &Sender<Envelope>, msg: Envelope) -> Result<()> {
        debug!("Sending {msg:?}");
        tx.send(msg)?;
        Ok(())
    }
}

/// A leader serves client requests and replicates the log to followers.
/// If the leader loses leadership, all client requests are aborted.
pub struct Leader {
    /// Follower replication progress, keyed by follower node ID.
    progress: HashMap<NodeID, Progress>,
    /// Tracks pending write requests by log index. Added when the write is
    /// proposed and appended to the leader's log, and removed when the command
    /// is applied to the state machine, returning the result to the client.
    writes: HashMap<Index, Write>,
    /// Tracks pending read requests. For linearizability, read requests are
    /// assigned a sequence number and only executed once a quorum of nodes have
    /// confirmed that we're still the leader. Otherwise, an old leader could
    /// serve stale reads if a new leader has been elected elsewhere.
    /// Ordered by ascending sequence number (see `maybe_read`).
    reads: VecDeque<Read>,
    /// The read sequence number used for the last read. Initialized to 0 in
    /// this term, and incremented for every read command.
    read_seq: ReadSequence,
}

/// Per-follower replication progress (in this term).
struct Progress {
    /// The highest index where the follower's log is known to match the leader.
    /// Initialized to 0, increases monotonically.
    match_index: Index,
    /// The last read sequence number confirmed by this follower. To avoid stale
    /// reads on leader changes, a read is only served once its sequence number
    /// is confirmed by a quorum (see `Leader::reads`).
    read_seq: ReadSequence,
}

/// A pending client write request, awaiting commit and apply. The response is
/// sent back to `from` with the given request ID once the command is applied.
struct Write {
    /// The node which submitted the write.
    from: NodeID,
    /// The write request ID, echoed back in the client response.
    id: RequestID,
}

/// A pending client read request, awaiting quorum confirmation of leadership.
struct Read {
    /// The sequence number of this read (see `Leader::read_seq`).
    seq: ReadSequence,
    /// The node which submitted the read.
    from: NodeID,
    /// The read request ID, echoed back in the client response.
    id: RequestID,
    /// The read command, executed against the state machine when ready.
    command: Vec<u8>,
}

impl Leader {
    /// Creates a new leader role.
    fn new() -> Self {
        let peers = HashSet::<u8>::new();
        let progress = peers
            .into_iter()
            .map(|p| (p, Progress {match_index: 0, read_seq: 0 }))
            .collect();
        Self {
            progress,
            writes: HashMap::new(),
            reads: VecDeque::new(),
            read_seq: 0,
        }
    }
}

// Leader is a valid node role (typestate marker, see the `Role` trait).
impl Role for Leader {}

impl RawNode<Leader> {
    /// Creates a new node directly in the leader role. Since `cluster_size()`
    /// is 1 in this configuration (no peers), the node can lead immediately.
    fn new(
        id: NodeID,
        log: Log,
        state: Box<dyn State>,
        tx: Sender<Envelope>,
    ) -> Result<Self> {
        let role = Leader::new();
        let node = Self { id, log, state, tx, role };

        Ok(node)
    }

    /// Processes an inbound message while leader. Consumes the node and
    /// returns it wrapped in the role-erased `Node` enum.
    fn step(mut self, msg: Envelope) -> Result<Node> {

        match msg.message {

            // A client submitted a write request. Propose it, and wait until
            // it's replicated and applied to the state machine before returning
            // the response to the client.
            Message::ClientRequest { id, request: Request::Write(command) } => {
                let index = self.propose(Some(command))?;
                self.role.writes.insert(index, Write { from: msg.from, id });
                // With no peers, the entry may already satisfy quorum, so try
                // to commit and apply right away.
                self.maybe_commit_and_apply()?;
            }

            // A client submitted a read request. To ensure linearizability, we
            // must confirm that we are still the leader by sending the read's
            // sequence number and wait for quorum confirmation.
            Message::ClientRequest { id, request: Request::Read(command) } => {
                self.role.read_seq += 1;
                let read = Read { seq: self.role.read_seq, from: msg.from, id, command };
                self.role.reads.push_back(read);
                self.maybe_read()?;
            }

            // A client submitted a status command.
            Message::ClientRequest { id, request: Request::Status } => {
                let response = self.status().map(Response::Status);
                self.send(msg.from, Message::ClientResponse { id, response })?;
            }

            // Leaders don't proxy client requests.
            Message::ClientResponse { .. } => panic!("unexpected message {msg:?}"),
        }

        Ok(self.into())
    }

    /// Proposes a command for consensus by appending it to our log and
    /// replicating it to peers. If successful, it will eventually be committed
    /// and applied to the state machine.
    ///
    /// NOTE(review): only the local append is visible here; with no peers in
    /// this configuration there is nothing to replicate, but confirm that the
    /// multi-node version sends Append messages at this point.
    fn propose(&mut self, command: Option<Vec<u8>>) -> Result<Index> {
        let index = self.log.append(command)?;
        Ok(index)
    }

    /// Commits new entries that have been replicated to a quorum and applies
    /// them to the state machine, returning results to clients. Returns the
    /// (possibly unchanged) commit index.
    fn maybe_commit_and_apply(&mut self) -> Result<Index> {
        // Determine the new commit index by quorum. Our own log always matches
        // itself, so our last index is chained onto the follower match indexes.
        let (last_index, _) = self.log.get_last_index();
        let commit_index = self.quorum_value(
            self.role.progress.values().map(|p| p.match_index).chain([last_index]).collect(),
        );

        // If the commit index doesn't advance, do nothing. We don't assert on
        // this, since the quorum value may regress e.g. following a restart or
        // leader change where followers are initialized with match index 0.
        let (old_index, old_term) = self.log.get_commit_index();
        if commit_index <= old_index {
            return Ok(old_index);
        }

        // We can only safely commit an entry from our own term (see section
        // 5.4.2 in Raft paper).
        match self.log.get(commit_index)? {
            Some(entry) if entry.term == self.term() => {}
            Some(_) => return Ok(old_index),
            None => panic!("commit index {commit_index} missing"),
        }

        // Commit entries.
        self.log.commit(commit_index)?;

        // Apply entries and respond to clients. We use `send_via` since the
        // iterator holds a borrow of `self.log`, precluding `self.send`.
        let term = self.term();
        let mut iter = self.log.scan_apply(self.state.get_applied_index());
        while let Some(entry) = iter.next().transpose()? {
            debug!("Applying {entry:?}");
            let write = self.role.writes.remove(&entry.index);
            let result = self.state.apply(entry);

            if let Some(Write { id, from: to }) = write {
                let message = Message::ClientResponse { id, response: result.map(Response::Write) };
                Self::send_via(&self.tx, Envelope { from: self.id, term, to, message })?;
            }
        }
        // Explicitly end the log borrow before calling methods on `self` below.
        drop(iter);

        // If the commit term changed, there may be pending reads waiting for us
        // to commit and apply an entry from our own term. Execute them.
        if old_term != self.term() {
            self.maybe_read()?;
        }

        Ok(commit_index)
    }

    /// Executes any ready read requests, where a quorum have confirmed that
    /// we're still the leader for the read sequences.
    fn maybe_read(&mut self) -> Result<()> {
        if self.role.reads.is_empty() {
            return Ok(());
        }

        // It's only safe to read if we've committed and applied an entry from
        // our own term (the leader appends an entry when elected). Otherwise we
        // may be behind on application and serve stale reads.
        let (commit_index, commit_term) = self.log.get_commit_index();
        let applied_index = self.state.get_applied_index();
        if commit_term < self.term() || applied_index < commit_index {
            return Ok(());
        }

        // Determine the maximum read sequence confirmed by quorum. Our own
        // latest sequence counts towards the quorum alongside the followers'.
        let quorum_read_seq = self.quorum_value(
            self.role.progress.values().map(|p| p.read_seq).chain([self.role.read_seq]).collect(),
        );

        // Execute ready reads. The VecDeque is ordered by read_seq, so we
        // can keep pulling until we hit quorum_read_seq.
        while let Some(read) = self.role.reads.front() {
            if read.seq > quorum_read_seq {
                break;
            }
            let read = self.role.reads.pop_front().unwrap();
            let response = self.state.read(read.command).map(Response::Read);
            self.send(read.from, Message::ClientResponse { id: read.id, response })?;
        }
        Ok(())
    }

    /// Generates cluster status: leadership/term info, per-node match indexes
    /// (including our own last index), commit/apply progress, and log storage
    /// statistics.
    fn status(&mut self) -> Result<Status> {
        Ok(Status {
            leader: self.id,
            term: self.term(),
            match_index: self
                .role
                .progress
                .iter()
                .map(|(id, p)| (*id, p.match_index))
                .chain(std::iter::once((self.id, self.log.get_last_index().0)))
                .collect(),
            commit_index: self.log.get_commit_index().0,
            applied_index: self.state.get_applied_index(),
            storage: self.log.status()?,
        })
    }
}