mod destination;
mod sender;

use std::{fmt::Debug, sync::Arc};

use axum::async_trait;
use destination::Destination;
use lazy_static::lazy_static;
use ruma::{api::OutgoingRequest, OwnedServerName, RoomId, ServerName};
use tokio::sync::Mutex;

use crate::{server::Service as ServiceTrait, server::ARGS, Error, Result};

use super::{global, rooms, CONFIG, SERVICES};

/// Outgoing-request ("sending") service: queues PDUs/EDUs/flush markers per
/// destination and feeds them to the background worker in `mod sender`.
pub struct Service {
    // server: Arc<Server>,
    /// Handles to the sibling services this sender reads from.
    services: Services,
    // pub db: Data,
    /// Producer half of the work queue; `dispatch` pushes onto this.
    sender: loole::Sender<Msg>,
    /// Consumer half, drained by the worker task (see `mod sender`).
    receiver: Mutex<loole::Receiver<Msg>>,
}

lazy_static! {
    /// Process-wide singleton instance of the sending service, built lazily
    /// from the global `ARGS` on first access.
    pub static ref SERVICE: Arc<Service> = Service::build(&ARGS);
}

/// Dependencies of the sending service. The commented-out fields mirror a
/// planned migration to explicit dependency injection (`Dep<…>` / `args.depend`);
/// for now the live fields are cloned from module-level singletons in `build`.
struct Services {
    /// HTTP client service; its `federation` client is used for outgoing requests.
    client: Arc<super::client::Service>,
    // globals: Dep<globals::Service>,
    /// Global counters; provides `next_count` for queue-key generation.
    global: Arc<global::Service>,
    /// Server configuration; used to filter out our own server name.
    config: super::Config,
    // resolver: Dep<resolver::Service>,
    // state: Dep<rooms::state::Service>,
    /// Room membership cache; enumerates the servers present in a room.
    state_cache: Arc<rooms::state_cache::Service>,
    // user: Dep<rooms::user::Service>,
    // users: Dep<users::Service>,
    // timeline: Dep<rooms::timeline::Service>,
    // account_data: Dep<account_data::Service>,
    // pusher: Dep<pusher::Service>,
}

/// One unit of work on the sender channel: an event bound for a destination,
/// tagged with the queue key it was registered under.
#[derive(Clone, Debug, PartialEq, Eq)]
struct Msg {
    /// Where this event is headed (remote server, push gateway, …).
    dest: Destination,
    /// What to send (PDU id, serialized EDU, or a flush marker).
    event: SendingEvent,
    /// Key from `queue_requests`; empty for `Flush` (see `flush_servers`).
    queue_id: Vec<u8>,
}

/// Payload kinds carried by a [`Msg`] on the sending queue.
#[allow(clippy::module_name_repetitions)]
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum SendingEvent {
    Pdu(Vec<u8>), // pduid
    Edu(Vec<u8>), // pdu json -- NOTE(review): likely serialized EDU content, not a PDU; the commented-out send_edu_* paths pass serialized EDUs here — confirm
    Flush,        // none
}

#[async_trait]
impl ServiceTrait for Service {
    /// Constructs the service with an unbounded work channel.
    ///
    /// NOTE: `args` is currently unused; dependencies come from the
    /// module-level singletons (`SERVICES`, `CONFIG`, `global::SERVICE`, …).
    /// The commented-out `args.depend` calls sketch the intended migration to
    /// explicit dependency injection.
    fn build(args: &crate::server::Args) -> Arc<Self> {
        let (sender, receiver) = loole::unbounded();
        Arc::new(Self {
            // server: args.server.clone(),
            services: Services {
                config: CONFIG.clone(),
                client: SERVICES.client.clone(),
                // client: args.depend::<client::Service>("client"),
                global: global::SERVICE.clone(),
                // resolver: args.depend::<resolver::Service>("resolver"),
                // state: args.depend::<rooms::state::Service>("rooms::state"),
                state_cache: rooms::state_cache::SERVICE.clone(),
                // user: args.depend::<rooms::user::Service>("rooms::user"),
                // users: args.depend::<users::Service>("users"),
                // timeline: args.depend::<rooms::timeline::Service>("rooms::timeline"),
                // account_data: args.depend::<account_data::Service>("account_data"),
                // pusher: args.depend::<pusher::Service>("pusher"),
            },
            // db: Data::new(&args),
            sender,
            receiver: Mutex::new(receiver),
        })
    }

    /// Entry point for the long-running send loop.
    async fn worker(self: Arc<Self>) -> Result<()> {
        // trait impl can't be split between files so this just glues to mod sender
        self.sender().await
    }

    /// Stops the worker by closing the channel; subsequent `dispatch` calls
    /// will return an error.
    fn interrupt(&self) {
        if !self.sender.is_closed() {
            self.sender.close();
        }
    }

    /// Service name used for logging/registration.
    fn name(&self) -> &str {
        // crate::service::make_name(std::module_path!())
        std::module_path!()
    }
}

impl Service {
    // #[tracing::instrument(skip(self, pdu_id, user, pushkey), level = "debug")]
    // pub fn send_pdu_push(&self, pdu_id: &[u8], user: &UserId, pushkey: String) -> Result<()> {
    //     let dest = Destination::Push(user.to_owned(), pushkey);
    //     let event = SendingEvent::Pdu(pdu_id.to_owned());
    //     let _cork = self.db.db.cork();
    //     let keys = self.db.queue_requests(&[(&dest, event.clone())])?;
    //     self.dispatch(Msg {
    //         dest,
    //         event,
    //         queue_id: keys.into_iter().next().expect("request queue key"),
    //     })
    // }

    #[tracing::instrument(skip(self, room_id, pdu_id), level = "debug")]
    pub fn send_pdu_room(&self, room_id: &RoomId, pdu_id: &[u8]) -> Result<()> {
        let servers = self
            .services
            .state_cache
            .room_servers(room_id)?
            .into_iter()
            .filter(|server_name| !self.services.config.server_is_ours(server_name));

        self.send_pdu_servers(servers, pdu_id)
    }

    #[tracing::instrument(skip(self, servers, pdu_id), level = "debug")]
    pub fn send_pdu_servers<I: Iterator<Item = OwnedServerName>>(
        &self,
        servers: I,
        pdu_id: &[u8],
    ) -> Result<()> {
        let requests = servers
            .into_iter()
            .map(|server| {
                (
                    Destination::Normal(server),
                    SendingEvent::Pdu(pdu_id.to_owned()),
                )
            })
            .collect::<Vec<_>>();

        let keys = self.queue_requests(
            &requests
                .iter()
                .map(|(o, e)| (o, e.clone()))
                .collect::<Vec<_>>(),
        )?;
        for ((dest, event), queue_id) in requests.into_iter().zip(keys) {
            self.dispatch(Msg {
                dest,
                event,
                queue_id,
            })?;
        }

        Ok(())
    }

    // #[tracing::instrument(skip(self, server, serialized), level = "debug")]
    // pub fn send_edu_server(&self, server: &ServerName, serialized: Vec<u8>) -> Result<()> {
    //     let dest = Destination::Normal(server.to_owned());
    //     let event = SendingEvent::Edu(serialized);
    //     let _cork = self.db.db.cork();
    //     let keys = self.db.queue_requests(&[(&dest, event.clone())])?;
    //     self.dispatch(Msg {
    //         dest,
    //         event,
    //         queue_id: keys.into_iter().next().expect("request queue key"),
    //     })
    // }

    // #[tracing::instrument(skip(self, room_id, serialized), level = "debug")]
    // pub fn send_edu_room(&self, room_id: &RoomId, serialized: Vec<u8>) -> Result<()> {
    //     let servers = self
    //         .services
    //         .state_cache
    //         .room_servers(room_id)
    //         .filter_map(Result::ok)
    //         .filter(|server_name| !self.services.globals.server_is_ours(server_name));

    //     self.send_edu_servers(servers, serialized)
    // }

    // #[tracing::instrument(skip(self, servers, serialized), level = "debug")]
    // pub fn send_edu_servers<I: Iterator<Item = OwnedServerName>>(
    //     &self,
    //     servers: I,
    //     serialized: Vec<u8>,
    // ) -> Result<()> {
    //     let requests = servers
    //         .into_iter()
    //         .map(|server| {
    //             (
    //                 Destination::Normal(server),
    //                 SendingEvent::Edu(serialized.clone()),
    //             )
    //         })
    //         .collect::<Vec<_>>();
    //     let _cork = self.db.db.cork();
    //     let keys = self.db.queue_requests(
    //         &requests
    //             .iter()
    //             .map(|(o, e)| (o, e.clone()))
    //             .collect::<Vec<_>>(),
    //     )?;

    //     for ((dest, event), queue_id) in requests.into_iter().zip(keys) {
    //         self.dispatch(Msg {
    //             dest,
    //             event,
    //             queue_id,
    //         })?;
    //     }

    //     Ok(())
    // }

    #[tracing::instrument(skip(self, room_id), level = "debug")]
    pub fn flush_room(&self, room_id: &RoomId) -> Result<()> {
        let servers = self
            .services
            .state_cache
            .room_servers(room_id)?
            .into_iter()
            .filter(|server_name| !self.services.config.server_is_ours(server_name));

        self.flush_servers(servers)
    }

    #[tracing::instrument(skip(self, servers), level = "debug")]
    pub fn flush_servers<I: Iterator<Item = OwnedServerName>>(&self, servers: I) -> Result<()> {
        let requests = servers.into_iter().map(Destination::Normal);
        for dest in requests {
            self.dispatch(Msg {
                dest,
                event: SendingEvent::Flush,
                queue_id: Vec::<u8>::new(),
            })?;
        }

        Ok(())
    }

    #[tracing::instrument(skip_all, name = "request")]
    pub async fn send_federation_request<T>(
        &self,
        dest: &ServerName,
        request: T,
    ) -> Result<T::IncomingResponse>
    where
        T: OutgoingRequest + Debug + Send,
    {
        let client = &self.services.client.federation;
        self.send(client, dest, request).await
    }

    /// Cleanup event data
    /// Used for instance after we remove an appservice registration
    // #[tracing::instrument(skip(self), level = "debug")]
    // pub fn cleanup_events(&self, appservice_id: String) -> Result<()> {
    //     self.db
    //         .delete_all_requests_for(&Destination::Appservice(appservice_id))?;

    //     Ok(())
    // }

    fn dispatch(&self, msg: Msg) -> Result<()> {
        debug_assert!(!self.sender.is_full(), "channel full");
        debug_assert!(!self.sender.is_closed(), "channel closed");
        self.sender.send(msg).map_err(|_e| {
            // if let Destination::Normal(server) = msg.dest {
            //     Error::FederationError(msg.dest, e)
            // } else {
            Error::BadServerResponse("dispatch error!")
            // }
        })
    }

    pub(super) fn queue_requests(
        &self,
        requests: &[(&Destination, SendingEvent)],
    ) -> Result<Vec<Vec<u8>>> {
        let mut batch = Vec::new();
        let mut keys = Vec::new();
        for (destination, event) in requests {
            let mut key = destination.get_prefix();
            if let SendingEvent::Pdu(value) = &event {
                key.extend_from_slice(value);
            } else {
                key.extend_from_slice(&self.services.global.next_count()?.to_be_bytes());
            }
            let value = if let SendingEvent::Edu(value) = &event {
                &**value
            } else {
                &[]
            };
            batch.push((key.clone(), value.to_owned()));
            keys.push(key);
        }
        // self.servernameevent_data
        //     .insert_batch(batch.iter().map(database::KeyVal::from))?;
        Ok(keys)
    }
}
