// #![allow(unused_imports)]
#![allow(dead_code)]

use std::collections::HashSet;
use std::convert::Infallible;
use std::env::var;
use std::io;
use std::net::{IpAddr, SocketAddr};
use std::str;
use std::{
    pin::Pin,
    task::{Context, Poll},
};

use std::path::Path;

use tokio::time::Duration;

use base64::encode;
use chrono::prelude::*;
use chrono::Utc;
use itertools::Itertools;
use parking_lot::{Condvar, Mutex};
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};

use futures::future::{self, Either, TryFutureExt};

use http::version::Version;
use hyper::{service::make_service_fn, Server};
use tonic::{Code, Request, Response, Status};
use tower::Service;
use warp::reply::json;
use warp::{http::StatusCode, Filter, Reply};

use push::online_client::OnlineClient;
use push::push_server::{Push, PushServer};
use push::AccessToken;
use push::{AvroBulkCreate, JsonBulkCreate};
use xdefault::DefaultReply;

use serde::{Deserialize, Serialize};

use self::utils::*;
use tonic::transport::channel::Channel;

#[macro_use]
extern crate log as xlog;

/// Boxed, thread-safe error type shared by the hyper/tower glue at the
/// bottom of this file.
type Error = Box<dyn std::error::Error + Send + Sync + 'static>;

/// Generated protobuf types for the shared default reply message.
pub mod xdefault {
    tonic::include_proto!("proto.xdefault");
}
/// Generated protobuf types for the signature, push, and online services.
pub mod push {
    tonic::include_proto!("proto.signature");
    tonic::include_proto!("proto.push");
    tonic::include_proto!("proto.online");
}

/// Logs `message` at info level, tagged with this service's component name.
pub fn info(message: &str) {
    let component = "push-gateway-rust";
    info!("[component:{}] {}", component, message)
}
/// Logs `message` at warn level, tagged with this service's component name.
pub fn warn(message: &str) {
    let component = "push-gateway-rust";
    warn!("[component:{}] {}", component, message)
}
pub fn error(message: &str) {
    error!("[component:{}] {}", "push-gateway-rust", message)
}
/// gRPC implementation of the Push service; holds the shared log sinks and
/// the online-pipeline client used by every handler.
pub struct MyPush {
    logs: states::Logs,
}

#[tonic::async_trait]
impl Push for MyPush {
    /// gRPC entry: append an Avro batch to the staging log.
    async fn create_staging_avro_bulk(
        &self,
        create: Request<AvroBulkCreate>,
    ) -> Result<Response<DefaultReply>, Status> {
        handlers::grpc_create_staging_avro_bulk(
            self.logs.avro_staging_log.clone(),
            create.into_inner(),
        )
        .await
    }
    /// gRPC entry: append a JSON batch to the staging log.
    async fn create_staging_json_bulk(
        &self,
        create: Request<JsonBulkCreate>,
    ) -> Result<Response<DefaultReply>, Status> {
        handlers::grpc_create_staging_json_bulk(
            self.logs.json_staging_log.clone(),
            create.into_inner(),
        )
        .await
    }
    /// gRPC entry: forward an Avro batch to the online pipeline, then log it.
    async fn create_online_avro_bulk(
        &self,
        create: Request<AvroBulkCreate>,
    ) -> Result<Response<DefaultReply>, Status> {
        handlers::grpc_create_online_avro_bulk(
            self.logs.avro_online_log.clone(),
            self.logs.pipeline_online.clone(),
            create.into_inner(),
        )
        .await
    }
    /// gRPC entry: forward a JSON batch to the online pipeline, then log it.
    async fn create_online_json_bulk(
        &self,
        create: Request<JsonBulkCreate>,
    ) -> Result<Response<DefaultReply>, Status> {
        handlers::grpc_create_online_json_bulk(
            self.logs.json_online_log.clone(),
            self.logs.pipeline_online.clone(),
            create.into_inner(),
        )
        .await
    }
}

mod log {
    use super::*;

    /// Append-only, durably-synced file writer.
    #[derive(Debug)]
    pub struct LogWriter {
        file: std::fs::File,
        // Recorded at construction; currently informational only — it is not
        // read anywhere in this module.
        is_binary: bool,
    }
    impl LogWriter {
        /// Opens `filepath` in append mode, creating the file and any missing
        /// parent directories first.
        pub fn new(filepath: String, is_binary: bool) -> io::Result<LogWriter> {
            if let Some(parent) = Path::new(&filepath).parent() {
                std::fs::create_dir_all(parent)?;
            }

            // `append(true)` already implies write access, so the redundant
            // `.write(true)` has been dropped.
            let file = std::fs::OpenOptions::new()
                .append(true)
                .create(true)
                .open(filepath)?;
            Ok(LogWriter { file, is_binary })
        }
        /// Appends `buf` and forces it to disk (`sync_data`) so an acknowledged
        /// write survives a crash. (No `#[inline(always)]`: this is dominated
        /// by syscalls, so forced inlining buys nothing.)
        pub fn write(&mut self, buf: &[u8]) -> io::Result<()> {
            use std::io::Write;
            self.file.write_all(buf)?;
            self.file.sync_data()?;
            Ok(())
        }
    }
    /// Cheaply cloneable handle to a single mutex-guarded `LogWriter`.
    #[derive(Clone)]
    pub struct OneLogger(Arc<Mutex<LogWriter>>);
    impl OneLogger {
        /// Opens the log file at `path`; panics if it cannot be opened.
        pub fn new(path: String, is_binary: bool) -> Self {
            OneLogger(Arc::new(Mutex::new(
                LogWriter::new(path, is_binary).expect("open file failed!"),
            )))
        }
        /// Appends `bs` under the lock; failures are logged, not propagated.
        #[inline(always)]
        pub fn log(&self, bs: &[u8]) {
            let mut writer = self.0.lock();
            if writer.write(bs).is_err() {
                error("append error!");
            }
        }
    }

    /// State guarded by `MyBarrier::lock`.
    struct MyBarrierState {
        /// Number of callers currently parked at the barrier.
        count: usize,
        /// Bumped on every barrier release; distinguishes rounds.
        generation_id: usize,
        /// Bytes accumulated since the last disk write.
        buf: Vec<u8>,
        writer: LogWriter,
        /// Millisecond timestamp of the last write/flush decision.
        write_ts: i64,
    }
    /// Write-combining barrier: `num_threads` concurrent callers pool their
    /// payloads and the last arrival performs one batched disk write.
    pub struct MyBarrier {
        lock: Mutex<MyBarrierState>,
        cvar: Condvar,
        num_threads: usize,
    }

    /// Batches writes from concurrent callers through `MyBarrier`: each call
    /// contributes its bytes, and one caller flushes the combined buffer.
    pub struct BarrierLogger {
        barrier: MyBarrier,
    }
    impl BarrierLogger {
        /// Creates a logger whose barrier releases once `barrier_num` callers
        /// have arrived in `log` (or earlier, via a timed `flush`).
        pub fn new(barrier_num: usize, path: String, is_binary: bool) -> Self {
            let writer = LogWriter::new(path, is_binary).expect("open file failed!");
            let write_ts = Utc::now().timestamp_millis();
            let barrier = MyBarrier {
                lock: Mutex::new(MyBarrierState {
                    writer,
                    write_ts,
                    count: 0,
                    generation_id: 0,
                    buf: Vec::new(),
                }),
                cvar: Condvar::new(),
                num_threads: barrier_num,
            };

            BarrierLogger { barrier }
        }
        /// Appends `bs` to the shared buffer and blocks until this round's
        /// batch is written — either by the last arrival or by `flush`.
        pub fn log(&self, bs: &[u8]) {
            let barrier = &self.barrier;

            let mut lock = barrier.lock.lock();
            lock.buf.extend_from_slice(bs);

            lock.count += 1;
            if lock.count < barrier.num_threads {
                // NOTE(review): a single wait (no generation re-check loop)
                // relies on parking_lot's Condvar not waking spuriously; with
                // std's Condvar this would need a predicate loop — confirm.
                barrier.cvar.wait(&mut lock);
            } else {
                // Last arrival: reset the round and write the whole batch.
                lock.count = 0;
                lock.generation_id = lock.generation_id.wrapping_add(1);
                // Copy the buffer out so `lock.writer` can be borrowed
                // mutably while the data is written.
                let bs = lock.buf.to_owned();
                if !bs.is_empty() {
                    // Panics (while holding the lock) if the disk write fails.
                    lock.writer.write(&bs).expect("write data failed!");
                    lock.buf.clear();
                }
                lock.write_ts = Utc::now().timestamp_millis();

                barrier.cvar.notify_all();
            }
        }
        /// Time-based escape hatch: if nothing was written for more than
        /// `interval` ms, write whatever is buffered and release any waiters
        /// parked at a partially-filled barrier.
        pub fn flush(&self, interval: i64) {
            let barrier = &self.barrier;
            let mut lock = barrier.lock.lock();

            let last_write_ts = lock.write_ts;
            let now_write_ts = Utc::now().timestamp_millis();

            if now_write_ts - last_write_ts > interval {
                lock.count = 0;
                lock.generation_id = lock.generation_id.wrapping_add(1);
                let bs: Vec<u8> = lock.buf.to_owned();

                if !bs.is_empty() {
                    lock.writer.write(&bs).expect("write data failed!");
                    lock.buf.clear();
                }
                lock.write_ts = now_write_ts;
                barrier.cvar.notify_all();
            }
        }
    }

    /// Shards writes across several `BarrierLogger`s, picking a shard from a
    /// hash of the calling thread's id so each thread sticks to one file.
    #[derive(Clone)]
    pub struct HashLogger(Arc<Vec<BarrierLogger>>);
    impl HashLogger {
        /// Builds one `BarrierLogger` per shard; every `<IDX>` occurrence in
        /// `path` is replaced with the shard index.
        pub fn new(fs_num: u8, barrier_num: usize, path: String, is_binary: bool) -> Self {
            let mut shards = Vec::with_capacity(fs_num as usize);
            for idx in 0..fs_num {
                let shard_path = path.replace("<IDX>", &idx.to_string());
                shards.push(BarrierLogger::new(barrier_num, shard_path, is_binary));
            }
            HashLogger(Arc::new(shards))
        }
        /// Routes `bs` to the shard selected by the current thread-id hash.
        #[inline(always)]
        pub fn log(&self, bs: &[u8]) {
            use std::collections::hash_map::DefaultHasher;
            use std::hash::{Hash, Hasher};

            let mut hasher = DefaultHasher::new();
            std::thread::current().id().hash(&mut hasher);
            let shard = (hasher.finish() % self.0.len() as u64) as usize;
            self.0[shard].log(bs)
        }

        /// Flushes every shard whose buffer has been idle for at least
        /// `interval` milliseconds.
        pub fn flush(&self, interval: i64) {
            for shard in self.0.iter() {
                shard.flush(interval);
            }
        }
    }
}

mod states {
    use super::log;
    use super::*;

    /// Shared service state: one sharded file logger per (format, origin)
    /// combination, plus the gRPC client for the online pipeline.
    #[derive(Clone)]
    pub struct Logs {
        pub avro_staging_log: log::HashLogger,
        pub json_staging_log: log::HashLogger,
        pub avro_online_log: log::HashLogger,
        pub json_online_log: log::HashLogger,
        pub pipeline_online: OnlineClient<Channel>,
    }
    /// Builds all four sharded file loggers and a load-balanced gRPC client
    /// for the online pipeline.
    ///
    /// `<IDX>` in each path is expanded per shard by `HashLogger`. Panics with
    /// a descriptive message if any pipeline endpoint URI is invalid — these
    /// come from startup configuration, so failing fast is intended.
    pub fn init_logs(
        sink_num: u8,
        barrier_num: usize,
        avro_staging_path: &str,
        json_staging_path: &str,
        avro_online_path: &str,
        json_online_path: &str,
        pipeline_online_nodes: &str,
    ) -> Logs {
        let pipeline_online_endpoints = pipeline_online_nodes
            .split(',')
            .map(|a| Channel::from_shared(a.to_owned()).expect("invalid pipeline endpoint URI"));

        Logs {
            avro_staging_log: log::HashLogger::new(
                sink_num,
                barrier_num,
                avro_staging_path.into(),
                true,
            ),
            json_staging_log: log::HashLogger::new(
                sink_num,
                barrier_num,
                json_staging_path.into(),
                false,
            ),
            avro_online_log: log::HashLogger::new(
                sink_num,
                barrier_num,
                avro_online_path.into(),
                true,
            ),
            json_online_log: log::HashLogger::new(
                sink_num,
                barrier_num,
                json_online_path.into(),
                false,
            ),
            pipeline_online: OnlineClient::new(Channel::balance_list(pipeline_online_endpoints)),
        }
    }
}
mod filters {
    use super::handlers::*;
    use super::*;

    /// Assembles the full warp route tree: staging/online Avro+JSON ingestion
    /// endpoints, a health check, and a permissive CORS policy.
    #[inline(always)]
    pub fn routes(
        logs: states::Logs,
    ) -> impl Filter<Extract = (impl warp::Reply,), Error = warp::Rejection> + Clone {
        let cors = warp::cors()
            .allow_any_origin()
            .allow_credentials(true)
            .allow_headers(vec![
                "Content-Type",
                "Authorization",
                "User-Agent",
                "Sec-Fetch-Mode",
                "Referer",
                "Origin",
                "Access-Control-Request-Method",
                "Access-Control-Request-Headers",
            ])
            .allow_methods(vec!["GET", "POST", "DELETE", "OPTIONS"]);

        // `or` tries branches in order. The avro and json filters share the
        // `push_staging`/`push_online` prefixes and are distinguished by the
        // trailing `avro_bulk`/`json_bulk` path segment.
        warp::path("push_staging")
            .and(staging_avro_bulk(logs.avro_staging_log.clone()))
            .or(warp::path("push_online").and(online_avro_bulk(
                logs.avro_online_log.clone(),
                logs.pipeline_online.clone(),
            )))
            .or(warp::path("push_staging").and(staging_json_bulk(logs.json_staging_log.clone())))
            .or(warp::path("push_online").and(online_json_bulk(
                logs.json_online_log.clone(),
                logs.pipeline_online,
            )))
            // GET /push/health — liveness probe.
            .or(warp::path("push").and(warp::path("health").map(|| StatusCode::OK)))
            .with(cors)
    }

    /// Warp helper: injects a clone of the shard logger into a filter chain.
    #[inline(always)]
    pub fn with_logger(
        client: log::HashLogger,
    ) -> impl Filter<Extract = (log::HashLogger,), Error = std::convert::Infallible> + Clone {
        let shared = client;
        warp::any().map(move || shared.clone())
    }

    /// Warp helper: injects a clone of the online-pipeline gRPC client into a
    /// filter chain.
    #[inline(always)]
    pub fn with_pipeline(
        client: OnlineClient<Channel>,
    ) -> impl Filter<Extract = (OnlineClient<Channel>,), Error = std::convert::Infallible> + Clone
    {
        let shared = client;
        warp::any().map(move || shared.clone())
    }

    /// POST bucket/<bucket>/path/<path>/avro_bulk (mounted under
    /// `push_staging` by `routes`): staging Avro ingestion over HTTP.
    ///
    /// The closure's parameter order must mirror the filter chain's
    /// extraction order (bucket, path, auth header, body, logger).
    #[inline(always)]
    pub fn staging_avro_bulk(
        logger: log::HashLogger,
    ) -> impl Filter<Extract = (impl warp::Reply,), Error = warp::Rejection> + Clone {
        warp::post()
            .and(warp::path!(
                "bucket" / String / "path" / String / "avro_bulk"
            ))
            .and(warp::header::<String>("Authorization"))
            // Reject request bodies larger than ~1 MB.
            .and(warp::body::content_length_limit(1024 * 1000))
            .and(warp::body::bytes())
            .and(with_logger(logger))
            .and_then(
                move |bucket, path, header_authorization, body_bytes: bytes::Bytes, writer| {
                    let create = AvroBulkCreate {
                        header_authorization,
                        compression: "".into(),
                        bucket,
                        path,
                        body: body_bytes.to_vec(),
                    };
                    async { grpc_to_http(grpc_create_staging_avro_bulk(writer, create).await) }
                },
            )
    }

    /// POST bucket/<bucket>/path/<path>/json_bulk (mounted under
    /// `push_staging` by `routes`): staging JSON ingestion over HTTP.
    ///
    /// The closure's parameter order must mirror the filter chain's
    /// extraction order (logger, auth header, bucket, path, body).
    pub fn staging_json_bulk(
        logger: log::HashLogger,
    ) -> impl Filter<Extract = (impl warp::Reply,), Error = warp::Rejection> + Clone {
        with_logger(logger)
            .and(warp::header::<String>("Authorization"))
            .and(warp::path!(
                "bucket" / String / "path" / String / "json_bulk"
            ))
            .and(warp::post())
            // Reject request bodies larger than ~1 MB.
            .and(warp::body::content_length_limit(1024 * 1000))
            .and(warp::body::bytes())
            .and_then(
                |writer, header_authorization, bucket, path, body_bytes: bytes::Bytes| {
                    let create = JsonBulkCreate {
                        // NOTE(review): this endpoint sets compression "none"
                        // while the other three use "" — confirm whether the
                        // difference is intentional.
                        compression: "none".into(),
                        header_authorization,
                        bucket,
                        path,
                        body: body_bytes.to_vec(),
                    };
                    async { grpc_to_http(grpc_create_staging_json_bulk(writer, create).await) }
                },
            )
    }

    /// POST bucket/<bucket>/path/<path>/avro_bulk (mounted under
    /// `push_online` by `routes`): online Avro ingestion — forwarded to the
    /// pipeline before being logged locally.
    ///
    /// The closure's parameter order must mirror the filter chain's
    /// extraction order (logger, pipeline, auth header, bucket, path, body).
    pub fn online_avro_bulk(
        logger: log::HashLogger,
        pipeline: OnlineClient<Channel>,
    ) -> impl Filter<Extract = (impl warp::Reply,), Error = warp::Rejection> + Clone {
        with_logger(logger)
            .and(with_pipeline(pipeline))
            .and(warp::header::<String>("Authorization"))
            .and(warp::path!(
                "bucket" / String / "path" / String / "avro_bulk"
            ))
            .and(warp::post())
            // Reject request bodies larger than ~1 MB.
            .and(warp::body::content_length_limit(1024 * 1000))
            .and(warp::body::bytes())
            .and_then(
                |writer, pipeline, header_authorization, bucket, path, body_bytes: bytes::Bytes| {
                    let create = AvroBulkCreate {
                        header_authorization,
                        compression: "".into(),
                        bucket,
                        path,
                        body: body_bytes.to_vec(),
                    };
                    async {
                        grpc_to_http(grpc_create_online_avro_bulk(writer, pipeline, create).await)
                    }
                },
            )
    }

    /// POST bucket/<bucket>/path/<path>/json_bulk (mounted under
    /// `push_online` by `routes`): online JSON ingestion — forwarded to the
    /// pipeline before being logged locally.
    ///
    /// The closure's parameter order must mirror the filter chain's
    /// extraction order (logger, pipeline, auth header, bucket, path, body).
    pub fn online_json_bulk(
        logger: log::HashLogger,
        pipeline: OnlineClient<Channel>,
    ) -> impl Filter<Extract = (impl warp::Reply,), Error = warp::Rejection> + Clone {
        with_logger(logger)
            .and(with_pipeline(pipeline))
            .and(warp::header::<String>("Authorization"))
            .and(warp::path!(
                "bucket" / String / "path" / String / "json_bulk"
            ))
            .and(warp::post())
            // Reject request bodies larger than ~1 MB.
            .and(warp::body::content_length_limit(1024 * 1000))
            .and(warp::body::bytes())
            .and_then(
                |writer, pipeline, header_authorization, bucket, path, body_bytes: bytes::Bytes| {
                    let create = JsonBulkCreate {
                        header_authorization,
                        compression: "".into(),
                        bucket,
                        path,
                        body: body_bytes.to_vec(),
                    };
                    async {
                        grpc_to_http(grpc_create_online_json_bulk(writer, pipeline, create).await)
                    }
                },
            )
    }
}

mod handlers {
    use super::*;
    /// JSON-serializable mirror of a gRPC status; HTTP handlers return it in
    /// the response body when a gRPC call fails (see `grpc_to_http`).
    #[derive(super::Deserialize, super::Serialize)]
    pub struct JsonStatus {
        code: u8,                // numeric gRPC code (see `grpc_code`)
        message: String,
        details: Option<String>, // never populated at present
    }
    #[inline(always)]
    pub fn grpc_code(code: Code) -> u8 {
        use Code::*;
        match code {
            Ok => 0,
            Cancelled => 1,
            Unknown => 2,
            InvalidArgument => 3,
            DeadlineExceeded => 4,
            NotFound => 5,
            AlreadyExists => 6,
            PermissionDenied => 7,
            ResourceExhausted => 8,
            FailedPrecondition => 9,
            Aborted => 10,
            OutOfRange => 11,
            Unimplemented => 12,
            Internal => 13,
            Unavailable => 14,
            DataLoss => 15,
            Unauthenticated => 16,
        }
    }
    impl From<super::Status> for JsonStatus {
        /// Converts a gRPC status into its JSON form; `details` is never
        /// populated from the status payload.
        #[inline(always)]
        fn from(status: super::Status) -> Self {
            let code = grpc_code(status.code());
            let message = status.message().to_string();
            JsonStatus {
                code,
                message,
                details: None,
            }
        }
    }

    /// Renders a gRPC handler result as a JSON HTTP reply. Errors are encoded
    /// as a `JsonStatus` body (the HTTP reply itself never fails).
    #[inline(always)]
    pub fn grpc_to_http<T>(
        grpc_result: Result<Response<T>, Status>,
    ) -> Result<impl super::Reply, super::Infallible>
    where
        T: super::Serialize,
    {
        let reply = match grpc_result {
            Ok(resp) => json(resp.get_ref()),
            Err(status) => json(&JsonStatus::from(status)),
        };
        Ok(reply)
    }

    /// Verifies the caller's token (unless auth is disabled via
    /// `PUSH_GATEWAY_AUTH_ENABLE=false`) and appends the base64-encoded Avro
    /// payload to the staging log as one `<utc>|<base64>\n` line.
    #[inline(always)]
    pub async fn grpc_create_staging_avro_bulk(
        logger: log::HashLogger,
        create: AvroBulkCreate,
    ) -> Result<Response<DefaultReply>, Status> {
        // `as_deref` compares against the literal without allocating a fresh
        // String on every request.
        if var("PUSH_GATEWAY_AUTH_ENABLE").as_deref() != Ok("false") {
            auths::verify_token(create.header_authorization)?;
        }

        let bs = create.body;

        let utc = Utc::now();
        // `into_bytes` reuses the format! allocation instead of copying it.
        let new_bs: Vec<u8> = format!("{}|{}\n", utc, encode(&bs)).into_bytes();
        logger.log(&new_bs);

        Ok(Response::new(DefaultReply {
            code: 0,
            message: "success".into(),
            details: "".to_string(),
            origin: "[hold for ip]".to_string(),
        }))
    }

    /// Verifies the token (unless auth is disabled), forwards the Avro batch
    /// to the online pipeline, and only logs it locally after the pipeline
    /// call succeeds.
    #[inline(always)]
    pub async fn grpc_create_online_avro_bulk(
        logger: log::HashLogger,
        mut pipeline_online: OnlineClient<Channel>,
        create: AvroBulkCreate,
    ) -> Result<Response<DefaultReply>, Status> {
        if var("PUSH_GATEWAY_AUTH_ENABLE").as_deref() != Ok("false") {
            // Clone only the header string, not the whole request.
            auths::verify_token(create.header_authorization.clone())?;
        }

        // The pipeline call consumes its request, so send a clone and keep
        // `create` for local logging. (`pipeline_online` is already owned —
        // no extra client clone needed.)
        pipeline_online.create_avro_bulk(create.clone()).await?;

        let utc: DateTime<Utc> = Utc::now();
        let new_bs: Vec<u8> = format!("{}|{}\n", utc, encode(&create.body)).into_bytes();
        logger.log(&new_bs);

        Ok(Response::new(DefaultReply {
            code: 0,
            message: "success".into(),
            details: "".to_string(),
            origin: "[hold for ip]".to_string(),
        }))
    }

    /// Verifies the caller's token (unless auth is disabled via
    /// `PUSH_GATEWAY_AUTH_ENABLE=false`) and appends the JSON payload
    /// (lossily UTF-8 decoded) to the staging log as `<utc>|<json>\n`.
    #[inline(always)]
    pub async fn grpc_create_staging_json_bulk(
        logger: log::HashLogger,
        create: JsonBulkCreate,
    ) -> Result<Response<DefaultReply>, Status> {
        // `as_deref` compares against the literal without allocating a fresh
        // String on every request.
        if var("PUSH_GATEWAY_AUTH_ENABLE").as_deref() != Ok("false") {
            auths::verify_token(create.header_authorization)?;
        }

        let bs_str = String::from_utf8_lossy(&create.body);
        let utc: DateTime<Utc> = Utc::now();
        // `into_bytes` reuses the format! allocation instead of copying it.
        let new_bs = format!("{}|{}\n", utc, bs_str).into_bytes();

        logger.log(&new_bs);

        Ok(Response::new(DefaultReply {
            code: 0,
            message: "success".into(),
            details: "".to_string(),
            origin: "[hold for ip]".to_string(),
        }))
    }

    /// Verifies the token (unless auth is disabled), forwards the JSON batch
    /// to the online pipeline, and only logs it locally after the pipeline
    /// call succeeds.
    #[inline(always)]
    pub async fn grpc_create_online_json_bulk(
        logger: log::HashLogger,
        mut pipeline_online: OnlineClient<Channel>,
        create: JsonBulkCreate,
    ) -> Result<Response<DefaultReply>, Status> {
        if var("PUSH_GATEWAY_AUTH_ENABLE").as_deref() != Ok("false") {
            // Clone only the header string, not the whole request.
            auths::verify_token(create.header_authorization.clone())?;
        }

        // The pipeline call consumes its request, so send a clone and keep
        // `create` for local logging. (The original cloned the request twice
        // plus the client; one clone suffices.)
        pipeline_online.create_json_bulk(create.clone()).await?;

        let bs_str = String::from_utf8_lossy(&create.body);
        let utc = Utc::now();
        let new_bs = format!("{}|{}\n", utc, bs_str).into_bytes();

        logger.log(&new_bs);

        Ok(Response::new(DefaultReply {
            code: 0,
            message: "success".into(),
            details: "".to_string(),
            origin: "[hold for ip]".to_string(),
        }))
    }
}

mod auths {
    use super::*;

    use std::collections::HashMap;

    #[inline(always)]
    pub fn parse_access_token(s: &str) -> Result<AccessToken, String> {
        let access_token_m = s
            .split('&')
            .map(|x| utils::tuple2(x, "=").unwrap())
            .collect::<HashMap<&str, &str>>();

        let start_ts_s = access_token_m
            .get("start_ts")
            .ok_or("field start_ts not find!")?;
        let end_ts_s = access_token_m
            .get("end_ts")
            .ok_or("field end_ts not find!")?;

        let start_ts = start_ts_s
            .parse()
            .map_err(|_| String::from("start_ts is not a valid numeric!"))?;
        let end_ts = end_ts_s
            .parse()
            .map_err(|_| String::from("end_ts is not a valid numeric!"))?;

        let ip = access_token_m
            .get("ip")
            .ok_or("field ip not find!")?
            .to_string();
        let payload_enc = access_token_m
            .get("payload_enc")
            .ok_or("field payload_enc not find!")?
            .to_string();

        Ok(AccessToken {
            start_ts,
            end_ts,
            payload_enc,
            ip,
        })
    }
    /// Verifies a `Bearer` token of the form
    /// `sign=<base64 signature>&<access token fields>`: checks the Ed25519
    /// signature over the raw field string against a hardcoded public key,
    /// then checks that the token has not expired.
    #[inline(always)]
    pub fn verify_token(authorization: String) -> Result<(), Status> {
        // Hardcoded Ed25519 verification key (base64). Decoding a constant,
        // so this unwrap cannot fail at runtime.
        let pubkey_b64 = "3oJdJiBvUAbMCyX+Fw4+ttfvWhH7bKSrIpFDbvj+qz8=".to_string();
        let pubkey_bs = base64::decode(pubkey_b64).unwrap();
        let pubkey = codecs::pubkey(&pubkey_bs);

        // Expected shape: "Bearer <signed token>".
        let (token_type, signed_access_token) =
            utils::tuple2(&authorization, " ").ok_or_else(|| {
                Status::invalid_argument("token format error: [<token_type> <token_val>!")
            })?;
        if token_type != "Bearer" {
            return Err(Status::invalid_argument("Bearer token needed!"));
        }

        // The signature must be the FIRST `&`-separated field; everything
        // after it is the exact byte string the signature covers.
        let (sign, access_token_s) = utils::tuple2(&signed_access_token, "&")
            .ok_or_else(|| Status::invalid_argument("missing sign and data!"))?;

        let (sign_name, sign_b64) = utils::tuple2(&sign, "=")
            .ok_or_else(|| Status::invalid_argument("key-value needed!"))?;

        if sign_name != "sign" {
            return Err(Status::invalid_argument("token sign is not first field!"));
        }

        let sign_val =
            base64::decode(sign_b64).map_err(|_| Status::invalid_argument("sign field error!"))?;
        let check = pubkey.verify(&access_token_s.as_bytes(), &sign_val).is_ok();
        if !check {
            return Err(Status::invalid_argument("token sign error!"));
        }

        // Only parsed after the signature checks out.
        let access_token = parse_access_token(access_token_s).map_err(Status::invalid_argument)?;

        let now_ts = utils::now_ts();

        // NOTE(review): only `end_ts` is checked; `start_ts` is parsed but
        // never validated — confirm whether not-yet-valid tokens should be
        // rejected here.
        if access_token.end_ts < now_ts {
            return Err(Status::deadline_exceeded("token expire!"));
        }
        Ok(())
    }
}
mod codecs {
    use super::*;
    use flate2::read::GzDecoder;
    use ring::signature::{UnparsedPublicKey, ED25519};
    use std::io::{Read, Write};

    pub fn pubkey(pubkey: &[u8]) -> UnparsedPublicKey<&[u8]> {
        UnparsedPublicKey::new(&ED25519, &pubkey)
    }
}

mod utils {
    use super::*;

    pub fn now_str_trunc(seconds: u32) -> (String, String) {
        let china_dt = Utc::now().with_timezone(&FixedOffset::east(8 * 3600));
        let china_day_str = china_dt.date().format("%Y%m%d").to_string();

        let china_tm = china_dt.time();
        let china_tm_secs = china_tm.hour() * 3600 + china_tm.minute() * 60 + china_tm.second();
        let china_tm_trunc =
            NaiveTime::from_num_seconds_from_midnight(china_tm_secs / seconds * seconds, 0);

        let china_tm_str_trunc = china_tm_trunc.format("%H%M").to_string();
        (china_day_str, china_tm_str_trunc)
    }

    /// Current wall-clock time as whole seconds since the Unix epoch.
    pub fn now_ts() -> u64 {
        let now = SystemTime::now();
        now.duration_since(UNIX_EPOCH).unwrap().as_secs()
    }

    /// Splits `s` at the FIRST occurrence of `sep` into `(head, tail)`;
    /// returns `None` when `sep` does not occur in `s`.
    ///
    /// Uses the standard library's `str::split_once` instead of the previous
    /// itertools `splitn(2, ..).next_tuple()` combination — identical
    /// semantics, one fewer crate involved.
    pub fn tuple2<'a>(s: &'a str, sep: &str) -> Option<(&'a str, &'a str)> {
        s.split_once(sep)
    }

    /// Returns the first IP in the comma-separated `nodes` list that is bound
    /// to a local network interface, if any.
    ///
    /// Panics if the local interfaces cannot be enumerated (startup-time
    /// configuration failure).
    pub fn resolve_socket_addr(nodes: &str) -> Option<IpAddr> {
        let local_ips = get_if_addrs::get_if_addrs()
            .unwrap()
            .iter()
            .map(|interface| interface.ip())
            .collect::<HashSet<_>>();
        nodes
            .split(',')
            .filter_map(|x| x.parse::<IpAddr>().ok())
            // `ip` is already `&IpAddr`; the previous `contains(&x)` passed a
            // needless `&&IpAddr`.
            .find(|ip| local_ips.contains(ip))
    }
    /// Expands "h1,h2" into "http://h1:<port>,http://h2:<port>".
    pub fn build_hosts(nodes: &str, port: &str) -> String {
        nodes
            .split(',')
            .map(|host| format!("http://{}:{}", host, port))
            .collect::<Vec<_>>()
            .join(",")
    }
}

async fn main_async() -> Result<(), Box<dyn std::error::Error>> {
    env_logger::init();
    dotenv::from_filename("meta-lib-static/env/.env").ok();

    let online_nodes =
        var("PUSH_GATEWAY_HOSTS__ONLINE").unwrap_or_else(|_| "127.0.0.1".to_string());
    let staging_nodes =
        var("PUSH_GATEWAY_HOSTS__STAGING").unwrap_or_else(|_| "127.0.0.1".to_string());
    let nodes = format!("{},{}", online_nodes, staging_nodes);

    let port = var("PUSH_GATEWAY_PORT").unwrap().parse::<u16>().unwrap();

    let o_ip = resolve_socket_addr(&nodes);
    if o_ip.is_none() {
        error("PUSH_GATEWAY_HOSTS not match! skip");
        return Ok(());
    }
    let addr = SocketAddr::new(o_ip.unwrap(), port);
    let prompt = format!("start server at {}", addr);
    info(&prompt);

    let file_store_dir =
        var("PUSH_GATEWAY_DATA").unwrap_or_else(|_| "/tmp/data_buffer_dev.db".to_string());
    std::fs::create_dir_all(&file_store_dir)?;

    let avro_staging_path = format!(
        "{}/dc_sdk_push_avro/origin=staging/region=default/{}_<IDX>.staging_avro.txt",
        file_store_dir, addr
    )
    .replace(":", "_");
    let json_staging_path = format!(
        "{}/dc_sdk_push_json/origin=staging/region=default/{}_<IDX>.staging_json.txt",
        file_store_dir, addr
    )
    .replace(":", "_");
    let avro_online_path = format!(
        "{}/dc_sdk_push_avro/origin=online/region=default/{}_<IDX>.online_avro.txt",
        file_store_dir, addr
    )
    .replace(":", "_");
    let json_online_path = format!(
        "{}/dc_sdk_push_json/origin=online/region=default/{}_<IDX>.online_json.txt",
        file_store_dir, addr
    )
    .replace(":", "_");

    let pipeline_online_hosts = var("PUSH_GATEWAY_PIPELINE_ONLINE_HOSTS").unwrap();
    let pipeline_online_port = var("PUSH_GATEWAY_PIPELINE_ONLINE_PORT").unwrap();
    let pipeline_online_nodes = build_hosts(&pipeline_online_hosts, &pipeline_online_port);

    let num_cpus_str = num_cpus::get().to_string();
    let fs_sink_num_str = var("PUSH_GATEWAY_FS_SINK_NUM").unwrap_or_else(|_| num_cpus_str);
    let fs_sink_num = fs_sink_num_str.parse().unwrap();

    let barrier_num_str = var("PUSH_GATEWAY_BARRIER_NUM").unwrap_or_else(|_| "1".into());
    let barrier_num = barrier_num_str.parse().unwrap();
    let logs = states::init_logs(
        fs_sink_num,
        barrier_num,
        &avro_staging_path,
        &json_staging_path,
        &avro_online_path,
        &json_online_path,
        &pipeline_online_nodes,
    );
    let logs_clone = logs.clone();
    tokio::spawn(async move {
        let interval = 10;
        loop {
            std::thread::sleep(Duration::from_millis(interval as u64));
            logs_clone.avro_staging_log.flush(interval);
            logs_clone.json_staging_log.flush(interval);
            logs_clone.avro_online_log.flush(interval);
            logs_clone.json_online_log.flush(interval);
        }
    });

    let my_tonic = PushServer::new(MyPush { logs: logs.clone() });
    let my_warp = warp::service(filters::routes(logs.clone()));

    Server::bind(&addr)
        .serve(make_service_fn(move |_| {
            let mut my_tonic = my_tonic.clone();
            let mut my_warp = my_warp.clone();
            future::ok::<_, Infallible>(tower::service_fn(
                move |req: hyper::Request<hyper::Body>| match req.version() {
                    Version::HTTP_11 | Version::HTTP_10 => Either::Left(
                        my_warp
                            .call(req)
                            .map_ok(|res| res.map(EitherBody::Left))
                            .map_err(Error::from),
                    ),
                    Version::HTTP_2 => Either::Right(
                        my_tonic
                            .call(req)
                            .map_ok(|res| res.map(EitherBody::Right))
                            .map_err(Error::from),
                    ),
                    _ => unimplemented!(),
                },
            ))
        }))
        .await?;

    Ok(())
}

/// Builds a multi-threaded tokio runtime (4 workers per CPU core) and runs
/// the async entry point to completion.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let worker_count = num_cpus::get() * 4;
    let runtime = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(worker_count)
        .enable_all()
        .build()?;
    runtime.block_on(main_async())?;
    Ok(())
}

/// Response body that is either a warp (HTTP/1.x) body or a tonic (HTTP/2)
/// body, so a single hyper server can serve both services.
enum EitherBody<A, B> {
    Left(A),
    Right(B),
}

// Delegating `http_body::Body` impl: every method forwards to whichever
// variant is present, converting each side's error into the boxed `Error`.
impl<A, B> http_body::Body for EitherBody<A, B>
where
    A: http_body::Body + Send + Unpin,
    B: http_body::Body<Data = A::Data> + Send + Unpin,
    A::Error: Into<Error>,
    B::Error: Into<Error>,
{
    type Data = A::Data;
    type Error = Box<dyn std::error::Error + Send + Sync + 'static>;

    fn is_end_stream(&self) -> bool {
        match self {
            EitherBody::Left(b) => b.is_end_stream(),
            EitherBody::Right(b) => b.is_end_stream(),
        }
    }

    // Polls the active variant for the next data chunk.
    fn poll_data(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Self::Data, Self::Error>>> {
        match self.get_mut() {
            EitherBody::Left(b) => Pin::new(b).poll_data(cx).map(map_option_err),
            EitherBody::Right(b) => Pin::new(b).poll_data(cx).map(map_option_err),
        }
    }

    // Polls the active variant for trailing headers (used by gRPC status).
    fn poll_trailers(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<Option<http::HeaderMap>, Self::Error>> {
        match self.get_mut() {
            EitherBody::Left(b) => Pin::new(b).poll_trailers(cx).map_err(Into::into),
            EitherBody::Right(b) => Pin::new(b).poll_trailers(cx).map_err(Into::into),
        }
    }
}

/// Normalizes the error type of an optional poll result into the boxed
/// `Error` alias used by `EitherBody`.
fn map_option_err<T, U: Into<Error>>(err: Option<Result<T, U>>) -> Option<Result<T, Error>> {
    err.map(|res| res.map_err(|e| e.into()))
}
