//! example timeseries:
//! Futures.Huobi.v1:BTC-USD.QUARTER:rt_et
//! Futures.Huobi.v1:BTC-USD.QUARTER:ft_et
//! Futures.Huobi.v1:BTC-USD.QUARTER:ft_rt
mod scale;
mod slack;
use anyhow::Context;
use anyhow::Result;
use clap::Clap;
use futures::FutureExt;
use futures::StreamExt;
use influx_db_client::{Client, Point, Precision, Value};
use plotters::prelude::*;
use std::sync::Arc;

/// Shared map: topic -> (exchange timestamp of the latest message, raw message body).
/// Written by `subscribe_relay`, read by the dashboard and the periodic feed checker.
type LastFeedMap = Arc<chashmap::CHashMap<String, (chrono::DateTime<chrono::Utc>, String)>>;

// Command-line options. Plain `//` comments are used here (not `///`) because
// clap's derive turns doc comments into help text, which would change the
// program's --help output.
#[derive(Clap, Clone, Debug)]
#[clap(version = "1.0", author = "Donggu Kang")]
struct Opts {
    // ZeroMQ endpoint of the feed relay to subscribe to.
    #[clap(long, default_value = "tcp://18.163.136.233:5124")]
    relay_server: String,
    // Wake-up period of the feed-liveness checker; also the staleness
    // threshold beyond which a symbol counts as failed.
    #[clap(long, default_value = "5")]
    check_period_minutes: i64,
    // When set, batched min/max latency points are written to InfluxDB.
    #[clap(long)]
    enable_influxdb: bool,
    // InfluxDB HTTP endpoint (only used with --enable-influxdb).
    #[clap(long, default_value = "http://52.7.227.223:8086")]
    influxdb: String,
    // Log every received payload at debug level.
    #[clap(long)]
    debug: bool,
    // Skip messages whose payload contains "trade".
    #[clap(long)]
    ignore_trade: bool,
    // Topics to subscribe to; main() rejects an empty list.
    #[clap(long)]
    mear_symbols: Vec<String>,
}

/// Number of feed messages aggregated into one min/max point before it is
/// flushed to the InfluxDB writer channel.
const BATCH_SIZE: usize = 1000;

/// Running min/max aggregation of latency samples over one batch window,
/// stamped with the time of the first sample in the window.
#[derive(Debug, Default)]
struct Aggregation {
    time: i64,
    mn: i64,
    mx: i64,
}

impl Aggregation {
    /// Fold one sample into the running minimum and maximum.
    fn update(&mut self, x: i64) {
        self.mn = self.mn.min(x);
        self.mx = self.mx.max(x);
    }
}

/// Latency distributions shared between the relay subscriber (writer) and the
/// dashboard handler (reader). Values are recorded in milliseconds, clamped to
/// the 1..=1_000 range by `subscribe_relay`.
struct Histograms {
    // relay receive time minus exchange timestamp
    pub rt_et: hdrhistogram::Histogram<u64>,
    // local receive time minus relay receive time
    pub ft_rt: hdrhistogram::Histogram<u64>,
}

use sailfish::TemplateOnce;
/// View model rendered once per request by `get_dashboard` through the
/// `templates/dashboard.stpl` sailfish template.
#[derive(TemplateOnce)]
#[template(path = "dashboard.stpl")]
struct DashboardTemplate<'a> {
    // Host name of the machine serving the dashboard.
    host: String,
    relay_server: &'a str,
    enable_influxdb: bool,
    influxdb: &'a str,
    // Formatted process start time.
    start: String,
    // Human-readable duration since start.
    uptime: String,
    mear_symbols: &'a Vec<String>,
    // Latest message per topic (shared map, cloned Arc).
    last_feed: LastFeedMap,
    // Pre-rendered SVG latency quantile chart.
    svg: String,
}

fn unwrap(res: Result<anyhow::Result<()>, tokio::task::JoinError>) -> anyhow::Result<()> {
    res.unwrap_or_else(|e| Err(anyhow::Error::new(e)))
}

/// Subscribe to the ZeroMQ relay for every symbol in `opts.mear_symbols` and,
/// per feed message, derive three latency deltas from the three timestamps:
///   rt - et: relay receive time minus exchange timestamp,
///   ft - et: local receive time minus exchange timestamp,
///   ft - rt: local receive time minus relay receive time.
/// The deltas feed the shared millisecond histograms (for the dashboard) and,
/// when `--enable-influxdb` is set, batched min/max points pushed through
/// `influx_tx`. Also records the newest message per topic in `last_feed` for
/// the liveness checker.
///
/// Returns an error on malformed JSON payloads, poisoned histogram locks are
/// propagated as panics via `unwrap`, and the function ends normally when the
/// socket stream terminates.
async fn subscribe_relay(
    opts: Opts,
    influx_tx: tokio::sync::mpsc::Sender<Point>,
    hists: std::sync::Arc<std::sync::RwLock<Histograms>>,
    last_feed: LastFeedMap,
) -> anyhow::Result<()> {
    // The tmq builder requires an initial subscription; add the rest after.
    let mut socket = tmq::subscribe(&tmq::Context::new())
        .connect(&opts.relay_server)?
        .subscribe(opts.mear_symbols[0].as_bytes())?;
    log::debug!("subscribed {}", opts.mear_symbols[0]);
    for sym in opts.mear_symbols.iter().skip(1) {
        socket.subscribe(sym.as_bytes())?;
        log::debug!("subscribed {}", sym);
    }

    let mut cnt = 0;
    let mut rt_et = Aggregation::default();
    let mut ft_et = Aggregation::default();
    let mut ft_rt = Aggregation::default();

    // the benchmark shows that
    // the loop (parsing json && sending to queue && etc) takes less than 1ms even in debug mode
    /*
        189.34µs
        125.12µs
        160.26µs
        123.39µs
        77.80µs
        95.62µs
        190.82µs
        274.44µs
        186.55µs
    */
    while let Some(Ok(msg)) = socket.next().await {
        // ft: local ("feed") receive time, nanoseconds since epoch.
        let ft: i64 = chrono::Utc::now().timestamp_nanos();

        // Frames: [topic, relay receive time (ns, decimal string), JSON payload].
        let mut it = msg.iter();
        let topic = it.next().unwrap().as_str().unwrap_or("invalid text");
        let rt: i64 = it
            .next()
            .unwrap()
            .as_str()
            .unwrap_or("invalid text")
            .parse::<i64>()
            .unwrap();
        let msg = it.next().unwrap().as_str().unwrap_or("invalid text");

        if opts.debug {
            log::debug!("{}", msg);
        }

        if opts.ignore_trade && msg.contains("trade") {
            continue;
        }

        let json: serde_json::Value = serde_json::from_str(msg)?;

        // Messages without data[0].timestamp are control/non-feed messages.
        if json["data"][0]["timestamp"].is_null() {
            eprintln!("non-feed msg: {}", msg);
            continue;
        }

        // we report rt-et, ft-et, ft-rt
        // et: exchange timestamp taken from the payload itself.
        let et =
            chrono::DateTime::parse_from_rfc3339(json["data"][0]["timestamp"].as_str().unwrap())
                .unwrap()
                .with_timezone(&chrono::Utc);
        last_feed.insert(topic.to_string(), (et, msg.to_string()));
        let et: i64 = et.timestamp_nanos();

        {
            let mut hists = hists.write().unwrap();
            // Due to clock skew between hosts the deltas can be negative, but
            // the histogram only accepts u64. Clamp in i64 BEFORE casting:
            // casting a negative delta straight to u64 would wrap it to a
            // huge value that then clamps to the 1_000 ms ceiling instead of
            // the intended 1 ms floor.
            hists
                .rt_et
                .record(((rt - et) / 1_000_000).clamp(1, 1_000) as u64)?;
            hists
                .ft_rt
                .record(((ft - rt) / 1_000_000).clamp(1, 1_000) as u64)?;
        }

        if opts.enable_influxdb {
            // Flush the previous window once BATCH_SIZE messages accumulated,
            // then start a new window seeded with the current sample.
            if cnt == 0 || cnt > BATCH_SIZE {
                if cnt > BATCH_SIZE {
                    influx_tx.try_send(
                        Point::new(&topic)
                            .add_timestamp(rt_et.time)
                            .add_field(format!("{}:rt_et:min", topic), Value::Integer(rt_et.mn))
                            .add_field(format!("{}:rt_et:max", topic), Value::Integer(rt_et.mx)),
                    )?;
                    influx_tx.try_send(
                        Point::new(&topic)
                            .add_timestamp(ft_et.time)
                            .add_field(format!("{}:ft_et:min", topic), Value::Integer(ft_et.mn))
                            .add_field(format!("{}:ft_et:max", topic), Value::Integer(ft_et.mx)),
                    )?;
                    influx_tx.try_send(
                        Point::new(&topic)
                            .add_timestamp(ft_rt.time)
                            .add_field(format!("{}:ft_rt:min", topic), Value::Integer(ft_rt.mn))
                            .add_field(format!("{}:ft_rt:max", topic), Value::Integer(ft_rt.mx)),
                    )?;
                }
                cnt = 1;
                rt_et = Aggregation {
                    time: et,
                    mn: rt - et,
                    mx: rt - et,
                };
                ft_et = Aggregation {
                    time: et,
                    mn: ft - et,
                    mx: ft - et,
                };
                ft_rt = Aggregation {
                    time: et,
                    mn: ft - rt,
                    mx: ft - rt,
                };
            } else {
                cnt += 1;
                rt_et.update(rt - et);
                ft_et.update(ft - et);
                ft_rt.update(ft - rt);
            }
        }
    }

    Ok(())
}

/// Best-effort host name for labeling dashboards, InfluxDB databases, and
/// Slack alerts; falls back to an empty string if the OS lookup fails.
fn get_hostname() -> String {
    // unwrap_or_default() instead of unwrap_or(OsString::new()): avoids the
    // eagerly-built fallback argument (clippy `or_fun_call` idiom).
    hostname::get()
        .unwrap_or_default()
        .to_string_lossy()
        .into_owned()
}

/// Drain `rx` and write each point to InfluxDB (database named after the
/// host). Each write is bounded by a 1-second timeout; a timeout drops that
/// point and keeps consuming so a slow InfluxDB cannot stall the pipeline,
/// while any other write error aborts the task.
async fn send_to_influx(
    opts: Opts,
    mut rx: tokio::sync::mpsc::Receiver<Point>,
) -> anyhow::Result<()> {
    // NOTE(review): credentials are hard-coded here; consider moving them to
    // CLI flags or environment variables.
    let client = Client::new(url::Url::parse(&opts.influxdb)?, & get_hostname())
        .set_authentication("admin", "protos123");
    // Idempotent on the InfluxDB side in practice — presumably creating an
    // existing database is tolerated; verify against the server version.
    client.create_database(client.get_db()).await?;

    while let Some(point) = rx.recv().await {
        match tokio::time::timeout(
            std::time::Duration::from_secs(1),
            client.write_point(point, Some(Precision::Nanoseconds), None),
        )
        .await
        {
            Ok(res) => {
                // Propagate genuine write failures (auth, connectivity, ...).
                res.context("fail to send to influxdb")?;
            }
            Err(_) => {
                // Timed out: skip this point and continue with the next one.
                eprintln!("Writing to InfluxDB timed out (1 second)");
                continue;
            }
        }
    }

    Ok(())
}

/// Rocket handler for `GET /`: renders the dashboard HTML with run info, the
/// latest message per topic, and an inline SVG quantile chart of the two
/// latency histograms (rt-et in red, ft-rt in green).
#[rocket::get("/")]
fn get_dashboard(
    start: rocket::State<chrono::DateTime<chrono::Utc>>,
    opts: rocket::State<Opts>,
    last_feed: rocket::State<LastFeedMap>,
    hists: rocket::State<std::sync::Arc<std::sync::RwLock<Histograms>>>,
) -> rocket::response::content::Html<String> {
    // Read lock held for the whole render; writers (subscribe_relay) block
    // meanwhile.
    let hists = hists.read().unwrap();
    let end = chrono::Utc::now();

    let mut svg1 = String::new();
    {
        // Inner scope: the drawing backend must be dropped before svg1 is
        // moved into the template below.
        let root = SVGBackend::with_string(&mut svg1, (800, 600)).into_drawing_area();
        root.fill(&WHITE).unwrap();

        // X axis: quantile on an inverse-log scale to stretch the tail
        // (0.9, 0.99, 0.999, ...); Y axis: latency in ms on a log scale,
        // spanning the 1..1_000 ms range the histograms are clamped to.
        let mut chart = ChartBuilder::on(&root)
            .x_label_area_size(35)
            .y_label_area_size(100)
            .margin(20)
            .caption(
                format!(
                    "{} products ({} - {})",
                    opts.mear_symbols.len(),
                    start.format("%Y-%m-%d %H:%M"),
                    end.format("%Y-%m-%d %H:%M")
                ),
                ("monospace", 20.0).into_font(),
            )
            .build_cartesian_2d(
                scale::InverseLogRange(0.1f64..0.9999)
                    .with_key_points(vec![0.1, 0.5, 0.9, 0.99, 0.999, 0.9999]),
                (1f64..1_000f64).log_scale(),
            )
            .unwrap();

        chart
            .configure_mesh()
            .disable_x_mesh()
            .bold_line_style(&WHITE.mix(0.3))
            .y_desc("Latency")
            .x_desc("Quantile")
            .axis_desc_style(("sans-serif", 15).into_font())
            .x_label_formatter(&|x| format!("{}%", x * 100.0))
            .y_label_formatter(&|y| format!("{} ms", y))
            .draw()
            .unwrap();

        // One (quantile, latency) polyline per histogram; iter_quantiles(3)
        // walks the recorded distribution at 2^-3 quantile resolution steps.
        chart
            .draw_series(LineSeries::new(
                hists.rt_et.iter_quantiles(3).map(|it| {
                    (
                        it.quantile_iterated_to() as f64,
                        it.value_iterated_to() as f64,
                    )
                }),
                &RED,
            ))
            .unwrap()
            .label("rt-et")
            .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &RED));

        chart
            .draw_series(LineSeries::new(
                hists.ft_rt.iter_quantiles(3).map(|it| {
                    (
                        it.quantile_iterated_to() as f64,
                        it.value_iterated_to() as f64,
                    )
                }),
                &GREEN,
            ))
            .unwrap()
            .label("ft-rt")
            .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &GREEN));

        chart
            .configure_series_labels()
            .border_style(&BLACK)
            .draw()
            .unwrap();
    }

    let template = DashboardTemplate {
        host: get_hostname(),
        relay_server: &opts.relay_server,
        enable_influxdb: opts.enable_influxdb,
        influxdb: &opts.influxdb,
        start: start.format("%Y-%m-%d %H:%M").to_string(),
        uptime: humantime::format_duration((chrono::Utc::now() - *start).to_std().unwrap())
            .to_string(),
        mear_symbols: &opts.mear_symbols,
        last_feed: (*last_feed).clone(),
        svg: svg1,
    };

    let html = template.render_once().unwrap();
    rocket::response::content::Html(html)
}

/// Every `check_period_minutes`, verify that each subscribed symbol is still
/// producing feed messages and post a Slack alert on each state transition.
/// `failed[i]` latches the alerted state per symbol so each failure and each
/// recovery is reported exactly once. Never returns Ok: loops until an error
/// (e.g. a Slack send failure) propagates out.
async fn period_feed_check(opts: Opts, last_feed: LastFeedMap, start: chrono::DateTime<chrono::Utc>) -> Result<()> {
    let period = chrono::Duration::minutes(opts.check_period_minutes);
    let check_period = period.to_std()?;
    // Time allowed after startup before a never-seen symbol is reported.
    let grace_period = chrono::Duration::minutes(1);
    let mut failed = vec![false;opts.mear_symbols.len()];

    loop {
        let now = chrono::Utc::now();
        for (i, mear_symbol) in opts.mear_symbols.iter().enumerate() {
            if failed[i] {
                // Already alerted for this symbol: watch for recovery.
                if let Some(record) = last_feed.get(mear_symbol) {
                    let last = record.0;
                    let elapsed = now-last;
                    if elapsed < period {
                        failed[i] = false;
                        slack::send(slack_api::chat::PostMessageRequest {
                            text: &format!("[{}] [{}] Feed is recovered", get_hostname(), mear_symbol),
                            channel: "#coin_feed_noti",
                            link_names: Some(true),
                            ..Default::default()
                        })
                        .await?;
                    }
                }
            } else {
                if let Some(record) = last_feed.get(mear_symbol) {
                    let last = record.0;
                    let elapsed = now-last;
                    if elapsed > period {
                        failed[i] = true;
                        slack::send(slack_api::chat::PostMessageRequest {
                            text: &format!("[{}] [{}] Feed is not received for {}\n@donggu", get_hostname(), mear_symbol, humantime::format_duration(elapsed.to_std().unwrap())
                            .to_string()),
                            channel: "#coin_feed_noti",
                            link_names: Some(true),
                            ..Default::default()
                        })
                        .await?;
                        // NOTE(review): this break stops checking the remaining
                        // symbols this round, so at most one new-failure alert
                        // is sent per check period — presumably deliberate
                        // rate limiting; confirm. The recovery branch above
                        // does not break.
                        break;
                    }
                } else {
                    // No message seen yet for this symbol since process start.
                    let elapsed = now-start;
                    if elapsed > grace_period {
                        failed[i] = true;
                        slack::send(slack_api::chat::PostMessageRequest {
                            text: &format!("[{}] [{}] Feed is still not ready for {}\n@donggu", get_hostname(), mear_symbol, humantime::format_duration(elapsed.to_std().unwrap()).to_string()),
                            channel: "#coin_feed_noti",
                            link_names: Some(true),
                            ..Default::default()
                        })
                        .await?;
                        // Same one-alert-per-round behavior as above.
                        break;
                    }
                }
            }
        }
        tokio::time::sleep(check_period).await;
    }
}

/// Entry point: parse CLI options, then spawn four long-running tasks —
/// the relay subscriber, the periodic feed-liveness checker, (optionally)
/// the InfluxDB writer, and the Rocket dashboard — and fail fast when any
/// of them returns an error or panics.
#[tokio::main]
async fn main() -> Result<()> {
    env_logger::init();
    let opts: Opts = Opts::parse();
    log::info!("{:?}", opts);

    if opts.mear_symbols.is_empty() {
        anyhow::bail!("--mear-symbols is not provided");
    }

    let start = chrono::Utc::now();

    // record up to 10 seconds
    let hists = std::sync::Arc::new(std::sync::RwLock::new(Histograms {
        rt_et: hdrhistogram::Histogram::<u64>::new_with_max(10 * 1_000, 3).unwrap(),
        ft_rt: hdrhistogram::Histogram::<u64>::new_with_max(10 * 1_000, 3).unwrap(),
    }));

    let (tx, rx) = tokio::sync::mpsc::channel(1000);
    let mut handles = vec![];
    let last_feed = LastFeedMap::default();

    {
        // clone_all! shadows each name with a clone so the async move block
        // can own them while the originals stay usable below.
        clone_all::clone_all!(opts, hists, last_feed);
        handles.push(tokio::spawn(async move {
            subscribe_relay(opts, tx, hists, last_feed).await?;
            Ok(())
        }));
    }

    // feed-relay subscription test
    {
        clone_all::clone_all!(opts, last_feed);
        handles.push(tokio::spawn(async move {
            period_feed_check(opts, last_feed, start).await?;
            Ok(())
        }));
    }

    // rx is only drained when InfluxDB is enabled; subscribe_relay likewise
    // only sends on the channel when enable_influxdb is set, so the pair is
    // consistent.
    if opts.enable_influxdb {
        let opts = opts.clone();
        handles.push(tokio::spawn(async move {
            send_to_influx(opts, rx).await?;
            Ok(())
        }));
    }

    {
        let hists = hists.clone();
        handles.push(tokio::spawn(async move {
            rocket::ignite()
                .manage(start)
                .manage(opts)
                .manage(last_feed)
                .manage(hists)
                .mount("/", rocket::routes![get_dashboard])
                .launch()
                .await?;
            Ok(())
        }));
    }

    // Map JoinError into anyhow, then abort on the first task failure.
    let handles: Vec<_> = handles.into_iter().map(|f| f.map(unwrap)).collect();
    futures::future::try_join_all(handles).await?;
    Ok(())
}
