use axum::routing::get;
use axum::{Router, routing::post};
use futures_core::{Stream, TryStream};
use futures_util::{stream, stream::StreamExt, stream::TryStreamExt};
use hdrhistogram::Histogram;
use pin_project_lite::pin_project;
use rand::{self, Rng};
use std::hash::Hash;
use std::time::Duration;
use std::{
    pin::Pin,
    task::{Context, Poll},
};
use axum::body::Body;

use tokio::time::{self, Instant};
use tower::balance as lb;
use tower::discover::{Change, Discover};
use tower::limit::concurrency::ConcurrencyLimit;
use tower::limit::{ConcurrencyLimitLayer, RateLimitLayer};
use tower::load;
use tower::util::ServiceExt;
use tower::{ServiceBuilder, timeout::TimeoutLayer};
use tower_http::trace::TraceLayer;
use tower_service::Service;
/// POST /orders handler: acknowledges the request with a static "ok" body.
async fn create_order() -> &'static str {
    const RESPONSE: &str = "ok";
    RESPONSE
}

/// GET / handler: health-check style endpoint returning a static body.
async fn hello() -> &'static str {
    let body: &'static str = "ok";
    body
}
use axum::extract::Request;
use tower::util::BoxCloneService;

/// Total number of simulated requests for the load-test scenario.
const REQUESTS: usize = 100_000;
/// Maximum number of in-flight requests.
const CONCURRENCY: usize = 500;
/// Default round-trip time assumed per endpoint.
const DEFAULT_RTT: Duration = Duration::from_millis(30);
/// Per-endpoint capacity; mirrors the global concurrency limit.
/// `const` rather than `static`: these are plain values with no identity
/// or interior mutability, so a promoted constant is the idiomatic form.
const ENDPOINT_CAPACITY: usize = CONCURRENCY;
/// Simulated worst-case latency for each of the ten endpoints.
const MAX_ENDPOINT_LATENCIES: [Duration; 10] = [
    Duration::from_millis(1),
    Duration::from_millis(5),
    Duration::from_millis(10),
    Duration::from_millis(10),
    Duration::from_millis(10),
    Duration::from_millis(100),
    Duration::from_millis(100),
    Duration::from_millis(100),
    Duration::from_millis(500),
    Duration::from_millis(1000),
];

/// Aggregated results of a load-test run.
/// NOTE(review): not constructed anywhere in the visible portion of this
/// file — presumably intended for the benchmark loop; confirm or remove.
struct Summary {
    // Latency distribution recorded as an HDR histogram with u64 counts.
    latencies: Histogram<u64>,
    // Timestamp taken at the start of the run.
    start: Instant,
    // Per-endpoint request counts; length matches MAX_ENDPOINT_LATENCIES.
    count_by_instance: [usize; 10],
}
use tokio::sync::mpsc;
use tokio::time::sleep;
use tracing;
use tracing_subscriber;
use tower::load_shed::LoadShedLayer;
use tower_governor::{governor::GovernorConfigBuilder, GovernorLayer};

/// A unit of work carried through the bounded mpsc queue in `main`;
/// the payload is the job's sequence number.
#[derive(Debug)]
struct Job(u64);
use axum::{
    http::{ Response},
    middleware::Next,
};
use tokio::signal;

/// Resolves once the process receives Ctrl+C (SIGINT) or, on Unix, SIGTERM,
/// then prints a shutdown notice. Used as axum's graceful-shutdown trigger.
async fn shutdown_signal() {
    // Future that completes when Ctrl+C is pressed.
    let interrupt = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    // Future that completes on SIGTERM (Unix only).
    #[cfg(unix)]
    let terminate = async {
        let mut sigterm = signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler");
        sigterm.recv().await;
    };

    // No SIGTERM on other platforms: use a future that never resolves.
    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    // Whichever signal arrives first wins.
    tokio::select! {
        _ = interrupt => {},
        _ = terminate => {},
    }

    println!("收到停机信号，准备优雅退场...");
}
use tokio_util::sync::CancellationToken;

/// Background worker: prints a heartbeat roughly once per second until
/// `token` is cancelled, then prints a farewell message and returns.
async fn background_task(token: CancellationToken) {
    loop {
        // Stop immediately if shutdown was already requested.
        if token.is_cancelled() {
            break;
        }
        println!("后台厨子正在颠勺...");
        tokio::select! {
            // The shutdown bell rang — stop working right away.
            _ = token.cancelled() => break,
            _ = tokio::time::sleep(std::time::Duration::from_secs(1)) => {}
        }
    }
    println!("厨子打扫完厨房，下班！");
}
/// Entry point: runs a demo mpsc producer/consumer pair, then starts an
/// axum server on 0.0.0.0:3000 with per-IP rate limiting (tower-governor),
/// a cancellable background task, and graceful shutdown on Ctrl+C/SIGTERM.
#[tokio::main]
async fn main() {
    println!("Hello, world!");
    tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::default()).unwrap();

    // "Remote control" for shutdown: cancelled after the server exits to
    // tell the background task to stop.
    let cancellation_token = CancellationToken::new();
    let token_clone = cancellation_token.clone();

    // Spawn the background worker with its own clone of the token.
    let background_handle = tokio::spawn(background_task(token_clone));

    println!("REQUESTS={}", REQUESTS);
    println!("CONCURRENCY={}", CONCURRENCY);
    println!("ENDPOINT_CAPACITY={}", ENDPOINT_CAPACITY);
    print!("MAX_ENDPOINT_LATENCIES=[");
    for max in &MAX_ENDPOINT_LATENCIES {
        // Convert each Duration to whole milliseconds for display.
        let l = max.as_secs() * 1_000 + u64::from(max.subsec_millis());
        print!("{}ms, ", l);
    }
    println!("]");

    // Bounded queue, capacity 1024.
    let (tx, mut rx) = mpsc::channel::<Job>(1024);

    // Producer: if the queue is full, try_send returns an error; here we
    // choose to fail fast rather than await capacity.
    let producer = tokio::spawn(async move {
        for i in 0..10_000u64 {
            if tx.try_send(Job(i)).is_err() {
                // Backpressure signal: drop or log and let upstream retry.
            }
        }
    });

    // Consumer: simulate per-job processing latency.
    let consumer = tokio::spawn(async move {
        while let Some(job) = rx.recv().await {
            // Handle one job.
            let _ = job;
            sleep(Duration::from_millis(2)).await;
        }
    });

    // NOTE(review): this blocks server startup until both tasks finish
    // (the consumer drains the queue at ~2ms/job) — confirm intended.
    let _ = tokio::join!(producer, consumer);
    //https://mp.weixin.qq.com/s/_5Xx0VVY2AYPzBcS4DKqGQ?poc_token=HAL3rmijpo6EESWsYHw8XQfopzJUvJFFtsv00sA2
    // NOTE(review): built but never applied — the `.layer(middleware_stack)`
    // call below is commented out.
    let middleware_stack = ServiceBuilder::new()
        .layer(ConcurrencyLimitLayer::new(256))
        .layer(RateLimitLayer::new(100, Duration::from_secs(1)))
        .layer(TimeoutLayer::new(Duration::from_secs(2)))
        .layer(TraceLayer::new_for_http())
        .into_inner();

    // Allow each IP address at most five requests, replenishing one every
    // two seconds.
    // Boxed because Axum 0.6 requires all layers to be Clone, so we need a
    // static reference (leaked below).
    let governor_conf = Box::new(
        GovernorConfigBuilder::default()
            .per_second(2)
            .burst_size(5)
            .finish()
            .unwrap(),
    );

    let governor_limiter = governor_conf.limiter().clone();
    let interval = Duration::from_secs(60);
    // A separate background thread that periodically evicts stale
    // rate-limiter entries so the storage does not grow unbounded.
    std::thread::spawn(move || {
        loop {
            std::thread::sleep(interval);
            tracing::info!("rate limiting storage size: {}", governor_limiter.len());
            governor_limiter.retain_recent();
        }
    });

    // Wrap the Router with BoxCloneService to give it Clone capability.
    // (The BoxCloneService experiment itself is commented out below.)
    let app = Router::new()
        .route("/orders", post(create_order))
        .route("/", get(hello))
        .layer(GovernorLayer {
            // We can leak it because it is created exactly once.
            config: Box::leak(governor_conf),
        })
    // let shared_service = BoxCloneService::new(app.clone())
        //.layer(middleware_stack)
       .into_make_service_with_connect_info::<std::net::SocketAddr>();


    //.layer(shared_service);

    let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap();
    println!("Server running on http://0.0.0.0:3000");
    axum::serve(listener, app)
        .with_graceful_shutdown(shutdown_signal())
        .await
        .unwrap();

    // Server has stopped: press the remote to tell the worker to clock out.
    cancellation_token.cancel();

    // Wait for the worker to finish cleaning up.
    if let Err(e) = background_handle.await {
        eprintln!("厨子下班路上出了点问题: {}", e);
    }

    println!("餐厅完美打烊，所有人都安全回家！");
}
