use std::{
    sync::{Arc, Mutex},
    time::Duration,
};

use axum::{
    body::Bytes,
    extract::{Multipart, State},
    http::StatusCode,
    routing::post,
    Json, Router,
};
use base64::{engine::general_purpose, Engine};
use tower_http::trace::TraceLayer;
use tracing::Span;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};

mod models;
mod yolov8;

#[tokio::main]
async fn main() {
    // Install log tracing; `RUST_LOG` overrides the default filter.
    tracing_subscriber::registry()
        .with(
            tracing_subscriber::EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| "my_reqwest_response=debug,tower_http=debug".into()),
        )
        .with(tracing_subscriber::fmt::layer())
        .init();

    // Initialize the model; unspecified fields keep their defaults.
    let config = yolov8::yolo_config::Config {
        model: "/home/xtxk/workspace/yolov8/models/yolov8m.onnx".to_string(),
        cuda: true,
        profile: true,
        fp16: true,
        ..Default::default()
    };
    // Loading the model is a startup invariant — abort with a clear message on failure.
    let model = yolov8::model::YOLOv8::new(config).expect("failed to load YOLOv8 model");
    model.summary();
    let model = AppState {
        predictor: Arc::new(Mutex::new(model)),
    };

    // Configure routes; the trace layer wraps both of them.
    let app = Router::new()
        .route("/yolov8/predict", post(yolov8_predict_handler))
        .route("/file/upload", post(file_upload_handler))
        .with_state(model)
        .layer(TraceLayer::new_for_http().on_body_chunk(
            |chunk: &Bytes, _latency: Duration, _span: &Span| {
                tracing::debug!("streaming {} bytes", chunk.len());
            },
        ));

    // Bind the listener; failure to bind is fatal at startup.
    let listener = tokio::net::TcpListener::bind("0.0.0.0:5061")
        .await
        .expect("failed to bind 0.0.0.0:5061");
    tracing::debug!("listening on {}", listener.local_addr().unwrap());

    // Start serving.
    axum::serve(listener, app.into_make_service())
        .await
        .expect("server error");
}

/// Shared application state injected into handlers via axum's `State` extractor.
///
/// `Router::with_state` hands a clone to each handler invocation, hence
/// `derive(Clone)`; every clone shares the one model through the `Arc`.
#[derive(Clone)]
struct AppState {
    /// The YOLOv8 predictor. The `Mutex` serializes inference across requests
    /// (presumably `run` needs exclusive access — `model_predict` locks and
    /// binds the guard as `mut`; confirm against `yolov8::model::YOLOv8::run`).
    predictor: Arc<Mutex<yolov8::model::YOLOv8>>,
}

// 处理 yolov8 预测请求
async fn yolov8_predict_handler(
    State(model): State<AppState>,
    Json(body): Json<models::request::PredictRequest>,
) -> Json<models::request::PredictResult> {
    let img_bytes = general_purpose::STANDARD.decode(body.image).unwrap();
    let img = image::load_from_memory_with_format(&img_bytes, image::ImageFormat::Jpeg).unwrap();

    let res = model_predict(body.request_id, &model.predictor, img);

    Json(res)
}

// 处理文件上传请求
async fn file_upload_handler(
    State(model): State<AppState>,
    mut multipart: Multipart,
) -> Json<models::request::PredictResult> {
    let r = if let Some(file) = multipart.next_field().await.unwrap() {
        // let content_ype = file.content_type().unwrap().to_string();
        // println!("content_type: {}", content_ype);
        let t_pre = std::time::Instant::now();
        let data = file.bytes().await.unwrap();
        println!("[Image Dowload]: {:?}", t_pre.elapsed());
        let img = image::load_from_memory_with_format(&data,image::ImageFormat::Jpeg).unwrap();

        println!("[Image Parse]: {:?}", t_pre.elapsed());


        // 保存上传文件
        // tokio::fs::write("d:/207/image-1-axum.jpeg", &data)
        //     .await
        //     .unwrap();
        model_predict("request_id".to_string(), &model.predictor, img)
    } else {
        models::request::PredictResult {
            request_id: "request_id".to_string(),
            objs: None,
        }
    };
    Json(r)
}

// 模型推理
fn model_predict(
    request_id: String,
    predictor: &Arc<Mutex<yolov8::model::YOLOv8>>,
    img: image::DynamicImage,
) -> models::request::PredictResult {
    let mut predictor = predictor.lock().unwrap();
    let res = *predictor.run(&vec![img]).unwrap();
    // 构建推理结果
    let t_pre = std::time::Instant::now();
    let mut yes_arr = res
        .into_iter()
        .map(|yes| match yes.bboxes {
            Some(bboxes) => bboxes
                .iter()
                .map(|b| models::request::PredictObject {
                    class_name: "person".to_string(),
                    class_id: b.id() as i32,
                    conf: b.confidence(),
                    position: vec![
                        b.xmin() as i32,
                        b.ymin() as i32,
                        b.xmax() as i32,
                        b.ymax() as i32,
                    ],
                })
                .collect::<Vec<_>>(),
            _ => Vec::new(),
        })
        .collect::<Vec<_>>();
    println!("[Predict Result Parse]: {:?}", t_pre.elapsed());
    models::request::PredictResult {
        request_id: request_id,
        objs: Some(yes_arr.pop().unwrap()),
    }
}
