use std::{cmp::Ordering, collections::HashMap, sync::Arc};

use futures::stream::{FuturesUnordered, StreamExt};
use futures_util::SinkExt;
use reqwest::Client;
use serde::Serialize;
use tokio::{
    sync::Semaphore,
    time::{self, Duration, Instant},
};
use tokio_tungstenite::{connect_async, tungstenite::Message};

use super::ProbeEnvelope;

/// Summary of a latency distribution; every field is in milliseconds.
/// Produced by `build_latency_stats` from raw per-request samples.
#[derive(Debug, Serialize, Clone)]
pub struct LatencyStats {
    // Smallest observed sample.
    pub min_ms: f64,
    // Largest observed sample.
    pub max_ms: f64,
    // Arithmetic mean over all samples.
    pub avg_ms: f64,
    // 50th percentile (nearest-rank on the sorted samples).
    pub p50_ms: f64,
    // 95th percentile (nearest-rank on the sorted samples).
    pub p95_ms: f64,
}

/// Aggregated result of one `http_bench` run.
#[derive(Debug, Serialize, Clone)]
pub struct HttpBenchReport {
    // Target URL the benchmark was run against.
    pub url: String,
    // Number of requests attempted (the requested count, floored to 1).
    pub total: u32,
    // Requests that received a response (any status code).
    pub success: u32,
    // Requests that errored, timed out, or whose task failed to join.
    pub failed: u32,
    // Wall-clock time for the whole run.
    pub duration_ms: u128,
    // Successful requests per second over the whole run.
    pub throughput_rps: f64,
    // Count of responses per HTTP status code.
    pub status_counts: HashMap<u16, u32>,
    // Latency summary; `None` when no request succeeded.
    pub latencies: Option<LatencyStats>,
    // Up to the first 10 error messages encountered.
    pub errors: Vec<String>,
}

/// Aggregated result of one `ws_bench` run.
#[derive(Debug, Serialize, Clone)]
pub struct WsBenchReport {
    // Target WebSocket URL.
    pub url: String,
    // Number of connections attempted (the requested count, floored to 1).
    pub connections: u32,
    // Connections that completed handshake + message sends + close.
    pub success: u32,
    // Connections that failed at any stage.
    pub failed: u32,
    // Wall-clock time for the whole run.
    pub duration_ms: u128,
    // Handshake latency summary; `None` when no connection succeeded.
    pub handshake: Option<LatencyStats>,
    // Up to the first 10 error messages encountered.
    pub errors: Vec<String>,
}

/// Parameters for `http_bench`.
pub struct HttpBenchOptions<'a> {
    // Target URL (GET requests only).
    pub url: &'a str,
    // Total requests to issue; values below 1 are treated as 1.
    pub requests: u32,
    // Maximum in-flight requests; values below 1 are treated as 1.
    pub concurrency: usize,
    // Per-request timeout; 0 means no timeout.
    pub timeout_ms: u64,
}

/// Parameters for `ws_bench`.
pub struct WsBenchOptions<'a> {
    // Target WebSocket URL.
    pub url: &'a str,
    // Total connections to open; values below 1 are treated as 1.
    pub connections: u32,
    // Maximum concurrent connections; values below 1 are treated as 1.
    pub concurrency: usize,
    // Messages to send per connection; 0 sends none.
    pub messages: u32,
    // Text payload for each message; `None` sends an empty ping instead.
    pub payload: Option<&'a str>,
    // Per-operation timeout (handshake and each send); 0 means no timeout.
    pub timeout_ms: u64,
}

/// Per-request result carried back from an `http_bench` worker task.
struct HttpOutcome {
    // Time from send until the response headers arrived, in milliseconds.
    latency_ms: f64,
    // HTTP status code of the response.
    status: u16,
}

/// Per-connection result carried back from a `ws_bench` worker task.
struct WsOutcome {
    // WebSocket handshake duration, in milliseconds.
    handshake_ms: f64,
}

/// Runs a fixed-size HTTP GET benchmark against `options.url`.
///
/// One task is spawned per request (`options.requests`, floored to 1); the
/// number of requests in flight at once is bounded by a semaphore with
/// `options.concurrency` permits (floored to 1). Latency is measured from
/// just before the request is sent until the response headers arrive; the
/// body is then drained before the task releases its permit. The returned
/// [`ProbeEnvelope`] has `success == true` only when every request succeeded,
/// and carries a full [`HttpBenchReport`] either way.
pub async fn http_bench(options: HttpBenchOptions<'_>) -> ProbeEnvelope<HttpBenchReport> {
    let total = options.requests.max(1);
    let concurrency = options.concurrency.max(1);
    // A timeout of 0 ms means "no timeout at all".
    let timeout = if options.timeout_ms == 0 {
        None
    } else {
        Some(Duration::from_millis(options.timeout_ms))
    };

    let client = match build_http_client(timeout) {
        Ok(client) => client,
        Err(err) => {
            // Client construction failed before any request was issued, so
            // there is no report data and no elapsed time to speak of.
            return ProbeEnvelope {
                target: options.url.to_string(),
                success: false,
                elapsed_ms: 0,
                data: None,
                error: Some(format!("failed to build HTTP client: {err}")),
            }
        }
    };

    let start_all = Instant::now();
    let semaphore = Arc::new(Semaphore::new(concurrency));
    let url = Arc::new(options.url.to_string());
    let client = Arc::new(client);

    let mut tasks = FuturesUnordered::new();
    for _ in 0..total {
        let permit_pool = semaphore.clone();
        let client = client.clone();
        let url = url.clone();
        let timeout = timeout;
        tasks.push(tokio::spawn(async move {
            let permit = permit_pool
                .acquire_owned()
                .await
                .map_err(|err| format!("semaphore error: {err}"))?;
            let start_req = Instant::now();
            // The explicit time::timeout only covers the send/headers phase;
            // when `timeout` is Some, the client built in build_http_client
            // also carries the same timeout, which covers the body read too.
            let result = if let Some(timeout) = timeout {
                match time::timeout(timeout, client.get(url.as_str()).send()).await {
                    Ok(res) => res,
                    Err(_) => {
                        drop(permit);
                        return Err("request timeout".into());
                    }
                }
            } else {
                client.get(url.as_str()).send().await
            };

            match result {
                Ok(resp) => {
                    let status = resp.status().as_u16();
                    // Latency is time-to-headers, recorded before the drain.
                    let latency = start_req.elapsed().as_secs_f64() * 1000.0;
                    // Drain the body while STILL holding the permit so that
                    // `concurrency` also bounds the download phase. (The
                    // previous version released the permit before this read,
                    // allowing more than `concurrency` concurrent downloads.)
                    // Body read errors are deliberately ignored: the headers
                    // already arrived, so the request counts via its status.
                    let _ = resp.bytes().await;
                    drop(permit);
                    Ok(HttpOutcome {
                        latency_ms: latency,
                        status,
                    })
                }
                Err(err) => {
                    drop(permit);
                    Err(format!("request error: {err}"))
                }
            }
        }));
    }

    let mut latencies = Vec::with_capacity(total as usize);
    let mut status_counts: HashMap<u16, u32> = HashMap::new();
    let mut success = 0u32;
    let mut failures = 0u32;
    let mut errors: Vec<String> = Vec::new();

    // Collect task results as they complete, keeping at most the first 10
    // error messages to avoid unbounded report growth.
    while let Some(result) = tasks.next().await {
        match result {
            Ok(Ok(outcome)) => {
                latencies.push(outcome.latency_ms);
                success += 1;
                *status_counts.entry(outcome.status).or_insert(0) += 1;
            }
            Ok(Err(err)) => {
                failures += 1;
                if errors.len() < 10 {
                    errors.push(err);
                }
            }
            Err(err) => {
                failures += 1;
                if errors.len() < 10 {
                    errors.push(format!("task join error: {err}"));
                }
            }
        }
    }

    let duration_ms = start_all.elapsed().as_millis();
    // Guard against a sub-millisecond run producing a division by zero.
    let throughput_rps = if duration_ms == 0 {
        success as f64
    } else {
        (success as f64) / (duration_ms as f64 / 1000.0)
    };
    let latencies = build_latency_stats(&latencies);

    let report = HttpBenchReport {
        url: options.url.to_string(),
        total,
        success,
        failed: failures,
        duration_ms,
        throughput_rps,
        status_counts,
        latencies,
        errors,
    };

    ProbeEnvelope {
        target: options.url.to_string(),
        success: failures == 0,
        elapsed_ms: duration_ms,
        data: Some(report),
        error: if failures == 0 {
            None
        } else {
            Some(format!("{failures} requests failed"))
        },
    }
}

/// Builds the reqwest client used by `http_bench`: fixed user agent, plus the
/// optional request timeout when one was supplied.
fn build_http_client(timeout: Option<Duration>) -> Result<Client, reqwest::Error> {
    let builder = Client::builder().user_agent("net-monitor-http-bench/0.1");
    match timeout {
        Some(limit) => builder.timeout(limit).build(),
        None => builder.build(),
    }
}

/// Runs a WebSocket connection benchmark against `options.url`.
///
/// Opens `options.connections` sockets (floored to 1), with at most
/// `options.concurrency` handshakes/sessions in flight at a time. Each
/// connection times its handshake, then sends `options.messages` frames
/// (`payload` as a text frame when given, otherwise an empty ping) before
/// closing. The returned [`ProbeEnvelope`] has `success == true` only when
/// every connection completed cleanly.
pub async fn ws_bench(options: WsBenchOptions<'_>) -> ProbeEnvelope<WsBenchReport> {
    let connections = options.connections.max(1);
    let concurrency = options.concurrency.max(1);
    // A timeout of 0 ms means "no timeout at all".
    let timeout = if options.timeout_ms == 0 {
        None
    } else {
        Some(Duration::from_millis(options.timeout_ms))
    };

    let semaphore = Arc::new(Semaphore::new(concurrency));
    let url = Arc::new(options.url.to_string());
    let payload = options.payload.map(|p| p.to_string());
    // Copy the message count into a plain `u32` local so the spawned tasks
    // capture a `'static` value explicitly instead of relying on precise
    // field capture of the borrowed `options` struct.
    let messages = options.messages;
    let mut tasks = FuturesUnordered::new();
    let start_all = Instant::now();
    for _ in 0..connections {
        let permit_pool = semaphore.clone();
        let url = url.clone();
        let payload = payload.clone();
        tasks.push(tokio::spawn(async move {
            let permit = permit_pool
                .acquire_owned()
                .await
                .map_err(|err| format!("semaphore error: {err}"))?;
            let start_conn = Instant::now();
            let (mut stream, _) = if let Some(timeout) = timeout {
                match time::timeout(timeout, connect_async(url.as_str())).await {
                    Ok(Ok(result)) => result,
                    Ok(Err(err)) => {
                        drop(permit);
                        return Err(format!("handshake error: {err}"));
                    }
                    Err(_) => {
                        drop(permit);
                        return Err("handshake timeout".into());
                    }
                }
            } else {
                match connect_async(url.as_str()).await {
                    Ok(result) => result,
                    Err(err) => {
                        drop(permit);
                        return Err(format!("handshake error: {err}"));
                    }
                }
            };
            let handshake_ms = start_conn.elapsed().as_secs_f64() * 1000.0;

            // `0..0` is an empty range, so no separate `messages > 0` guard
            // is needed here.
            for _ in 0..messages {
                let message = match payload.as_ref() {
                    Some(text) => Message::Text(text.clone()),
                    None => Message::Ping(Vec::new()),
                };
                if let Some(timeout) = timeout {
                    match time::timeout(timeout, stream.send(message)).await {
                        Ok(Ok(())) => {}
                        Ok(Err(err)) => {
                            // Best-effort close; the send error is what we report.
                            let _ = stream.close(None).await;
                            drop(permit);
                            return Err(format!("message send error: {err}"));
                        }
                        Err(_) => {
                            let _ = stream.close(None).await;
                            drop(permit);
                            return Err("message send timeout".into());
                        }
                    }
                } else if let Err(err) = stream.send(message).await {
                    let _ = stream.close(None).await;
                    drop(permit);
                    return Err(format!("message send error: {err}"));
                }
            }

            // Close errors are ignored: the session already did its work.
            let _ = stream.close(None).await;
            drop(permit);
            Ok(WsOutcome { handshake_ms })
        }));
    }

    let mut latencies = Vec::with_capacity(connections as usize);
    let mut success = 0u32;
    let mut failures = 0u32;
    let mut errors: Vec<String> = Vec::new();

    // Collect task results as they complete, keeping at most the first 10
    // error messages to avoid unbounded report growth.
    while let Some(result) = tasks.next().await {
        match result {
            Ok(Ok(outcome)) => {
                latencies.push(outcome.handshake_ms);
                success += 1;
            }
            Ok(Err(err)) => {
                failures += 1;
                if errors.len() < 10 {
                    errors.push(err);
                }
            }
            Err(err) => {
                failures += 1;
                if errors.len() < 10 {
                    errors.push(format!("task join error: {err}"));
                }
            }
        }
    }

    let duration_ms = start_all.elapsed().as_millis();
    let report = WsBenchReport {
        url: options.url.to_string(),
        connections,
        success,
        failed: failures,
        duration_ms,
        handshake: build_latency_stats(&latencies),
        errors,
    };

    ProbeEnvelope {
        target: options.url.to_string(),
        success: failures == 0,
        elapsed_ms: duration_ms,
        data: Some(report),
        error: if failures == 0 {
            None
        } else {
            Some(format!("{failures} connections failed"))
        },
    }
}

/// Condenses raw latency samples (milliseconds) into a [`LatencyStats`].
/// Returns `None` when there are no samples at all.
fn build_latency_stats(samples: &[f64]) -> Option<LatencyStats> {
    if samples.is_empty() {
        return None;
    }

    // Sort ascending. f64 has no total order because of NaN, so unordered
    // pairs are resolved explicitly: NaN sorts after everything, and two
    // NaNs compare equal.
    let mut sorted = samples.to_vec();
    sorted.sort_by(|a, b| match a.partial_cmp(b) {
        Some(order) => order,
        None if a.is_nan() && b.is_nan() => Ordering::Equal,
        None if a.is_nan() => Ordering::Greater,
        None => Ordering::Less,
    });

    // Non-empty is guaranteed above, so direct indexing is safe.
    let count = sorted.len() as f64;
    let total: f64 = sorted.iter().sum();

    Some(LatencyStats {
        min_ms: sorted[0],
        max_ms: sorted[sorted.len() - 1],
        avg_ms: total / count,
        p50_ms: percentile(&sorted, 0.50),
        p95_ms: percentile(&sorted, 0.95),
    })
}

/// Nearest-rank percentile of an ascending-sorted slice: the index is the
/// rounded position `(len - 1) * quantile`, clamped into bounds. An empty
/// slice yields 0.0.
fn percentile(sorted: &[f64], quantile: f64) -> f64 {
    match sorted.len() {
        0 => 0.0,
        len => {
            let last = (len - 1) as f64;
            let idx = (last * quantile).round().clamp(0.0, last) as usize;
            sorted[idx]
        }
    }
}
