#![allow(warnings)]
use clap::Parser;
use indicatif::{ProgressBar, ProgressStyle};
use reqwest::{Client, StatusCode, header, Proxy};
use std::sync::Arc;
use tokio::{
    fs::{File, OpenOptions},
    io::{AsyncWriteExt, AsyncSeekExt},
    sync::{Semaphore, Mutex},
    task::JoinSet,
    time::{Instant, Duration},
};
use futures_util::stream::StreamExt;
use url::Url;
use serde::{Serialize, Deserialize};
use std::path::{Path, PathBuf};
use std::io::{Seek, SeekFrom};
use memmap2::{MmapOptions, Mmap};
// Command-line interface definition, parsed by clap's derive API.
// The `///` doc comments below double as --help text and are therefore
// runtime-visible strings; do not edit them casually.
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
struct Args {
    /// URL to download
    url: String,
    /// Output file path (optional, derived from URL if not provided)
    #[arg(short, long)]
    output: Option<String>,
    /// Number of parallel connections (default: 64, min: 1)
    #[arg(short = 'n', long, default_value = "64")]
    connections: usize,
    /// Chunk size in bytes (default: 10MB, min: 1KB)
    #[arg(short = 'c', long, default_value = "10485760")]
    chunk_size: usize,
    /// Buffer size in bytes (default: 2MB, min: 64KB)
    #[arg(short = 'b',long, default_value = "2097152")]
    buffer_size: usize,
    /// Custom headers (format: "Header-Name: Header Value")
    // Repeatable flag; each occurrence is validated by `parse_header`.
    #[arg(short = 'H', long = "header", value_parser = parse_header)]
    headers: Vec<(String, String)>,
    /// Referer URL
    #[arg(short = 'r',long)]
    referer: Option<String>,
    /// Cookie string
    #[arg(short = 'e',long)]
    cookie: Option<String>,
    /// Force single connection (disable range requests)
    #[arg(short = 's',long)]
    single_connection: bool,
    /// Proxy URL (e.g., http://proxy.example.com:8080)
    #[arg(long)]
    proxy: Option<String>,
    /// HTTP proxy URL (overrides --proxy for HTTP)
    #[arg(long = "http-proxy")]
    http_proxy: Option<String>,
    /// HTTPS proxy URL (overrides --proxy for HTTPS)
    #[arg(long = "https-proxy")]
    https_proxy: Option<String>,
    /// Comma-separated list of domains to bypass proxy
    #[arg(long = "no-proxy")]
    no_proxy: Option<String>,
    /// Resume a paused download (requires state file)
    #[arg(long)]
    resume: bool,
    /// Pause download after specified seconds (for testing)
    #[arg(long)]
    pause_after: Option<u64>,
    /// State file path for pause/resume (default: .download_state.json)
    #[arg(long)]
    state_file: Option<String>,
    /// Maximum retry attempts for failed chunks (default: 3)
    #[arg(long, default_value = "3")]
    max_retries: u32,
    /// Retry delay in seconds (default: 2)
    // Base delay; the enhanced retry path applies exponential backoff on top.
    #[arg(long, default_value = "2")]
    retry_delay: u64,
    /// Enable memory-mapped I/O for large files (default: true)
    // NOTE(review): in clap v4 derive, a bare `bool` field uses ArgAction::SetTrue;
    // combined with `default_value = "true"` the flag may be impossible to turn
    // off from the command line — confirm the clap version and intended action.
    #[arg(long, default_value = "true")]
    use_mmap: bool,
    /// Minimum file size to use memory mapping (default: 1GB)
    #[arg(long, default_value = "1073741824")]
    mmap_threshold: u64,
    /// Check disk space before download (default: true)
    // NOTE(review): same SetTrue/default_value concern as `use_mmap` above.
    #[arg(long, default_value = "true")]
    check_disk_space: bool,
    /// Adaptive buffer sizing based on file size (default: true)
    // NOTE(review): same SetTrue/default_value concern as `use_mmap` above.
    #[arg(long, default_value = "true")]
    adaptive_buffer: bool,
    /// Network timeout in seconds (default: 300)
    #[arg(long, default_value = "300")]
    network_timeout: u64,
}

// Persistent snapshot of a download, serialized to the JSON state file so an
// interrupted transfer can be resumed with --resume.
#[derive(Serialize, Deserialize, Debug, Clone)]
struct DownloadState {
    // Source URL the download was started from.
    url: String,
    // Destination file path on disk.
    output_path: String,
    // Total file size in bytes; 0 when the server did not report a length.
    total_size: u64,
    // Whether the server honored HTTP Range requests (enables parallelism).
    supports_ranges: bool,
    // Per-chunk progress; one entry per byte range.
    chunks: Vec<ChunkState>,
    // Wall-clock time the download was first started.
    start_time: std::time::SystemTime,
    // Running total of bytes downloaded across all chunks.
    downloaded_bytes: u64,
}

// Progress record for a single byte range of the file.
#[derive(Serialize, Deserialize, Debug, Clone)]
struct ChunkState {
    // First byte offset of the range (inclusive).
    start: u64,
    // Last byte offset of the range (inclusive).
    end: u64,
    // Bytes of this chunk downloaded so far.
    downloaded: u64,
    // True once the full range has been written.
    completed: bool,
}

/// Parse a `"Header-Name: Header Value"` string into a trimmed (name, value)
/// pair. Returns an error message when the colon separator is missing.
fn parse_header(s: &str) -> Result<(String, String), String> {
    let Some((name, value)) = s.split_once(':') else {
        return Err("Invalid header format. Expected 'Header-Name: Header Value'".to_string());
    };
    Ok((name.trim().to_string(), value.trim().to_string()))
}

impl DownloadState {
    /// Create a fresh state for a new download; every chunk starts pending.
    fn new(url: String, output_path: String, total_size: u64, supports_ranges: bool, chunks: Vec<(u64, u64)>) -> Self {
        let chunk_states = chunks.into_iter().map(|(start, end)| ChunkState {
            start,
            end,
            downloaded: 0,
            completed: false,
        }).collect();

        Self {
            url,
            output_path,
            total_size,
            supports_ranges,
            chunks: chunk_states,
            start_time: std::time::SystemTime::now(),
            downloaded_bytes: 0,
        }
    }

    /// Byte ranges (inclusive) that have finished downloading.
    fn get_completed_chunks(&self) -> Vec<(u64, u64)> {
        self.chunks.iter()
            .filter(|chunk| chunk.completed)
            .map(|chunk| (chunk.start, chunk.end))
            .collect()
    }

    /// Byte ranges (inclusive) still to be downloaded; used when resuming.
    fn get_pending_chunks(&self) -> Vec<(u64, u64)> {
        self.chunks.iter()
            .filter(|chunk| !chunk.completed)
            .map(|chunk| (chunk.start, chunk.end))
            .collect()
    }

    /// Record progress for the chunk whose range begins at `start`.
    ///
    /// Bug fix: the previous version overwrote `chunk.downloaded` *before*
    /// computing the delta, so `downloaded_bytes` was incremented by
    /// `chunk_size - chunk_size == 0` for every completed chunk and overall
    /// progress never advanced. We now credit the newly downloaded bytes
    /// (current minus previous), which also accounts for partial updates.
    fn update_chunk_progress(&mut self, start: u64, downloaded: u64, completed: bool) {
        if let Some(chunk) = self.chunks.iter_mut().find(|c| c.start == start) {
            let previously_downloaded = chunk.downloaded;
            chunk.downloaded = downloaded;
            chunk.completed = completed;
            // saturating_sub guards against a caller reporting a smaller total
            // than a previous update (progress can only move forward).
            self.downloaded_bytes += downloaded.saturating_sub(previously_downloaded);
        }
    }

    /// True once every chunk has completed.
    fn is_complete(&self) -> bool {
        self.chunks.iter().all(|chunk| chunk.completed)
    }

    /// Completion percentage in [0, 100]; 0.0 when the total size is unknown.
    fn get_progress(&self) -> f64 {
        if self.total_size == 0 {
            0.0
        } else {
            (self.downloaded_bytes as f64 / self.total_size as f64) * 100.0
        }
    }
}

/// Serialize the download state as pretty-printed JSON and persist it to
/// `state_file`, overwriting any previous snapshot.
async fn save_state(state: &DownloadState, state_file: &str) -> Result<(), Box<dyn std::error::Error>> {
    let serialized = serde_json::to_string_pretty(state)?;
    tokio::fs::write(state_file, serialized.as_bytes()).await?;
    Ok(())
}

/// Download a single chunk, retrying on failure with a fixed delay.
///
/// Makes one initial attempt plus up to `max_retries` retries, sleeping
/// `retry_delay` seconds between attempts. Returns the last error when the
/// retry budget is exhausted.
async fn download_chunk_with_retry(
    client: &Client,
    url: &str,
    start: u64,
    end: u64,
    use_ranges: bool,
    file: &Arc<Mutex<File>>,
    progress: &ProgressBar,
    buffer_size: usize,
    max_retries: u32,
    retry_delay: u64,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let mut attempt: u32 = 0;

    loop {
        let outcome = download_chunk(client, url, start, end, use_ranges, file, progress, buffer_size).await;
        let err = match outcome {
            Ok(()) => return Ok(()),
            Err(e) => e,
        };

        attempt += 1;
        if attempt > max_retries {
            return Err(format!("Failed to download chunk {}-{} after {} retries: {}", start, end, max_retries, err).into());
        }
        eprintln!("Retry {} for chunk {}-{}: {}", attempt, start, end, err);
        tokio::time::sleep(Duration::from_secs(retry_delay)).await;
    }
}

/// Optimized single connection download
///
/// Thin delegate: forwards directly to `download_single_connection` with the
/// same arguments. Kept as a separate entry point so callers can target the
/// "optimized" name without caring about the implementation.
async fn download_single_connection_optimized(
    client: &Client,
    url: &str,
    file: &Arc<Mutex<File>>,
    progress: &ProgressBar,
    buffer_size: usize,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    download_single_connection(client, url, file, progress, buffer_size).await
}

/// Read and deserialize a previously saved download state from `state_file`.
///
/// Fails if the file is missing, unreadable, or not valid state JSON.
async fn load_state(state_file: &str) -> Result<DownloadState, Box<dyn std::error::Error>> {
    let raw = tokio::fs::read_to_string(state_file).await?;
    Ok(serde_json::from_str::<DownloadState>(&raw)?)
}

/// Check available disk space before download
///
/// Ensures the output file's parent directory exists (creating it if needed),
/// then verifies — on Unix only — that the filesystem has at least
/// `required_size` bytes free. On Windows the check is skipped with a warning.
async fn check_disk_space(output_path: &Path, required_size: u64) -> Result<(), Box<dyn std::error::Error>> {
    use std::fs;
    
    // Get the parent directory of the output file
    let parent_dir = output_path.parent()
        .ok_or("Invalid output path")?;
    
    // Ensure the directory exists
    if !parent_dir.exists() {
        fs::create_dir_all(parent_dir)?;
    }
    
    // Get disk space information
    #[cfg(windows)]
    {
        // For Windows, we'll skip the disk space check for now
        // to avoid path issues, but log a warning
        eprintln!("Warning: Disk space check skipped on Windows. Ensure you have sufficient space for {} bytes.", required_size);
        return Ok(());
    }
    
    #[cfg(unix)]
    {
        use std::os::unix::fs::MetadataExt;
        use nix::sys::statvfs::statvfs;
        
        // NOTE(review): POSIX free-space math conventionally uses the fragment
        // size (f_frsize), not f_bsize; also `MetadataExt` appears unused here.
        // Confirm both against the nix crate version pinned in Cargo.toml.
        let stat = statvfs(parent_dir.as_os_str())?;
        let free_space = stat.f_bsize as u64 * stat.f_bavail as u64;
        
        if free_space < required_size {
            return Err(format!(
                "Insufficient disk space. Required: {} bytes, Available: {} bytes",
                required_size, free_space
            ).into());
        }
    }
    
    Ok(())
}

/// Calculate adaptive buffer size based on file size.
///
/// Scales the base buffer by a tier-dependent multiplier — 1x below 1 GB,
/// 2x below 10 GB (capped at 16 MB), 4x below 100 GB (capped at 64 MB),
/// and 8x above that (capped at 128 MB).
fn calculate_adaptive_buffer_size(file_size: u64, base_buffer_size: usize) -> usize {
    const GIB: u64 = 1024 * 1024 * 1024;
    const MIB: usize = 1024 * 1024;

    // (multiplier, cap) per file-size tier; the smallest tier is uncapped
    // because it returns the base size unchanged.
    let (multiplier, cap) = if file_size < GIB {
        (1, usize::MAX)
    } else if file_size < 10 * GIB {
        (2, 16 * MIB)
    } else if file_size < 100 * GIB {
        (4, 64 * MIB)
    } else {
        (8, 128 * MIB)
    };

    (base_buffer_size * multiplier).min(cap)
}

/// Enhanced chunk download with memory mapping support for large files
///
/// Dispatcher: when `use_mmap` is enabled and the chunk is at least
/// `mmap_threshold` bytes, delegates to `download_chunk_with_mmap` (which
/// buffers the whole chunk in memory before writing); otherwise streams the
/// chunk incrementally via `download_chunk`.
async fn download_chunk_enhanced(
    client: &Client,
    url: &str,
    start: u64,
    end: u64,
    use_ranges: bool,
    file: &Arc<Mutex<File>>,
    progress: &ProgressBar,
    buffer_size: usize,
    use_mmap: bool,
    mmap_threshold: u64,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Inclusive range, hence the +1.
    let chunk_size = end - start + 1;
    
    // Use memory mapping for large chunks if enabled
    if use_mmap && chunk_size >= mmap_threshold {
        return download_chunk_with_mmap(client, url, start, end, use_ranges, file, progress).await;
    }
    
    // Use regular streaming for smaller chunks
    download_chunk(client, url, start, end, use_ranges, file, progress, buffer_size).await
}

/// Download a chunk by buffering the entire response body in memory, then
/// writing it to the shared file at offset `start` with one seek + write.
///
/// NOTE(review): despite the name, no memory-mapped I/O happens here — the
/// chunk is held in RAM via `resp.bytes()`. The name is kept for caller
/// compatibility.
///
/// Returns an error when the server does not honor the range request, when a
/// non-range request fails, or when the body length does not match the
/// requested range.
async fn download_chunk_with_mmap(
    client: &Client,
    url: &str,
    start: u64,
    end: u64,
    use_ranges: bool,
    file: &Arc<Mutex<File>>,
    progress: &ProgressBar,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Prepare request with range header if supported
    let mut request = client.get(url);
    if use_ranges {
        request = request.header(header::RANGE, format!("bytes={}-{}", start, end));
    }
    
    // Send request and check status
    let resp = request.send().await?;
    if use_ranges && resp.status() != StatusCode::PARTIAL_CONTENT {
        return Err(format!(
            "Expected partial content (206), got {}",
            resp.status()
        )
        .into());
    }
    // Bug fix: previously a failed non-range response (4xx/5xx) was never
    // detected, so an HTML error page could be written into the output file.
    if !use_ranges && !resp.status().is_success() {
        return Err(format!("Server responded with status: {}", resp.status()).into());
    }
    
    // Buffer the full chunk and verify we received exactly the bytes requested.
    let data = resp.bytes().await?;
    let expected_bytes = end - start + 1;
    
    if data.len() as u64 != expected_bytes {
        return Err(format!(
            "Downloaded {} bytes but expected {} bytes for chunk {}-{}",
            data.len(), expected_bytes, start, end
        ).into());
    }
    
    // Write the chunk at its offset; the mutex serializes access to the shared
    // file handle across concurrent chunk tasks.
    {
        let mut file_guard = file.lock().await;
        file_guard.seek(tokio::io::SeekFrom::Start(start)).await?;
        file_guard.write_all(&data).await?;
    }
    
    progress.inc(expected_bytes);
    Ok(())
}

/// Enhanced single connection download with large file optimizations
///
/// Probes the URL with a GET to learn the content length, then dispatches:
/// large known-size files (>= `mmap_threshold`, with `use_mmap` set) go to the
/// in-memory path, everything else streams via `download_single_connection`.
///
/// NOTE(review): the probe response body is dropped and both delegates issue a
/// *second* GET for the same URL, so every download here makes two requests —
/// confirm this is acceptable (servers may count it against rate limits).
async fn download_single_connection_enhanced(
    client: &Client,
    url: &str,
    file: &Arc<Mutex<File>>,
    progress: &ProgressBar,
    buffer_size: usize,
    use_mmap: bool,
    mmap_threshold: u64,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let resp = client.get(url).send().await?;
    if !resp.status().is_success() {
        return Err(format!("Server responded with status: {}", resp.status()).into());
    }
    
    // Update progress bar length if we now know the total size
    let total_size = if let Some(content_length) = resp.headers()
        .get(header::CONTENT_LENGTH)
        .and_then(|h| h.to_str().ok())
        .and_then(|s| s.parse::<u64>().ok()) {
        progress.set_length(content_length);
        content_length
    } else {
        // Size unknown: fall through to the streaming path below.
        0
    };
    
    // For very large files with known size, use memory mapping if enabled
    if use_mmap && total_size >= mmap_threshold && total_size > 0 {
        return download_single_connection_with_mmap(client, url, file, progress).await;
    }
    
    // Use regular streaming for smaller files or unknown size
    download_single_connection(client, url, file, progress, buffer_size).await
}

/// Single connection download using memory mapping for large files
///
/// NOTE(review): despite the name, this does not memory-map anything — it
/// buffers the ENTIRE file in RAM via `resp.bytes()` before writing it out,
/// which is risky for the very large files it is meant to optimize. Confirm
/// whether an actual mmap (or streaming) implementation was intended.
async fn download_single_connection_with_mmap(
    client: &Client,
    url: &str,
    file: &Arc<Mutex<File>>,
    progress: &ProgressBar,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let resp = client.get(url).send().await?;
    if !resp.status().is_success() {
        return Err(format!("Server responded with status: {}", resp.status()).into());
    }
    
    // Get the total size; unlike the streaming path, a missing Content-Length
    // is a hard error here because the length check below depends on it.
    let total_size = resp.headers()
        .get(header::CONTENT_LENGTH)
        .and_then(|h| h.to_str().ok())
        .and_then(|s| s.parse::<u64>().ok())
        .ok_or("Content-Length header missing or invalid")?;
    
    // Download the entire file as bytes (held fully in memory).
    let data = resp.bytes().await?;
    
    if data.len() as u64 != total_size {
        return Err(format!(
            "Downloaded {} bytes but expected {} bytes",
            data.len(), total_size
        ).into());
    }
    
    // Write the buffered body from the start of the file in one shot.
    {
        let mut file_guard = file.lock().await;
        file_guard.seek(tokio::io::SeekFrom::Start(0)).await?;
        file_guard.write_all(&data).await?;
    }
    
    progress.inc(total_size);
    Ok(())
}

/// Get free disk space on Windows.
///
/// Placeholder implementation: the real `GetDiskFreeSpaceExW` call is not
/// wired up, so this ignores `_path` and reports an enormous value, which
/// effectively bypasses the disk-space check on Windows.
///
/// Cleanup: removed the unused `OsStr`/`OsStrExt` imports and the unused
/// `path_str` local from the previous version.
#[cfg(windows)]
fn get_windows_free_space(_path: &Path) -> Result<u64, Box<dyn std::error::Error>> {
    eprintln!("Warning: Disk space check not fully implemented on Windows, assuming sufficient space");
    Ok(u64::MAX / 2) // Return half of max u64 as "available" space
}

/// Enhanced retry logic with exponential backoff and jitter
///
/// Makes one initial attempt plus up to `max_retries` retries of
/// `download_chunk_enhanced`. The sleep before retry N is
/// `base_retry_delay * 2^min(N,5)` seconds (so the first retry already waits
/// 2x the base), scaled by a random factor in [0.85, 1.15).
async fn download_chunk_with_retry_enhanced(
    client: &Client,
    url: &str,
    start: u64,
    end: u64,
    use_ranges: bool,
    file: &Arc<Mutex<File>>,
    progress: &ProgressBar,
    buffer_size: usize,
    max_retries: u32,
    base_retry_delay: u64,
    use_mmap: bool,
    mmap_threshold: u64,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let mut retries = 0;
    
    loop {
        match download_chunk_enhanced(
            client, url, start, end, use_ranges, file, progress, buffer_size, use_mmap, mmap_threshold
        ).await {
            Ok(_) => return Ok(()),
            Err(e) => {
                retries += 1;
                if retries > max_retries {
                    return Err(format!("Failed to download chunk {}-{} after {} retries: {}", start, end, max_retries, e).into());
                }
                
                // Exponential backoff with jitter
                let delay = base_retry_delay * (1 << retries.min(5)); // Cap at 32x base delay
                let jitter = (rand::random::<f64>() * 0.3 + 0.85) * delay as f64; // ±15% jitter
                let actual_delay = jitter as u64;
                
                eprintln!("Retry {} for chunk {}-{} (delay: {}s): {}", retries, start, end, actual_delay, e);
                tokio::time::sleep(Duration::from_secs(actual_delay)).await;
            }
        }
    }
}

impl Args {
    /// Validate input parameters
    ///
    /// Enforces the documented minimums: >= 1 connection, >= 1 KB chunk size,
    /// >= 64 KB buffer size. Returns a human-readable message on violation.
    fn validate(&self) -> Result<(), String> {
        if self.connections < 1 {
            return Err("Number of connections must be at least 1".into());
        }
        if self.chunk_size < 1024 {
            return Err("Chunk size must be at least 1KB".into());
        }
        if self.buffer_size < 65536 {
            return Err("Buffer size must be at least 64KB".into());
        }
        Ok(())
    }
    
    /// Get the output filename, deriving from URL if not provided
    ///
    /// Precedence: explicit `--output` wins; otherwise the last non-empty path
    /// segment of the URL is used, falling back to "download".
    fn get_output_filename(&self) -> Result<String, String> {
        if let Some(output) = &self.output {
            return Ok(output.clone());
        }
        
        // Parse URL to extract filename
        let url = Url::parse(&self.url)
            .map_err(|e| format!("Invalid URL: {}", e))?;
        
        // Get the path segment
        let path = url.path();
        
        // Extract the filename from the path
        let filename = path.split('/')
            .filter(|segment| !segment.is_empty())
            .last()
            .unwrap_or("download");
        
        // If the filename is empty or doesn't have an extension, use a default
        // NOTE(review): `filename.is_empty()` can never be true here because
        // empty segments are filtered above; only the "." case is reachable.
        if filename.is_empty() || filename == "." {
            return Ok("download".to_string());
        }
        
        Ok(filename.to_string())
    }

    /// Build proxy configuration from arguments and environment variables
    ///
    /// CLI flags take precedence over environment variables. Returns Ok(None)
    /// (i.e. reqwest's default/system behavior) when nothing is configured.
    ///
    /// NOTE(review): reqwest's `Proxy` represents a single proxy rule, and each
    /// reassignment below REPLACES the previous one — so specifying both
    /// --http-proxy and --https-proxy leaves only the HTTPS rule active, and
    /// either of them discards a --proxy value. Supporting all three together
    /// would require attaching multiple proxies to the ClientBuilder.
    fn build_proxy(&self) -> Result<Option<Proxy>, Box<dyn std::error::Error>> {
        // Helper function to get environment variable (case-insensitive)
        fn get_env_var(key: &str) -> Option<String> {
            std::env::var(key)
                .ok()
                .or_else(|| std::env::var(&key.to_uppercase()).ok())
        }

        // Get proxy settings from arguments or environment
        let proxy_url: Option<String> = self.proxy.as_ref().cloned()
            .or_else(|| get_env_var("proxy"))
            .or_else(|| get_env_var("all_proxy"));
        
        let http_proxy_url = self.http_proxy.as_ref().cloned()
            .or_else(|| get_env_var("http_proxy"));
        
        let https_proxy_url = self.https_proxy.as_ref().cloned()
            .or_else(|| get_env_var("https_proxy"));
        
        let no_proxy_list = self.no_proxy.as_ref().cloned()
            .or_else(|| get_env_var("no_proxy"));
        
        // If no proxy settings are provided, use system proxy
        if proxy_url.is_none() && http_proxy_url.is_none() && https_proxy_url.is_none() {
            return Ok(None);
        }

        // Start with a base proxy configuration
        let mut proxy = if let Some(url) = proxy_url {
            Proxy::all(url)?
        } else {
            // NOTE(review): this custom fallback only ever matches an empty
            // host ("") and points at a hard-coded placeholder URL, so it is
            // effectively a no-op that exists to be overwritten below —
            // confirm whether a real default was intended here.
            Proxy::custom(|url|{
                let target = reqwest::Url::parse("https://my.prox").unwrap();
                if url.host_str() == Some("") {
                    Some(target.clone())
                } else {
                    None
                }
            })
        };

        // Set HTTP proxy if specified
        if let Some(url) = http_proxy_url {
            proxy = Proxy::http(url)?;
        }

        // Set HTTPS proxy if specified
        // (replaces any HTTP proxy set just above — see NOTE in the doc comment)
        if let Some(url) = https_proxy_url {
            proxy = Proxy::https(url)?;
        }

        // Set no-proxy list if specified
        if let Some(no_proxy) = no_proxy_list {
            proxy = proxy.no_proxy(reqwest::NoProxy::from_string(&no_proxy));
        }

        Ok(Some(proxy))
    }
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Parse and sanity-check CLI arguments before doing any network/disk work.
    let args = Args::parse();
    args.validate()
        .map_err(|e| format!("Invalid arguments: {}", e))?;
    
    // Clone state_file to avoid lifetime issues
    let state_file = args.state_file.clone().unwrap_or_else(|| ".download_state.json".to_string());
    let state_file_for_pause = state_file.clone();
    
    // Handle resume mode: either load a saved state, or probe the server and
    // build a fresh one (plus its chunk plan).
    let (download_state, output_path) = if args.resume {
        if !Path::new(&state_file).exists() {
            return Err(format!("State file {} not found. Cannot resume download.", state_file).into());
        }
        
        let state = load_state(&state_file).await?;
        println!("Resuming download from {} ({}% complete)", state.url, state.get_progress());
        (state.clone(), state.output_path.clone())
    } else {
        // Get the output filename
        let output_path = args.get_output_filename()
            .map_err(|e| format!("Failed to determine output path: {}", e))?;
        
        // Build default headers (browser-like, to avoid naive bot blocking).
        let mut default_headers = header::HeaderMap::new();
        default_headers.insert(header::USER_AGENT, "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36".parse().unwrap());
        default_headers.insert(header::ACCEPT, "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7".parse().unwrap());
        default_headers.insert(header::ACCEPT_LANGUAGE, "en-US,en;q=0.9".parse().unwrap());
        default_headers.insert(header::ACCEPT_ENCODING, "gzip, deflate, br".parse().unwrap());
        default_headers.insert(header::CONNECTION, "keep-alive".parse().unwrap());
        default_headers.insert("Upgrade-Insecure-Requests", "1".parse().unwrap());
        default_headers.insert("Sec-Fetch-Dest", "document".parse().unwrap());
        default_headers.insert("Sec-Fetch-Mode", "navigate".parse().unwrap());
        default_headers.insert("Sec-Fetch-Site", "none".parse().unwrap());
        default_headers.insert("Sec-Fetch-User", "?1".parse().unwrap());
        default_headers.insert("Cache-Control", "max-age=0".parse().unwrap());
        
        // Add referer if provided
        if let Some(referer) = &args.referer {
            default_headers.insert(header::REFERER, referer.parse().unwrap());
        }
        
        // Add cookie if provided
        if let Some(cookie) = &args.cookie {
            default_headers.insert(header::COOKIE, cookie.parse().unwrap());
        }
        
        // Build proxy configuration
        let proxy = args.build_proxy()
            .map_err(|e| format!("Failed to configure proxy: {}", e))?;
        
        // Create a short-lived probe client (with tighter timeouts) used only
        // for get_file_info below; the actual download client is built later.
        let mut client_builder = Client::builder()
            .default_headers(default_headers)
            .timeout(Duration::from_secs(30))
            .connect_timeout(Duration::from_secs(10));
        
        if let Some(p) = proxy {
            client_builder = client_builder.proxy(p);
            eprintln!("Using proxy configuration");
        }
        
        let client = client_builder.build()?;
        
        // Get file information and check if range requests are supported
        let (total_size, supports_ranges) = if args.single_connection {
            eprintln!("Forcing single connection mode");
            (0, false)
        } else {
            match get_file_info(&client, &args.url).await {
                Ok(info) => info,
                Err(e) => {
                    // Probe failure is not fatal: fall back to a single stream.
                    eprintln!("Failed to get file info: {}. Trying single connection download.", e);
                    (0, false)
                }
            }
        };
        
        // Plan chunks only when parallel ranged download is actually possible.
        let chunks = if supports_ranges && !args.single_connection && total_size > 0 {
            // Don't open more connections than there are chunks.
            let num_connections = args.connections.min((total_size as usize + args.chunk_size - 1) / args.chunk_size);
            calculate_chunks(total_size, args.chunk_size, num_connections)
        } else {
            vec![]
        };
        
        let state = DownloadState::new(args.url.clone(), output_path.clone(), total_size, supports_ranges, chunks);
        (state, output_path)
    };
    
    // Build default headers for the client (smaller set than the probe client).
    let mut default_headers = header::HeaderMap::new();
    default_headers.insert(header::USER_AGENT, "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36".parse().unwrap());
    default_headers.insert(header::ACCEPT, "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7".parse().unwrap());
    default_headers.insert(header::ACCEPT_LANGUAGE, "en-US,en;q=0.9".parse().unwrap());
    default_headers.insert(header::ACCEPT_ENCODING, "gzip, deflate, br".parse().unwrap());
    default_headers.insert(header::CONNECTION, "keep-alive".parse().unwrap());
    
    // Add referer if provided
    if let Some(referer) = &args.referer {
        default_headers.insert(header::REFERER, referer.parse().unwrap());
    }
    
    // Add cookie if provided
    if let Some(cookie) = &args.cookie {
        default_headers.insert(header::COOKIE, cookie.parse().unwrap());
    }
    
    // Build proxy configuration
    let proxy = args.build_proxy()
        .map_err(|e| format!("Failed to configure proxy: {}", e))?;
    
    // Calculate adaptive buffer size if enabled
    let buffer_size = if args.adaptive_buffer {
        calculate_adaptive_buffer_size(download_state.total_size, args.buffer_size)
    } else {
        args.buffer_size
    };
    
    // Check disk space if enabled (skipped when the size is unknown).
    if args.check_disk_space && download_state.total_size > 0 {
        let output_path_buf = PathBuf::from(&output_path);
        if let Err(e) = check_disk_space(&output_path_buf, download_state.total_size).await {
            return Err(format!("Disk space check failed: {}", e).into());
        }
        eprintln!("Disk space check passed");
    }
    
    // Create optimized client with enhanced timeout settings
    let mut client_builder = Client::builder()
        .default_headers(default_headers)
        .timeout(Duration::from_secs(args.network_timeout))
        .connect_timeout(Duration::from_secs(30))
        .pool_max_idle_per_host(20)
        .pool_idle_timeout(Duration::from_secs(60))
        .tcp_keepalive(Duration::from_secs(120))
        .http2_keep_alive_interval(Duration::from_secs(30))
        .http2_keep_alive_timeout(Duration::from_secs(10));
    
    if let Some(p) = proxy {
        client_builder = client_builder.proxy(p);
        eprintln!("Using proxy configuration");
    }
    
    let client = client_builder.build()?;
    
    let start_time = Instant::now();
    
    if download_state.total_size > 0 {
        println!("Downloading {} ({} bytes)", download_state.url, download_state.total_size);
    } else {
        println!("Downloading {} (size unknown)", download_state.url);
    }
    
    // Setup progress bar (spinner when the total size is unknown).
    let progress = if download_state.total_size > 0 {
        let pb = ProgressBar::new(download_state.total_size);
        pb.set_position(download_state.downloaded_bytes);
        pb
    } else {
        ProgressBar::new_spinner()
    };
    
    progress.set_style(
        ProgressStyle::default_bar()
            .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {bytes}/{total_bytes} ({eta}) - {bytes_per_sec}")
            .unwrap()
    );
    
    // Open or create the output file. On resume the existing file is opened
    // read/write so completed ranges are preserved; otherwise it is truncated.
    let file = if args.resume && Path::new(&output_path).exists() {
        OpenOptions::new()
            .write(true)
            .read(true)
            .open(&output_path)
            .await?
    } else {
        File::create(&output_path).await?
    };
    let file = Arc::new(Mutex::new(file));
    
    // Pre-allocate space if we know the size and it's a new download
    if !args.resume && download_state.total_size > 0 {
        file.lock().await.set_len(download_state.total_size).await?;
    }
    
    let success = if download_state.supports_ranges && !args.single_connection && !download_state.chunks.is_empty() {
        // Multi-connection download with pause/resume support
        let pending_chunks = download_state.get_pending_chunks();
        
        if pending_chunks.is_empty() {
            eprintln!("All chunks already downloaded!");
            true
        } else {
            // Setup download tasks with retry logic
            let semaphore = Arc::new(Semaphore::new(args.connections));
            let mut tasks = JoinSet::new();
            let progress_clone = progress.clone();
            // Shared, mutex-guarded copy of the state that the chunk tasks update.
            let state_mutex = Arc::new(Mutex::new(download_state.clone()));
            
            for (start, end) in pending_chunks {
                let url = download_state.url.clone();
                let client = client.clone();
                let file = file.clone();
                // Acquiring the permit *before* spawning throttles task creation
                // to the connection limit (the loop blocks until a slot frees up).
                let permit = semaphore.clone().acquire_owned().await?;
                let progress = progress_clone.clone();
                let buffer_size = buffer_size; // Use the calculated adaptive buffer size
                let max_retries = args.max_retries;
                let retry_delay = args.retry_delay;
                let state_clone = state_mutex.clone();
                let use_mmap = args.use_mmap;
                let mmap_threshold = args.mmap_threshold;
                
                tasks.spawn(async move {
                    // Hold the permit for the lifetime of the task.
                    let _permit = permit;
                    
                    match download_chunk_with_retry_enhanced(
                        &client,
                        &url,
                        start,
                        end,
                        true,
                        &file,
                        &progress,
                        buffer_size,
                        max_retries,
                        retry_delay,
                        use_mmap,
                        mmap_threshold,
                    ).await {
                        Ok(_) => {
                            // Update state
                            let mut state = state_clone.lock().await;
                            state.update_chunk_progress(start, end - start + 1, true);
                            Ok(())
                        },
                        Err(e) => {
                            eprintln!("Error downloading chunk {}-{}: {}", start, end, e);
                            Err(e)
                        }
                    }
                });
            }
            
            // Handle pause functionality (testing aid). NOTE(review): this only
            // snapshots the state after the delay — it does not cancel the
            // in-flight chunk tasks, which keep running; confirm intended.
            let pause_handle = if let Some(pause_after) = args.pause_after {
                let progress_clone = progress.clone();
                let state_clone = state_mutex.clone();
                Some(tokio::spawn(async move {
                    tokio::time::sleep(Duration::from_secs(pause_after)).await;
                    eprintln!("Pausing download after {} seconds as requested", pause_after);
                    progress_clone.finish();
                    let state_guard = state_clone.lock().await;
                    if let Err(e) = save_state(&*state_guard, &state_file_for_pause).await {
                        eprintln!("Failed to save state: {}", e);
                    } else {
                        eprintln!("Download state saved to {}", state_file_for_pause);
                    }
                }))
            } else {
                None
            };
            
            // Wait for all downloads to complete
            let mut success = true;
            while let Some(res) = tasks.join_next().await {
                match res {
                    Ok(Ok(_)) => {}, // Task completed successfully
                    Ok(Err(e)) => {
                        eprintln!("Download error: {}", e);
                        success = false;
                    },
                    Err(e) => {
                        eprintln!("Task panicked: {}", e);
                        success = false;
                    }
                }
            }
            
            // Cancel pause timer if download completed
            if let Some(handle) = pause_handle {
                handle.abort();
            }
            
            success
        }
    } else {
        // Single connection download with enhanced features
        match download_single_connection_enhanced(
            &client, 
            &download_state.url, 
            &file, 
            &progress, 
            buffer_size,
            args.use_mmap,
            args.mmap_threshold
        ).await {
            Ok(_) => {
                eprintln!("Single connection download succeeded.");
                true
            },
            Err(e) => {
                eprintln!("Single connection download failed: {}", e);
                false
            }
        }
    };
    
    if !success {
        // Save state for resuming
        // NOTE(review): this saves the ORIGINAL `download_state`, not the
        // mutex-guarded copy the chunk tasks updated, so per-chunk progress
        // made this run is lost and a --resume restarts every chunk — confirm
        // whether the updated state from `state_mutex` should be saved instead.
        if let Err(e) = save_state(&download_state, &state_file).await {
            eprintln!("Failed to save state: {}", e);
        } else {
            eprintln!("Download state saved to {}. Use --resume to continue.", state_file);
        }
        return Err("Download failed with errors".into());
    }
    
    // Ensure all data is written to disk
    file.lock().await.sync_all().await?;
    
    // Get actual file size
    let metadata = file.lock().await.metadata().await?;
    let actual_size = metadata.len();
    
    // Clean up state file on successful completion
    if Path::new(&state_file).exists() {
        if let Err(e) = tokio::fs::remove_file(&state_file).await {
            eprintln!("Warning: Failed to remove state file: {}", e);
        }
    }
    
    // Finalize
    progress.finish_with_message("Download complete");
    let duration = start_time.elapsed();
    let speed = actual_size as f64 / duration.as_secs_f64() / (1024.0 * 1024.0);
    println!(
        "Downloaded {} bytes in {:.2?} ({:.2} MB/s) to {}",
        actual_size, duration, speed, output_path
    );
    Ok(())
}
/// Probe `url` for its content length and whether the server honors
/// HTTP range requests.
///
/// Strategy: issue a plain GET first (some servers reject HEAD). On success,
/// read `Content-Length` and, when it is known, confirm range support with a
/// one-byte `Range: bytes=0-0` probe (a `206 Partial Content` reply means
/// ranges work). If the GET fails, fall back to a HEAD request and trust its
/// `Accept-Ranges`/`Content-Length` headers. When both attempts fail, the
/// error reports the original GET status.
async fn get_file_info(client: &Client, url: &str) -> Result<(u64, bool), Box<dyn std::error::Error>> {
    // Local helper: extract a numeric Content-Length from a header map,
    // defaulting to 0 when absent or unparsable.
    fn content_length_of(headers: &header::HeaderMap) -> u64 {
        headers
            .get(header::CONTENT_LENGTH)
            .and_then(|v| v.to_str().ok())
            .and_then(|v| v.parse::<u64>().ok())
            .unwrap_or(0)
    }

    let get_resp = client.get(url).send().await?;

    if get_resp.status().is_success() {
        let size = content_length_of(get_resp.headers());

        // Only bother probing for range support when the size is known.
        if size > 0 {
            let probe = client
                .get(url)
                .header(header::RANGE, "bytes=0-0")
                .send()
                .await?;
            if probe.status() == StatusCode::PARTIAL_CONTENT {
                return Ok((size, true));
            }
        }

        // Either the size is unknown or the probe did not come back as 206.
        return Ok((size, false));
    }

    // GET failed — some servers only answer HEAD correctly.
    let head_resp = client.head(url).send().await?;
    if head_resp.status().is_success() {
        let ranges_ok = head_resp
            .headers()
            .get(header::ACCEPT_RANGES)
            .and_then(|v| v.to_str().ok())
            .map_or(false, |v| v == "bytes");
        return Ok((content_length_of(head_resp.headers()), ranges_ok));
    }

    // Both attempts failed; surface the status of the first (GET) attempt.
    Err(format!("Server responded with status: {}", get_resp.status()).into())
}
/// Split a download of `total_size` bytes into inclusive `(start, end)` byte
/// ranges, one per connection.
///
/// The number of ranges is the smaller of `num_connections` and the number of
/// `chunk_size`-sized pieces the file divides into; the per-range size is then
/// re-balanced (ceiling division) so the ranges cover the file evenly.
/// Returns an empty vector for an empty file. Degenerate inputs
/// (`chunk_size == 0` or `num_connections == 0`) are clamped to 1 instead of
/// panicking with a division by zero, as the previous version did.
fn calculate_chunks(total_size: u64, chunk_size: usize, num_connections: usize) -> Vec<(u64, u64)> {
    if total_size == 0 {
        return vec![];
    }
    // Clamp degenerate parameters: both are used as divisors below.
    let chunk_size = (chunk_size as u64).max(1);
    let num_connections = num_connections.max(1);
    // Ceiling division: how many chunk_size pieces the file needs.
    let num_chunks = (total_size + chunk_size - 1) / chunk_size;
    let actual_chunks = num_chunks.min(num_connections as u64) as usize;
    // Re-balance so exactly `actual_chunks` ranges cover the whole file.
    let adjusted_chunk_size = (total_size + actual_chunks as u64 - 1) / actual_chunks as u64;
    (0..actual_chunks)
        .map(|i| {
            let start = i as u64 * adjusted_chunk_size;
            // Inclusive end, clamped to the last byte of the file.
            let end = (start + adjusted_chunk_size - 1).min(total_size - 1);
            (start, end)
        })
        .collect()
}
/// Download bytes `start..=end` of `url` and write them into `file` at the
/// matching offset.
///
/// When `use_ranges` is true a `Range` header is sent and the server must
/// answer `206 Partial Content`; the number of bytes received is verified
/// against the requested span afterwards. Incoming data is staged in an
/// in-memory buffer of up to `buffer_size` bytes between writes to reduce
/// lock contention on the shared file handle. `progress` advances as staged
/// data is flushed.
///
/// # Errors
/// Fails on network/stream errors, on a non-206 reply to a range request, on
/// any non-success reply to a plain request, on file seek/write errors, or
/// when the byte count received does not match the span requested.
async fn download_chunk(
    client: &Client,
    url: &str,
    start: u64,
    end: u64,
    use_ranges: bool,
    file: &Arc<Mutex<File>>,
    progress: &ProgressBar,
    buffer_size: usize,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Prepare request with range header if supported
    let mut request = client.get(url);
    if use_ranges {
        request = request.header(header::RANGE, format!("bytes={}-{}", start, end));
    }
    
    // Send request and check status
    let resp = request.send().await?;
    if use_ranges && resp.status() != StatusCode::PARTIAL_CONTENT {
        return Err(format!(
            "Expected partial content (206), got {}",
            resp.status()
        )
        .into());
    }
    // Without ranges, any non-success reply (e.g. a 404/500 error page) would
    // otherwise be streamed straight into the output file as data — reject it.
    if !use_ranges && !resp.status().is_success() {
        return Err(format!("Server responded with status: {}", resp.status()).into());
    }
    
    let mut stream = resp.bytes_stream();
    let mut current_pos = start;
    let mut buffer = Vec::with_capacity(buffer_size);
    let expected_bytes = end - start + 1;
    let mut downloaded_bytes: u64 = 0;
    
    while let Some(chunk_result) = stream.next().await {
        let chunk = chunk_result?;
        downloaded_bytes += chunk.len() as u64;
        
        // Flush the staged buffer first whenever the incoming chunk would
        // push it past capacity; the seek+write pair is atomic under the lock.
        if buffer.len() + chunk.len() > buffer_size {
            let mut file_guard = file.lock().await;
            file_guard.seek(tokio::io::SeekFrom::Start(current_pos)).await?;
            file_guard.write_all(&buffer).await?;
            current_pos += buffer.len() as u64;
            progress.inc(buffer.len() as u64);
            buffer.clear();
        }
        
        // Stage the new data after any flush.
        buffer.extend_from_slice(&chunk);
    }
    
    // Write whatever remains staged after the stream ends.
    if !buffer.is_empty() {
        let mut file_guard = file.lock().await;
        file_guard.seek(tokio::io::SeekFrom::Start(current_pos)).await?;
        file_guard.write_all(&buffer).await?;
        progress.inc(buffer.len() as u64);
    }
    
    // Verify the server honored the requested range exactly.
    if use_ranges && downloaded_bytes != expected_bytes {
        return Err(format!(
            "Downloaded {} bytes but expected {} bytes for chunk {}-{}",
            downloaded_bytes, expected_bytes, start, end
        ).into());
    }
    
    Ok(())
}
/// Stream the whole resource at `url` through one HTTP connection into `file`.
///
/// Writes are staged in an in-memory buffer and flushed once adding the next
/// network chunk would exceed `buffer_size`, keeping lock acquisitions on the
/// shared file handle infrequent. If the response carries a `Content-Length`
/// header the progress bar's total is updated from it; the bar advances as
/// each staged buffer is flushed.
///
/// # Errors
/// Fails on a non-success HTTP status, on any network/stream error, or on any
/// file seek/write error.
async fn download_single_connection(
    client: &Client,
    url: &str,
    file: &Arc<Mutex<File>>,
    progress: &ProgressBar,
    buffer_size: usize,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let response = client.get(url).send().await?;
    let status = response.status();
    if !status.is_success() {
        return Err(format!("Server responded with status: {}", status).into());
    }

    // A known Content-Length lets the progress bar display a real total.
    let total = response
        .headers()
        .get(header::CONTENT_LENGTH)
        .and_then(|v| v.to_str().ok())
        .and_then(|v| v.parse::<u64>().ok());
    if let Some(len) = total {
        progress.set_length(len);
    }

    let mut body = response.bytes_stream();
    let mut write_offset: u64 = 0;
    let mut staged = Vec::with_capacity(buffer_size);

    while let Some(piece) = body.next().await {
        let piece = piece?;

        // Flush the staging buffer first whenever the incoming piece would
        // push it past capacity, then stage the new data afterwards.
        if staged.len() + piece.len() > buffer_size {
            let mut guard = file.lock().await;
            guard.seek(tokio::io::SeekFrom::Start(write_offset)).await?;
            guard.write_all(&staged).await?;
            write_offset += staged.len() as u64;
            progress.inc(staged.len() as u64);
            staged.clear();
        }
        staged.extend_from_slice(&piece);
    }

    // Whatever is still staged after the stream ends goes out in one last write.
    if !staged.is_empty() {
        let mut guard = file.lock().await;
        guard.seek(tokio::io::SeekFrom::Start(write_offset)).await?;
        guard.write_all(&staged).await?;
        progress.inc(staged.len() as u64);
    }

    Ok(())
}
