use std::fs::{File, OpenOptions};
use std::io::{Seek, SeekFrom, Write};
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};

use anyhow::{bail, Context, Result};
use clap::Parser;
use futures_util::StreamExt;
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use reqwest::header::{HeaderMap, HeaderValue, ACCEPT_RANGES, CONTENT_LENGTH, CONTENT_RANGE, RANGE};
use reqwest::{Client, ClientBuilder, Proxy};

// Command-line arguments, parsed via clap's derive API.
//
// NOTE: the `///` doc comments on the fields below double as the
// clap-generated `--help` text, so their exact wording is user-visible
// output — do not edit them casually.
#[derive(Parser, Debug)]
#[clap(
    name = "Parallel Downloader",
    about = "Downloads files in parallel chunks",
    version
)]
struct Args {
    /// URL to download
    url: String,

    /// Output file path
    // Takes precedence over `dir`; when absent, the filename is derived
    // from the URL in `main`.
    #[clap(short, long)]
    output: Option<PathBuf>,

    /// Output file dir
    // Only consulted when `output` is not given; created if missing.
    #[clap(short, long)]
    dir: Option<PathBuf>,

    /// Number of chunks to download in parallel
    #[clap(short, long, default_value_t = 10)]
    chunks: usize,

    /// Set No proxy
    // Mutually exclusive in effect with `proxy`: checked first in build_client.
    #[clap(long, default_value_t = false)]
    no_proxy: bool,

    /// Set proxy
    #[clap(long)]
    proxy: Option<String>,

    /// Set timeout in seconds
    // Passed to the HTTP client builder as a Duration (see build_client).
    #[clap(short, long, default_value_t = 10)]
    timeout: usize,
}

#[tokio::main]
async fn main() -> Result<()> {
    let args = Args::parse();

    // Determine the output path: explicit --output wins; otherwise derive a
    // filename from the URL and place it under --dir (default: current dir).
    let output = match args.output {
        Some(path) => path,
        None => {
            // Strip any query string / fragment before extracting the last
            // non-empty path segment, so "…/file.zip?token=x" becomes
            // "file.zip" and a trailing '/' no longer yields an empty
            // filename (which previously made the output path the directory
            // itself). Fall back to "download" if nothing usable remains.
            let path_part = args
                .url
                .split(|c| c == '?' || c == '#')
                .next()
                .unwrap_or(args.url.as_str());
            let filename = path_part
                .rsplit('/')
                .find(|seg| !seg.is_empty())
                .unwrap_or("download");
            let dir = args.dir.unwrap_or_else(|| PathBuf::from("."));
            if !dir.exists() {
                std::fs::create_dir_all(&dir)?;
            }
            dir.join(filename)
        }
    };

    println!("Starting download from: {}", args.url);
    println!("Output file: {}", output.display());
    println!("Chunks: {}", args.chunks);
    println!("Timeout: {} seconds", args.timeout);

    let timeout = Duration::from_secs(args.timeout as u64);

    // Build HTTP client
    let client = build_client(args.no_proxy, args.proxy, timeout)?;

    // Get file info (size and range support)
    let (file_size, supports_range) = get_file_info(&client, &args.url).await?;
    if file_size == 0 {
        bail!("File size is 0, cannot download empty file");
    }
    println!("File size: {} bytes", file_size);
    println!("Supports range requests: {}", supports_range);

    // Adjust chunk count: at most one chunk per byte, never zero chunks
    // (`--chunks 0` previously spawned no tasks and reported success on an
    // untouched preallocated file), and a single chunk when the server
    // cannot serve byte ranges. try_from avoids silent truncation of
    // file_size on 32-bit targets.
    let num_chunks = if supports_range {
        args.chunks
            .min(usize::try_from(file_size).unwrap_or(usize::MAX))
            .max(1)
    } else {
        1
    };
    println!("Using {} chunks", num_chunks);

    // Create and preallocate the output file so chunks can seek+write.
    let file = setup_file(&output, file_size)?;

    // Setup progress tracking: one overall bar plus one bar per chunk.
    let multi_progress = MultiProgress::new();
    let main_progress = multi_progress.add(ProgressBar::new(file_size));
    main_progress.set_style(
        ProgressStyle::default_bar()
           .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {bytes}/{total_bytes} ({eta})")
           .unwrap()
           .progress_chars("#>-"),
    );

    let start_time = Instant::now();
    let total_progress = Arc::new(Mutex::new(0u64));
    let file = Arc::new(Mutex::new(file));

    // Spawn one download task per chunk.
    let mut handles = Vec::with_capacity(num_chunks);
    for i in 0..num_chunks {
        let (start, end) = calculate_chunk_range(i, num_chunks, file_size);
        // Defensive: skip degenerate ranges past EOF.
        if start >= file_size {
            continue;
        }

        let url = args.url.clone();
        let client = client.clone();
        let chunk_progress = multi_progress.add(ProgressBar::new(end - start + 1));
        chunk_progress.set_style(
            ProgressStyle::default_bar()
               .template(&format!("{{spinner:.green}} Chunk {} [{{bar:30.cyan/blue}}] {{bytes}}/{{total_bytes}}", i))
               .unwrap()
               .progress_chars("=> "),
        );

        let main_progress = main_progress.clone();
        let total_progress = total_progress.clone();
        let file = file.clone();

        handles.push(tokio::spawn(async move {
            if let Err(e) = download_chunk(
                client,
                &url,
                start,
                end,
                i,
                chunk_progress,
                main_progress,
                total_progress,
                file,
            )
           .await
            {
                eprintln!("Chunk {} failed: {}", i, e);
                Err(e)
            } else {
                Ok(())
            }
        }));
    }

    // Wait for all tasks; keep going on failure so every error is reported.
    let mut all_success = true;
    for handle in handles {
        match handle.await {
            Ok(Ok(())) => {}
            Ok(Err(e)) => {
                eprintln!("Download error: {}", e);
                all_success = false;
            }
            Err(e) => {
                eprintln!("Task join error: {}", e);
                all_success = false;
            }
        }
    }

    if !all_success {
        bail!("Some chunks failed to download");
    }

    main_progress.finish_with_message("Download complete");

    let elapsed = start_time.elapsed();
    // 1_048_576 = bytes per MiB.
    let speed = file_size as f64 / elapsed.as_secs_f64() / 1_048_576.0;
    println!(
        "Downloaded {} bytes in {:.2}s ({:.2} MB/s)",
        file_size,
        elapsed.as_secs_f64(),
        speed
    );

    Ok(())
}

/// Builds the shared HTTP client used for all requests.
///
/// `timeout` bounds connection establishment only. Using
/// `ClientBuilder::timeout` here (as the original did) is a bug for a
/// downloader: that value limits the WHOLE request — including streaming
/// the response body — so any chunk taking longer than `timeout` seconds
/// (10 s by default) to download would be aborted mid-transfer.
fn build_client(no_proxy: bool, proxy: Option<String>, timeout: Duration) -> Result<Client> {
    let mut builder = ClientBuilder::new()
       .user_agent("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36")
       // SECURITY NOTE(review): TLS certificate verification is disabled,
       // which permits man-in-the-middle attacks. Kept for compatibility
       // with existing behavior; consider making this an opt-in flag.
       .danger_accept_invalid_certs(true)
       .connect_timeout(timeout)
       .redirect(reqwest::redirect::Policy::limited(10));

    if no_proxy {
        // Ignore any system/environment proxy configuration.
        builder = builder.no_proxy();
    } else if let Some(p) = proxy {
        // Route all schemes through the user-supplied proxy URL.
        builder = builder.proxy(Proxy::all(p)?);
    }

    builder.build().context("Failed to build HTTP client")
}

/// Probes `url` and returns `(content_length, supports_range_requests)`.
///
/// Tries a HEAD request first; some servers reject HEAD, in which case a
/// GET is issued and only its headers are inspected. A missing or
/// unparsable Content-Length is reported as 0.
async fn get_file_info(client: &Client, url: &str) -> Result<(u64, bool)> {
    let head_attempt = client.head(url).send().await;
    let resp = match head_attempt.ok().filter(|r| r.status().is_success()) {
        Some(r) => r,
        None => {
            // HEAD failed or was refused — fall back to GET.
            let r = client.get(url).send().await?;
            if !r.status().is_success() {
                bail!("Failed to get file info (HTTP {})", r.status());
            }
            r
        }
    };

    let headers = resp.headers();

    let content_length = headers
        .get(CONTENT_LENGTH)
        .and_then(|v| v.to_str().ok())
        .and_then(|s| s.parse().ok())
        .unwrap_or(0);

    // Range support is signalled by `Accept-Ranges: bytes`, or by the
    // server already answering with a Content-Range header.
    let accepts_byte_ranges = matches!(headers.get(ACCEPT_RANGES), Some(v) if v == "bytes");
    let supports_range = accepts_byte_ranges || headers.get(CONTENT_RANGE).is_some();

    Ok((content_length, supports_range))
}

fn setup_file(path: &PathBuf, size: u64) -> Result<File> {
    let file = OpenOptions::new()
       .create(true)
       .write(true)
       .truncate(true)
       .open(path)
       .context("Failed to create output file")?;

    if size > 0 {
        file.set_len(size).context("Failed to preallocate file")?;
    }
    Ok(file)
}

/// Returns the inclusive byte range `(start, end)` for chunk `chunk_index`
/// out of `total_chunks`, splitting `file_size` bytes as evenly as possible:
/// the first `file_size % total_chunks` chunks each receive one extra byte.
///
/// The last chunk's end is pinned to `file_size - 1` so the ranges always
/// cover the whole file exactly.
fn calculate_chunk_range(chunk_index: usize, total_chunks: usize, file_size: u64) -> (u64, u64) {
    let idx = chunk_index as u64;
    let chunks = total_chunks as u64;
    let base = file_size / chunks;
    let extra = file_size % chunks;

    // Byte offset at which chunk `i` begins: `i` base-sized chunks plus one
    // extra byte for each earlier chunk that absorbed part of the remainder.
    let chunk_start = |i: u64| i * base + i.min(extra);

    let start = chunk_start(idx);
    let end = if chunk_index + 1 == total_chunks {
        file_size - 1
    } else {
        chunk_start(idx + 1) - 1
    };

    (start, end)
}

/// Downloads bytes `start..=end` of `url` and writes them into `file` at the
/// matching offsets, updating both the per-chunk and overall progress bars.
///
/// # Errors
/// Fails if the request cannot be sent, the server answers with a non-success
/// status or an unexpected Content-Range, a seek/write fails, or the number
/// of bytes received does not match the requested range.
async fn download_chunk(
    client: Client,
    url: &str,
    start: u64,
    end: u64,
    chunk_index: usize,
    chunk_progress: ProgressBar,
    main_progress: ProgressBar,
    total_progress: Arc<Mutex<u64>>,
    file: Arc<Mutex<File>>,
) -> Result<()> {
    // Request only this chunk's byte range (inclusive on both ends).
    let mut headers = HeaderMap::new();
    if start <= end {
        headers.insert(
            RANGE,
            HeaderValue::from_str(&format!("bytes={}-{}", start, end))
               .context("Failed to create Range header")?,
        );
    }

    // Send request
    let resp = client.get(url)
       .headers(headers)
       .send()
       .await
       .context("Failed to send request")?;

    // Validate response status (200 or 206 both satisfy is_success()).
    if !resp.status().is_success() {
        bail!("Unexpected status code: {}", resp.status());
    }

    // For range requests, validate Content-Range if present.
    // FIX: this condition must match the `start <= end` used when sending
    // the Range header; the previous `start < end` silently skipped
    // validation for single-byte chunks.
    if start <= end {
        if let Some(content_range) = resp.headers().get(CONTENT_RANGE) {
            let content_range_str = content_range.to_str()
               .context("Invalid Content-Range header value")?;

            // Parse Content-Range header (format: "bytes start-end/total" or "bytes */total")
            let parts: Vec<&str> = content_range_str.split_whitespace().collect();
            if parts.len() != 2 || parts[0] != "bytes" {
                bail!("Invalid Content-Range format: '{}'", content_range_str);
            }

            let range_and_total: Vec<&str> = parts[1].split('/').collect();
            if range_and_total.len() != 2 {
                bail!("Invalid Content-Range format: '{}'", content_range_str);
            }

            // Handle wildcard case ("bytes */total") — nothing to compare.
            if range_and_total[0] != "*" {
                let range: Vec<&str> = range_and_total[0].split('-').collect();
                if range.len() != 2 {
                    bail!("Invalid range format: '{}'", range_and_total[0]);
                }

                let actual_start = range[0].parse::<u64>()
                   .context("Failed to parse range start")?;
                let actual_end = range[1].parse::<u64>()
                   .context("Failed to parse range end")?;

                // A mismatch means the server is serving different bytes than
                // requested; writing them at `start` would corrupt the file.
                if actual_start != start || actual_end != end {
                    bail!(
                        "Server returned unexpected range {}-{} (expected {}-{})",
                        actual_start,
                        actual_end,
                        start,
                        end
                    );
                }
            }
        }
    }

    let mut bytes_written = 0u64;
    let mut stream = resp.bytes_stream();

    // Process the response stream, writing each piece at its file offset.
    while let Some(chunk) = stream.next().await {
        let chunk = chunk.context("Failed to read chunk")?;
        let chunk_size = chunk.len() as u64;

        // Write to file. The lock guard is confined to this block so the
        // shared file is released before progress bookkeeping.
        {
            let mut file = file.lock()
               .map_err(|e| anyhow::anyhow!("Failed to lock file: {}", e))?;
            // Seek every iteration: other chunks move the shared cursor.
            file.seek(SeekFrom::Start(start + bytes_written))
               .context("Failed to seek in file")?;
            file.write_all(&chunk)
               .context("Failed to write chunk to file")?;
        }

        // Update progress
        bytes_written += chunk_size;
        chunk_progress.inc(chunk_size);

        let mut total = total_progress.lock()
           .map_err(|e| anyhow::anyhow!("Failed to lock progress counter: {}", e))?;
        *total += chunk_size;
        main_progress.set_position(*total);
    }

    // Final size validation (only if we requested a specific range). Catches
    // truncated responses and servers that ignored the Range header.
    if start <= end {
        let expected_size = end - start + 1;
        if bytes_written != expected_size {
            bail!(
                "Chunk {}: Downloaded {} bytes, expected {}",
                chunk_index,
                bytes_written,
                expected_size
            );
        }
    }

    chunk_progress.finish_with_message(format!("Chunk {} done", chunk_index));
    Ok(())
}