use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use ignore::WalkBuilder;

/// A single file entry returned by [`search_files`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileSearchItem {
    /// Full path as produced by the walker (platform-native separators).
    pub path: String,
    /// File-name component of `path`.
    pub name: String,
    /// File size in bytes (0 for non-regular files).
    pub size: u64,
    /// Always `false` in practice: directories are skipped during the walk.
    pub is_dir: bool,
    /// Last-modified time as Unix epoch milliseconds (0 when unavailable).
    pub modified: u64,
}

/// Arguments for [`search_files`].
///
/// Each optional setting is accepted under both a camelCase (JS-style) name
/// and a snake_case name; callers may supply either variant.
#[derive(Debug, Clone, Deserialize)]
pub struct SearchFilesArgs {
    /// Wildcard pattern (`*`/`?`) matched against file names and full paths.
    pub pattern: String,
    /// Root directory to search.
    pub directory: String,
    // camelCase twin of `include_pattern`.
    #[serde(rename = "includePattern")]
    pub include_pattern_camel: Option<String>,
    /// Comma-separated include globs; when set, a path must match one.
    pub include_pattern: Option<String>,
    // camelCase twin of `exclude_pattern`.
    #[serde(rename = "excludePattern")]
    pub exclude_pattern_camel: Option<String>,
    /// Comma-separated exclude globs; matching paths are skipped.
    pub exclude_pattern: Option<String>,
    // camelCase twin of `max_results`.
    #[serde(rename = "maxResults")]
    pub max_results_camel: Option<u32>,
    /// Result cap (defaults to 200, clamped to 5000 in `search_files`).
    pub max_results: Option<u32>,
}

/// Convert a `SystemTime` to Unix epoch milliseconds.
///
/// Times before the epoch (or otherwise unrepresentable) collapse to 0,
/// matching the previous `unwrap_or_default` behavior.
fn to_unix_ms(st: std::time::SystemTime) -> u64 {
    match st.duration_since(std::time::UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_millis() as u64,
        Err(_) => 0,
    }
}

/// Normalize Windows-style backslash separators to forward slashes.
fn normalize_seps(s: &str) -> String {
    s.chars().map(|c| if c == '\\' { '/' } else { c }).collect()
}

/// Very small glob matcher: supports `*` (any run of characters, including
/// empty) and `?` (exactly one character), case-insensitively.
///
/// Matching is performed on Unicode scalar values rather than bytes so that
/// `?` correctly consumes a single multi-byte character (e.g. `é`), which the
/// previous byte-wise implementation could not match. Uses the classic greedy
/// two-pointer algorithm with backtracking to the last `*`.
fn wildcard_match_ci(pattern: &str, text: &str) -> bool {
    let p: Vec<char> = pattern.to_lowercase().chars().collect();
    let t: Vec<char> = text.to_lowercase().chars().collect();
    let (mut i, mut j) = (0usize, 0usize);

    // Position of the most recent `*` in the pattern, and the text position
    // we will resume from if the branch after it fails.
    let mut star: Option<usize> = None;
    let mut match_j: usize = 0;

    while j < t.len() {
        if i < p.len() && (p[i] == '?' || p[i] == t[j]) {
            i += 1;
            j += 1;
        } else if i < p.len() && p[i] == '*' {
            star = Some(i);
            match_j = j;
            i += 1;
        } else if let Some(si) = star {
            // Backtrack: let the last `*` swallow one more character.
            i = si + 1;
            match_j += 1;
            j = match_j;
        } else {
            return false;
        }
    }
    // Any trailing `*`s match the empty remainder.
    while i < p.len() && p[i] == '*' {
        i += 1;
    }
    i == p.len()
}

/// Split a comma-separated pattern list into trimmed, non-empty glob strings.
fn parse_globs(s: &str) -> Vec<String> {
    let mut globs = Vec::new();
    for piece in s.split(',') {
        let piece = piece.trim();
        if !piece.is_empty() {
            globs.push(piece.to_string());
        }
    }
    globs
}

/// Return true when the (separator-normalized) path matches at least one of
/// the given wildcard patterns. An empty pattern list never matches.
fn path_matches_any(path: &str, patterns: &[String]) -> bool {
    if patterns.is_empty() {
        return false;
    }
    let normalized = normalize_seps(path);
    patterns.iter().any(|pat| wildcard_match_ci(pat, &normalized))
}

/// Hard-coded skip list: returns true for paths inside common VCS, package,
/// or build-output directories (case-insensitive, ASCII-folded).
fn default_should_skip(path: &str) -> bool {
    const SKIP_DIRS: [&str; 5] = ["/.git/", "/node_modules/", "/dist/", "/build/", "/target/"];
    let lower = path.to_ascii_lowercase();
    SKIP_DIRS.iter().any(|dir| lower.contains(dir))
}

/// Tauri command: walk `args.directory` and return files whose name or full
/// path matches `args.pattern` (a `*`/`?` wildcard), honoring include/exclude
/// glob lists and a result cap.
///
/// Returns `Ok(vec![])` when the directory does not exist; walk errors on
/// individual entries are skipped silently (best-effort search).
#[tauri::command]
pub async fn search_files(args: SearchFilesArgs) -> Result<Vec<FileSearchItem>, String> {
    let dir = PathBuf::from(&args.directory);
    if !dir.exists() {
        return Ok(vec![]);
    }

    // Accept both snake_case and camelCase variants of each option; the
    // snake_case field wins when both are present.
    let include_list = args
        .include_pattern
        .as_deref()
        .or(args.include_pattern_camel.as_deref())
        .unwrap_or("");
    let exclude_list = args
        .exclude_pattern
        .as_deref()
        .or(args.exclude_pattern_camel.as_deref())
        .unwrap_or("");
    let include_globs = parse_globs(include_list);
    let exclude_globs = parse_globs(exclude_list);

    let max_results_val = args.max_results.or(args.max_results_camel).unwrap_or(200);
    let max_results = std::cmp::min(max_results_val as usize, 5000);
    // An explicit cap of 0 means "no results"; without this guard the loop
    // below would still return the first matching file.
    if max_results == 0 {
        return Ok(vec![]);
    }

    let wanted = args.pattern.trim().to_string();
    let want_all = wanted.is_empty() || wanted == "*";

    // Use ignore::WalkBuilder for fast traversal that respects .gitignore and
    // the standard ignore rules. A single thread keeps early-stop simple.
    let mut builder = WalkBuilder::new(&dir);
    builder.threads(1);

    let mut out: Vec<FileSearchItem> = Vec::new();
    for dent in builder.build() {
        let entry = match dent {
            Ok(e) => e,
            Err(_) => continue,
        };
        let meta = match entry.metadata() {
            Ok(m) => m,
            Err(_) => continue,
        };
        if meta.is_dir() {
            continue;
        }
        let path_str = entry.path().to_string_lossy().to_string();
        let norm = normalize_seps(&path_str);

        // Built-in skip dirs plus user-supplied exclude globs.
        if default_should_skip(&norm) || path_matches_any(&norm, &exclude_globs) {
            continue;
        }
        // When include globs are given, a path must match at least one.
        if !include_globs.is_empty() && !path_matches_any(&norm, &include_globs) {
            continue;
        }

        // The wildcard pattern may match either the bare file name or the
        // full (normalized) path.
        let name = entry.file_name().to_string_lossy().to_string();
        if !want_all && !wildcard_match_ci(&wanted, &name) && !wildcard_match_ci(&wanted, &norm) {
            continue;
        }

        out.push(FileSearchItem {
            path: path_str,
            name,
            size: if meta.is_file() { meta.len() } else { 0 },
            is_dir: false,
            modified: meta.modified().ok().map(to_unix_ms).unwrap_or(0),
        });
        if out.len() >= max_results {
            break;
        }
    }

    Ok(out)
}

use once_cell::sync::Lazy;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::sync::atomic::AtomicBool;
use uuid::Uuid;
use regex::RegexBuilder;
use std::io::{BufReader, Read};
use tauri::Emitter;

/// Arguments for [`start_content_search`].
///
/// Several options are accepted under both a camelCase (JS-style) name and a
/// snake_case name; callers may supply either variant.
#[derive(Debug, Clone, Deserialize)]
pub struct ContentSearchArgs {
    /// Root directory to search.
    pub directory: String,
    /// Query text: a literal string, or a regex when `is_regex` is true.
    pub query: String,
    /// Treat `query` as a regular expression (default: false).
    #[serde(rename = "isRegex")]
    pub is_regex: Option<bool>,
    /// Case-sensitive matching (default: false, i.e. case-insensitive).
    #[serde(rename = "caseSensitive")]
    pub case_sensitive: Option<bool>,
    /// Require word boundaries around the match (default: false).
    #[serde(rename = "wholeWord")]
    pub whole_word: Option<bool>,
    // camelCase twin of `include_pattern`.
    #[serde(rename = "includePattern")]
    pub include_pattern_camel: Option<String>,
    /// Comma-separated include globs; when set, a path must match one.
    pub include_pattern: Option<String>,
    // camelCase twin of `exclude_pattern`.
    #[serde(rename = "excludePattern")]
    pub exclude_pattern_camel: Option<String>,
    /// Comma-separated exclude globs; matching paths are skipped.
    pub exclude_pattern: Option<String>,
    // camelCase twin of `max_results`.
    #[serde(rename = "maxResults")]
    pub max_results_camel: Option<u32>,
    /// Total match cap (defaults to 2000, clamped to 100_000).
    pub max_results: Option<u32>,
    /// Skip files larger than this many bytes (default: 2 MiB).
    #[serde(rename = "maxFileSize")]
    pub max_file_size: Option<u64>, // bytes
}

/// Character range of a match within a single line
/// (`start` inclusive, `end` exclusive).
#[derive(Debug, Clone, Serialize)]
pub struct Submatch {
    pub start: usize,
    pub end: usize,
}

/// One content-search match streamed to the frontend in a `search:results:*`
/// event batch. Lines and columns are 1-based and character-counted.
#[derive(Debug, Clone, Serialize)]
pub struct ContentMatch {
    /// Path of the file containing the match.
    pub path: String,
    /// 1-based line where the match starts.
    pub start_line: u32,
    /// 1-based character column where the match starts.
    pub start_col: u32,
    /// 1-based line where the match ends.
    pub end_line: u32,
    /// 1-based character column of the match end (inclusive for single-line
    /// matches; on the last line for multi-line matches).
    pub end_col: u32,
    /// The matched text itself.
    pub text: String,
    /// Matched lines plus up to two lines of surrounding context.
    pub preview_lines: Vec<String>,
    // Keep legacy fields for backward compatibility
    /// Same as `start_line`.
    pub line_number: u32,
    /// Content of the first line of the match.
    pub line: String,
    /// Character ranges of the match within `line` (end exclusive).
    pub submatches: Vec<Submatch>,
}

/// Registry of in-flight content searches: maps a search id to its
/// cancellation flag. Entries are removed on cancel or when the search ends.
static SEARCH_CANCEL: Lazy<Mutex<HashMap<String, Arc<AtomicBool>>>> = Lazy::new(|| Mutex::new(HashMap::new()));

/// RAII guard that removes a search's entry from [`SEARCH_CANCEL`] when the
/// search task finishes (including early exit or panic), so the map does not
/// leak entries for completed searches.
struct CancelMapGuard {
    id: String,
}
impl Drop for CancelMapGuard {
    fn drop(&mut self) {
        // Recover from a poisoned mutex instead of panicking inside Drop.
        let _ = SEARCH_CANCEL
            .lock()
            .unwrap_or_else(|e| e.into_inner())
            .remove(&self.id);
    }
}
/// Compile the user's query into a `regex::Regex`.
///
/// * When `is_regex` is false the query is escaped and treated as a literal;
///   literal newlines in it are expanded to `\r?\n` so multi-line queries
///   match both LF and CRLF files.
/// * When `whole_word` is true, `\b` is added only on the side(s) where the
///   query actually starts/ends with a word character, so queries such as
///   `<body` or `body>` keep working.
///
/// Returns the regex error message as a `String` on failure.
fn build_regex(query: &str, is_regex: bool, case_sensitive: bool, whole_word: bool) -> Result<regex::Regex, String> {
    // Base pattern: raw user regex, or the escaped literal.
    let mut pattern = if is_regex {
        query.to_string()
    } else {
        regex::escape(query)
    };

    // Non-regex mode: make literal newlines CRLF-tolerant. (A "\r\n" in the
    // query implies it contains '\n', so one check suffices.)
    if !is_regex && query.contains('\n') {
        // Unify CRLF in the escaped pattern first, then expand LF.
        pattern = pattern.replace("\r\n", "\n");
        pattern = pattern.replace('\n', "\\r?\\n");
    }

    if whole_word {
        // Wrap a user-supplied regex in a non-capturing group so the word
        // boundaries bind the whole pattern, not just the first/last branch
        // of a top-level alternation (e.g. `foo|bar` -> `\b(?:foo|bar)\b`,
        // not `\bfoo|bar\b`). Escaped literals contain no alternation.
        if is_regex {
            pattern = format!("(?:{})", pattern);
        }
        let is_word = |c: char| c.is_alphanumeric() || c == '_';
        let starts_with_word = query.chars().next().map_or(false, is_word);
        let ends_with_word = query.chars().last().map_or(false, is_word);
        if starts_with_word {
            pattern = format!("\\b{}", pattern);
        }
        if ends_with_word {
            pattern = format!("{}\\b", pattern);
        }
    }

    RegexBuilder::new(&pattern)
        .case_insensitive(!case_sensitive)
        .multi_line(true)
        .dot_matches_new_line(true)
        .build()
        .map_err(|e| e.to_string())
}

/// Heuristic binary sniff: treat content as binary when a NUL byte appears
/// within the first 8 KiB.
fn looks_binary_prefix(bytes: &[u8]) -> bool {
    let scan_len = bytes.len().min(8192);
    bytes[..scan_len].contains(&0)
}

/// Tauri command: start an asynchronous content (grep-style) search rooted at
/// `args.directory`.
///
/// Returns a search id immediately. Matches are streamed to the frontend in
/// batches via `search:results:<id>` events; a final `search:done:<id>` event
/// carries the total number of emitted matches. The search can be stopped
/// early with [`cancel_content_search`].
///
/// Returns `Err` when the directory does not exist or the query fails to
/// compile into a regex.
#[tauri::command]
pub async fn start_content_search(app: tauri::AppHandle, args: ContentSearchArgs) -> Result<String, String> {
    let dir = PathBuf::from(&args.directory);
    if !dir.exists() { return Err("directory not found".into()); }

    // Accept both snake_case and camelCase option variants; snake_case wins.
    let include_list = args
        .include_pattern
        .as_deref()
        .or(args.include_pattern_camel.as_deref())
        .unwrap_or("");
    let exclude_list = args
        .exclude_pattern
        .as_deref()
        .or(args.exclude_pattern_camel.as_deref())
        .unwrap_or("");
    let include_globs = parse_globs(include_list);
    let exclude_globs = parse_globs(exclude_list);

    let max_results_val = args
        .max_results
        .or(args.max_results_camel)
        .unwrap_or(2000) as usize;
    let max_results = std::cmp::min(max_results_val, 100_000);
    let max_file_size = args.max_file_size.unwrap_or(2 * 1024 * 1024); // 2 MiB default

    let is_regex = args.is_regex.unwrap_or(false);
    let case_sensitive = args.case_sensitive.unwrap_or(false);
    let whole_word = args.whole_word.unwrap_or(false);

    let re = build_regex(&args.query, is_regex, case_sensitive, whole_word)?;

    let id = Uuid::new_v4().to_string();
    let id_for_emit = id.clone();
    let cancel_flag = Arc::new(AtomicBool::new(false));
    SEARCH_CANCEL
        .lock()
        .unwrap_or_else(|e| e.into_inner())
        .insert(id.clone(), cancel_flag.clone());

    let app_handle = app.clone();
    let root = dir.clone();

    tauri::async_runtime::spawn_blocking(move || {
        let mut builder = WalkBuilder::new(&root);
        builder.threads(1);
        let walker = builder.build();
        // Matches already flushed to the frontend; `batch` holds pending ones.
        let mut emitted: usize = 0;
        let mut batch: Vec<ContentMatch> = Vec::with_capacity(64);
        // Remove this search's cancel-flag entry even on early exit or panic.
        let _guard = CancelMapGuard { id: id_for_emit.clone() };

        'files: for dent in walker {
            if cancel_flag.load(std::sync::atomic::Ordering::Relaxed) { break; }
            let entry = match dent { Ok(e) => e, Err(_) => continue };
            let meta = match entry.metadata() { Ok(m) => m, Err(_) => continue };
            if meta.is_dir() { continue; }
            if meta.len() > max_file_size { continue; }

            let path = entry.path().to_path_buf();
            let path_str = path.to_string_lossy().to_string();
            let norm = normalize_seps(&path_str);

            if default_should_skip(&norm) || path_matches_any(&norm, &exclude_globs) { continue; }
            if !include_globs.is_empty() && !path_matches_any(&norm, &include_globs) { continue; }

            // Cheap binary sniff on the first 4 KiB before reading the file.
            let file = match std::fs::File::open(&path) { Ok(f) => f, Err(_) => continue };
            let mut reader = BufReader::new(file);
            let mut prefix = [0u8; 4096];
            let n = match reader.read(&mut prefix) { Ok(n) => n, Err(_) => 0 };
            if looks_binary_prefix(&prefix[..n]) { continue; }

            // Read the whole file as UTF-8; non-UTF-8 files are skipped.
            let file_content = match std::fs::read_to_string(&path) {
                Ok(content) => content,
                Err(_) => continue,
            };
            if looks_binary_prefix(file_content.as_bytes()) { continue; }

            let lines: Vec<&str> = file_content.lines().collect();
            let mut matches_found = 0;

            for m in re.find_iter(&file_content) {
                if cancel_flag.load(std::sync::atomic::Ordering::Relaxed) { break 'files; }

                let match_start = m.start();
                let matched_text = m.as_str();

                // 1-based start line: count newlines before the match start.
                let before_match = &file_content[..match_start];
                let start_line = before_match.matches('\n').count() as u32 + 1;

                // Byte offset of the match within its first line.
                let line_byte_start = before_match.rfind('\n').map_or(0, |pos| pos + 1);
                let local_start_byte = match_start - line_byte_start;

                // Content of the first line containing the match.
                let first_line_str = *lines.get((start_line - 1) as usize).unwrap_or(&"");

                // Convert the byte offset to a character index within the line
                // (columns are character-based, 1-based).
                let start_col_chars = first_line_str
                    .get(..std::cmp::min(local_start_byte, first_line_str.len()))
                    .map(|s| s.chars().count())
                    .unwrap_or(0) as u32;
                let start_col = start_col_chars + 1;

                // End line and end column (character-based).
                let matched_newlines = matched_text.matches('\n').count() as u32;
                let end_line = start_line + matched_newlines;
                let matched_chars = matched_text.chars().count() as u32;
                let end_col: u32 = if end_line == start_line {
                    // Single-line match: inclusive end column.
                    start_col + matched_chars - 1
                } else {
                    // Multi-line match: column on the last line (1-based).
                    let last_line_len = matched_text
                        .rsplit('\n')
                        .next()
                        .unwrap_or("")
                        .chars()
                        .count() as u32;
                    last_line_len + 1
                };

                // Up to 2 lines of context above and below the match.
                let context_lines = 2;
                let start_preview_line = std::cmp::max(1, start_line as i32 - context_lines as i32) as usize;
                let end_preview_line = std::cmp::min(lines.len(), (end_line as usize) + context_lines);
                let preview_lines: Vec<String> = lines[(start_preview_line - 1)..end_preview_line]
                    .iter()
                    .map(|&s| s.to_string())
                    .collect();

                // Legacy single-line representation for backward compatibility
                // (character indices; end is exclusive so the frontend can
                // slice the preview directly).
                let first_line_of_match = first_line_str.to_string();
                let line_start = (start_col - 1) as usize;
                let line_end = if end_line == start_line {
                    let end_char_idx = line_start + (matched_chars as usize);
                    std::cmp::min(end_char_idx, first_line_of_match.chars().count())
                } else {
                    // Multi-line: highlight until the end of the first line.
                    first_line_of_match.chars().count()
                };

                batch.push(ContentMatch {
                    path: path_str.clone(),
                    start_line,
                    start_col,
                    end_line,
                    end_col,
                    text: matched_text.to_string(),
                    preview_lines,
                    // Legacy fields
                    line_number: start_line,
                    line: first_line_of_match,
                    submatches: vec![Submatch { start: line_start, end: line_end }],
                });
                matches_found += 1;

                if batch.len() >= 50 {
                    let _ = app_handle.emit(&format!("search:results:{}", id_for_emit), &batch);
                    emitted += batch.len();
                    batch.clear();
                }
                // Enforce the global cap across ALL files, counting matches
                // still pending in the current batch; previously only flushed
                // batches were counted and only the per-file loop was broken,
                // so the walk continued past the limit.
                if emitted + batch.len() >= max_results { break 'files; }
                // Per-file cap so one pathological file cannot dominate.
                if matches_found >= 100 { break; }
            }
        }

        if !batch.is_empty() {
            let _ = app_handle.emit(&format!("search:results:{}", id_for_emit), &batch);
            // Include the final partial batch so the done event reports the
            // true total (previously undercounted by the last batch).
            emitted += batch.len();
        }
        let _ = app_handle.emit(&format!("search:done:{}", id_for_emit), &emitted);
    });

    Ok(id)
}

/// Tauri command: request cancellation of a running content search.
///
/// Removes the search's entry from [`SEARCH_CANCEL`] and raises its
/// cancellation flag, which the search worker polls between entries and
/// matches. Returns `Err("not found")` when no search with the given id is
/// currently registered.
#[tauri::command]
pub async fn cancel_content_search(id: String) -> Result<(), String> {
    let removed = SEARCH_CANCEL
        .lock()
        .unwrap_or_else(|e| e.into_inner())
        .remove(&id);
    match removed {
        Some(flag) => {
            flag.store(true, std::sync::atomic::Ordering::Relaxed);
            Ok(())
        }
        None => Err("not found".into()),
    }
}