use std::fs;
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};
use crate::local_history::entry::TimelineEntry;
use std::hash::{Hash, Hasher};
use std::collections::hash_map::DefaultHasher;
use crate::core::AppState;
use tauri::State;
use once_cell::sync::Lazy;
use std::sync::{Arc, Mutex};
use std::collections::HashMap;
use tokio::fs as afs;
// removed: use std::fs as sfs;
use tokio::sync::Mutex as AsyncMutex;

// Per-file locks to prevent concurrent writes to the same file's history
static FILE_LOCKS: Lazy<Mutex<HashMap<String, Arc<AsyncMutex<()>>>>> = Lazy::new(|| Mutex::new(HashMap::new()));
// Smart cache for last saved content hash per file to reduce redundant writes
static LAST_SAVED_ID: Lazy<Mutex<HashMap<String, String>>> = Lazy::new(|| Mutex::new(HashMap::new()));

/// Map a source file path to its dedicated history directory.
///
/// The path is hashed with the 64-bit `DefaultHasher` so arbitrary
/// absolute paths collapse into flat, filesystem-safe directory names
/// under `.history`.
/// NOTE(review): `.history` is relative to the process CWD — confirm
/// callers always run with a stable working directory.
fn get_history_dir(file_path: &str) -> PathBuf {
    let mut state = DefaultHasher::new();
    file_path.hash(&mut state);
    Path::new(".history").join(format!("{:x}", state.finish()))
}

// Return the per-file async mutex used to serialize history writes,
// creating it on first use. A poisoned registry lock is recovered via
// `into_inner`: the map itself cannot be left inconsistent by a
// panicking holder, so the data is still safe to use.
fn get_file_lock(file_path: &str) -> Arc<AsyncMutex<()>> {
    let mut registry = FILE_LOCKS.lock().unwrap_or_else(|poisoned| poisoned.into_inner());
    let slot = registry
        .entry(file_path.to_owned())
        .or_insert_with(|| Arc::new(AsyncMutex::new(())));
    Arc::clone(slot)
}

/// List all local-history snapshots recorded for `file_path`, newest first.
///
/// History files are named `{timestamp_ms}_{md5}.{ext}`; directory
/// entries that do not match that shape (or whose timestamp does not
/// parse) are ignored. Returns an empty vec when no history directory
/// exists yet.
pub fn get_local_history(file_path: &str) -> Vec<TimelineEntry> {
    let history_dir = get_history_dir(file_path);
    let mut entries = Vec::new();

    if let Ok(read_dir) = fs::read_dir(history_dir) {
        for entry in read_dir.filter_map(Result::ok) {
            let path = entry.path();
            if !path.is_file() {
                continue;
            }
            if let (Some(file_name), Some(extension)) = (
                path.file_name().and_then(|n| n.to_str()),
                path.extension().and_then(|s| s.to_str()),
            ) {
                // strip_suffix removes the extension exactly once;
                // trim_end_matches would repeatedly strip, mangling
                // names like "123_abc.rs.rs".
                let stem = file_name
                    .strip_suffix(&format!(".{}", extension))
                    .unwrap_or(file_name);
                let parts: Vec<&str> = stem.split('_').collect();
                if parts.len() != 2 {
                    continue;
                }
                // Require a parseable timestamp (consistent with
                // cleanup_history) instead of silently recording epoch 0.
                if let Ok(timestamp) = parts[0].parse::<i64>() {
                    entries.push(TimelineEntry {
                        id: parts[1].to_string(),
                        message: "Local Save".to_string(),
                        author: None,
                        timestamp,
                        source: "Local".to_string(),
                    });
                }
            }
        }
    }

    // Newest first.
    entries.sort_by(|a, b| b.timestamp.cmp(&a.timestamp));
    entries
}

// Check whether a snapshot with the given content hash `id` already
// exists in `history_dir` (snapshot filenames embed the md5 digest as
// `{timestamp}_{id}.{ext}`, so a substring match suffices).
//
// I/O errors from opening or iterating the directory are propagated via
// `?` instead of being silently collapsed into `Ok(false)` — the
// previous `while let Ok(Some(..))` loop stopped on the first error and
// reported "not found", masking failures from a `Result`-returning API.
// The only caller treats `Err` the same as `Ok(false)`, so this is a
// strictly more honest contract.
async fn has_existing_history_with_id(history_dir: &Path, id: &str) -> Result<bool, std::io::Error> {
    let mut rd = afs::read_dir(history_dir).await?;
    while let Some(entry) = rd.next_entry().await? {
        let path = entry.path();
        if path.is_file() {
            if let Some(name) = path.file_name().and_then(|n| n.to_str()) {
                if name.contains(id) {
                    return Ok(true);
                }
            }
        }
    }
    Ok(false)
}

/// Tauri command: snapshot `content` into the local history for
/// `file_path`, deduplicating by content hash, then prune old entries.
///
/// Flow: take the per-file async lock → ensure the history dir →
/// hash the content (md5) → skip if the in-memory cache or an on-disk
/// scan shows the same content was already saved → otherwise write
/// `{timestamp_ms}_{md5}.{ext}` → release the lock → run cleanup.
///
/// Returns `Err` when the history dir cannot be created, the system
/// clock is unavailable, the write fails, or `file_path` has no
/// extension ("Invalid file extension").
#[tauri::command]
pub async fn save_local_history(file_path: &str, content: &str, state: State<'_, AppState>) -> Result<(), String> {
    // Acquire per-file async lock to serialize writes for the same file
    let file_lock = get_file_lock(file_path);
    let _guard = file_lock.lock().await;

    let history_dir = get_history_dir(file_path);

    // Ensure directory exists (async)
    afs::create_dir_all(&history_dir)
        .await
        .map_err(|e| format!("Failed to create history dir: {}", e))?;

    // Current timestamp in ms
    let timestamp = match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(d) => d.as_millis() as i64,
        Err(e) => return Err(format!("System time error: {}", e)),
    };

    // Content hash doubles as the snapshot id embedded in the file name.
    let id = format!("{:x}", md5::compute(content));

    // Smart caching: skip if same content hash was just saved for this file
    if let Ok(map) = LAST_SAVED_ID.lock() {
        if let Some(last_id) = map.get(file_path) {
            if last_id == &id {
                // Drop lock before return
                drop(map);
                return Ok(());
            }
        }
    }

    if let Some(extension) = Path::new(file_path).extension().and_then(|s| s.to_str()) {
        // Also check if a file with the same id already exists in history directory to avoid duplicates
        match has_existing_history_with_id(&history_dir, &id).await {
            Ok(true) => {
                // Update cache and skip writing
                if let Ok(mut map) = LAST_SAVED_ID.lock() {
                    map.insert(file_path.to_string(), id.clone());
                }
                // Drop the lock before running cleanup to minimize contention
                drop(_guard);
                cleanup_history(file_path, &state);
                return Ok(());
            }
            Ok(false) => {}
            // NOTE(review): directory-scan errors are deliberately treated
            // as "no duplicate found" so a transient I/O error still
            // results in the snapshot being written below.
            Err(_) => {}
        }

        // Snapshot name shape: {timestamp_ms}_{md5}.{original extension}
        let file_name = format!("{}_{}.{}", timestamp, id, extension);
        let file_path_to_save = history_dir.join(file_name);
        // Write content asynchronously
        afs::write(&file_path_to_save, content.as_bytes())
            .await
            .map_err(|e| format!("Failed to write history file: {}", e))?;

        // Update last saved id cache
        if let Ok(mut map) = LAST_SAVED_ID.lock() {
            map.insert(file_path.to_string(), id.clone());
        }
    } else {
        return Err("Invalid file extension".to_string());
    }

    // Drop the lock before running cleanup to minimize contention
    drop(_guard);

    // Run cleanup synchronously (fast) on the current thread; it's mostly small directory scans
    cleanup_history(file_path, &state);

    Ok(())
}

// Default configuration values, used when no overriding setting is
// stored in the app database (see get_history_config).
const DEFAULT_MAX_FILES_PER_PROJECT: i32 = 1000; // global cap across all history dirs
const DEFAULT_MAX_ENTRIES_PER_FILE: i32 = 50; // snapshots kept per tracked file
const DEFAULT_MAX_AGE_DAYS: i32 = 30; // snapshots older than this are pruned

// Load the history retention configuration from app settings, falling
// back to the compile-time defaults for any key that is missing,
// unreadable, or unparseable.
// Returns (max_files_per_project, max_entries_per_file, max_age_days).
fn get_history_config(state: &AppState) -> (i32, i32, i32) {
    let lookup = |key: &str, default: i32| -> i32 {
        crate::db::get_setting(&state.db_path, key)
            .unwrap_or(None)
            .and_then(|raw| raw.parse().ok())
            .unwrap_or(default)
    };

    (
        lookup("history_max_files_per_project", DEFAULT_MAX_FILES_PER_PROJECT),
        lookup("history_max_entries_per_file", DEFAULT_MAX_ENTRIES_PER_FILE),
        lookup("history_max_age_days", DEFAULT_MAX_AGE_DAYS),
    )
}

// Clean up old history entries based on configuration
fn cleanup_history(file_path: &str, state: &AppState) {
    let history_dir = get_history_dir(file_path);
    let (max_files_total, max_entries_per_file, max_age_days) = get_history_config(state);
    
    if !history_dir.exists() {
        return;
    }
    
    // Get all history files with timestamps
    let mut history_files = Vec::new();
    if let Ok(read_dir) = fs::read_dir(&history_dir) {
        for entry in read_dir.filter_map(Result::ok) {
            let path = entry.path();
            if path.is_file() {
                if let Some(file_name) = path.file_name().and_then(|n| n.to_str()) {
                    if let Some(extension) = path.extension().and_then(|s| s.to_str()) {
                        let timestamp_and_id = file_name.trim_end_matches(&format!(".{}", extension));
                        let parts: Vec<&str> = timestamp_and_id.split('_').collect();
                        if parts.len() == 2 {
                            if let Ok(timestamp) = parts[0].parse::<i64>() {
                                history_files.push((timestamp, path));
                            }
                        }
                    }
                }
            }
        }
    }
    
    // Sort by timestamp (newest first)
    history_files.sort_by(|a, b| b.0.cmp(&a.0));
    
    let current_time = match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(duration) => duration.as_millis() as i64,
        Err(_) => return, // Cannot get current time, skip cleanup
    };
    
    let max_age_ms = max_age_days as i64 * 24 * 60 * 60 * 1000;
    
    // Remove files that are too old or exceed the limit
    for (i, (timestamp, path)) in history_files.iter().enumerate() {
        let should_remove = i >= max_entries_per_file as usize || 
                           (current_time - timestamp) > max_age_ms;
        
        if should_remove {
            let _ = fs::remove_file(path);
        }
    }
    
    // Global cleanup: check total files across all projects
    cleanup_global_history(state, max_files_total);
}

// Enforce the global snapshot cap: scan every per-file history
// directory under `.history` and delete the oldest snapshots beyond
// `max_files_total` (newest are kept).
fn cleanup_global_history(_state: &AppState, max_files_total: i32) {
    let history_root = PathBuf::from(".history");
    if !history_root.exists() {
        return;
    }

    let mut all_files: Vec<(i64, PathBuf)> = Vec::new();

    // Collect every well-formed snapshot from every project directory.
    if let Ok(read_dir) = fs::read_dir(&history_root) {
        for project_entry in read_dir.filter_map(Result::ok) {
            let project_path = project_entry.path();
            if !project_path.is_dir() {
                continue;
            }
            let files = match fs::read_dir(&project_path) {
                Ok(rd) => rd,
                Err(_) => continue,
            };
            for file_entry in files.filter_map(Result::ok) {
                let file_path = file_entry.path();
                if !file_path.is_file() {
                    continue;
                }
                let name = match file_path.file_name().and_then(|n| n.to_str()) {
                    Some(n) => n,
                    None => continue,
                };
                let ext = match file_path.extension().and_then(|s| s.to_str()) {
                    Some(e) => e,
                    None => continue,
                };
                let base = name.trim_end_matches(&format!(".{}", ext));
                let pieces: Vec<&str> = base.split('_').collect();
                if pieces.len() == 2 {
                    if let Ok(ts) = pieces[0].parse::<i64>() {
                        all_files.push((ts, file_path));
                    }
                }
            }
        }
    }

    // Newest first; everything past the cap gets deleted.
    // (`skip` past the end yields nothing, so no explicit length check.)
    all_files.sort_by(|a, b| b.0.cmp(&a.0));
    for (_, path) in all_files.iter().skip(max_files_total as usize) {
        let _ = fs::remove_file(path);
    }
}

/// Tauri command: return the stored content of the history snapshot
/// whose filename contains `id` (the md5 content hash) for `file_path`.
/// Errors when the history directory is unreadable or no snapshot
/// matches ("File not found").
#[tauri::command]
pub async fn get_local_history_content(file_path: &str, id: &str) -> Result<String, String> {
    let history_dir = get_history_dir(file_path);

    let mut read_dir = tokio::fs::read_dir(history_dir)
        .await
        .map_err(|e| format!("Failed to read directory: {}", e))?;

    while let Some(entry) = read_dir
        .next_entry()
        .await
        .map_err(|e| format!("Directory read error: {}", e))?
    {
        let path = entry.path();
        if !path.is_file() {
            continue;
        }
        let is_match = path
            .file_name()
            .and_then(|n| n.to_str())
            .map_or(false, |name| name.contains(id));
        if is_match {
            return tokio::fs::read_to_string(path)
                .await
                .map_err(|e| format!("Failed to read file: {}", e));
        }
    }

    Err("File not found".to_string())
}

/// Tauri command: persist one history retention setting. Accepted keys
/// are "max_files_per_project", "max_entries_per_file" and
/// "max_age_days"; the value is stored under "history_{key}".
#[tauri::command]
pub fn set_history_config(key: &str, value: i32, state: State<'_, AppState>) -> Result<(), String> {
    let allowed = ["max_files_per_project", "max_entries_per_file", "max_age_days"];
    if !allowed.contains(&key) {
        return Err("Invalid configuration key".to_string());
    }
    let setting_key = format!("history_{}", key);
    crate::db::set_setting(&state.db_path, &setting_key, &value.to_string())
        .map_err(|e| format!("Failed to save setting: {}", e))?;
    Ok(())
}

/// Tauri command: read one history retention setting, falling back to
/// its compile-time default when the key is absent or unparseable.
/// Unknown keys yield "Invalid configuration key".
#[tauri::command]
pub fn get_history_config_value(key: &str, state: State<'_, AppState>) -> Result<i32, String> {
    let default = match key {
        "max_files_per_project" => DEFAULT_MAX_FILES_PER_PROJECT,
        "max_entries_per_file" => DEFAULT_MAX_ENTRIES_PER_FILE,
        "max_age_days" => DEFAULT_MAX_AGE_DAYS,
        _ => return Err("Invalid configuration key".to_string()),
    };

    let stored = crate::db::get_setting(&state.db_path, &format!("history_{}", key))
        .map_err(|e| format!("Failed to get setting: {}", e))?;

    Ok(stored.and_then(|raw| raw.parse().ok()).unwrap_or(default))
}

/// Tauri command: run retention cleanup over every history directory in
/// one pass — per-directory age/count pruning first, then the global
/// file cap — and report how much was removed.
#[tauri::command]
pub fn cleanup_all_history(state: State<'_, AppState>) -> Result<String, String> {
    let history_root = PathBuf::from(".history");
    if !history_root.exists() {
        return Ok("No history directory found".to_string());
    }

    let (max_files_total, max_entries_per_file, max_age_days) = get_history_config(&state);
    let mut cleaned_files = 0usize;
    let mut cleaned_dirs = 0usize;

    // Per-directory cleanup and collect all files for global cap
    let mut all_files: Vec<(i64, PathBuf)> = Vec::new();
    if let Ok(read_dir) = fs::read_dir(&history_root) {
        for dir_entry in read_dir.filter_map(Result::ok) {
            let dir_path = dir_entry.path();
            if !dir_path.is_dir() { continue; }

            let mut dir_files: Vec<(i64, PathBuf)> = Vec::new();
            if let Ok(files_iter) = fs::read_dir(&dir_path) {
                for f in files_iter.filter_map(Result::ok) {
                    let path = f.path();
                    if path.is_file() {
                        if let Some(file_name) = path.file_name().and_then(|n| n.to_str()) {
                            if let Some(ext) = path.extension().and_then(|s| s.to_str()) {
                                // Snapshot names are "{timestamp}_{id}.{ext}";
                                // anything else is silently ignored.
                                let base = file_name.trim_end_matches(&format!(".{}", ext));
                                let parts: Vec<&str> = base.split('_').collect();
                                if parts.len() == 2 {
                                    if let Ok(ts) = parts[0].parse::<i64>() { dir_files.push((ts, path.clone())); }
                                }
                            }
                        }
                    }
                }
            }

            if dir_files.is_empty() { continue; }

            // Sort newest first
            dir_files.sort_by(|a, b| b.0.cmp(&a.0));
            // now_ms == 0 signals a failed clock read; age-based pruning
            // is then skipped (see the `now_ms > 0` guard below).
            let now_ms = match SystemTime::now().duration_since(UNIX_EPOCH) { Ok(d) => d.as_millis() as i64, Err(_) => 0 };
            let max_age_ms = max_age_days as i64 * 24 * 60 * 60 * 1000;

            for (i, (ts, p)) in dir_files.iter().enumerate() {
                let too_old = now_ms > 0 && (now_ms - ts) > max_age_ms;
                let exceed = i >= max_entries_per_file as usize;
                if too_old || exceed {
                    if fs::remove_file(p).is_ok() { cleaned_files += 1; }
                } else {
                    // Survivors compete for the global cap below.
                    all_files.push((*ts, p.clone()));
                }
            }
            // Counts every non-empty directory scanned, not only
            // directories that actually lost files.
            cleaned_dirs += 1;
        }
    }

    // Enforce global cap across all dirs (newest first)
    all_files.sort_by(|a, b| b.0.cmp(&a.0));
    if all_files.len() > max_files_total as usize {
        for (_, p) in all_files.into_iter().skip(max_files_total as usize) {
            if fs::remove_file(p).is_ok() { cleaned_files += 1; }
        }
    }

    Ok(format!("Cleaned {} files across {} history dirs", cleaned_files, cleaned_dirs))
}