"""
Command line interface for Claude Code workdir statistics.
"""

import json
import logging
import argparse
import asyncio
from pathlib import Path
from typing import List, Optional, Dict, Any
from datetime import datetime, timedelta, timezone

from .core import WorkdirStats, WorkdirStatsCollector, SessionInfo
from .semantic_search import create_semantic_searcher, SEMANTIC_RAG_AVAILABLE
import re
import tempfile
import pickle
import os


# Global session cache for --enter-session functionality
_session_cache = {}


def save_session_cache(sessions_with_ids: List[tuple]) -> None:
    """Save session cache for --enter-session functionality."""
    global _session_cache
    _session_cache = {sid: (workdir, session) for sid, workdir, session in sessions_with_ids}
    
    # Also save to temp file for persistence across runs
    cache_file = os.path.join(tempfile.gettempdir(), 'ccwork_session_cache.pkl')
    try:
        with open(cache_file, 'wb') as f:
            pickle.dump(_session_cache, f)
    except Exception:
        pass  # Ignore cache save errors


def load_session_cache() -> Dict:
    """Load session cache from temp file."""
    global _session_cache
    if _session_cache:
        return _session_cache
    
    cache_file = os.path.join(tempfile.gettempdir(), 'ccwork_session_cache.pkl')
    try:
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as f:
                _session_cache = pickle.load(f)
                return _session_cache
    except Exception:
        pass  # Ignore cache load errors
    
    return {}


def enter_session_by_id(session_id: str) -> None:
    """Enter/activate a session by its ID."""
    cache = load_session_cache()
    
    if not cache:
        print("❌ No cached sessions found. Run a search query first to populate the cache.")
        return
    
    try:
        sid = int(session_id)
    except ValueError:
        print(f"❌ Invalid session ID: {session_id}. Must be a number.")
        return
    
    if sid not in cache:
        print(f"❌ Session ID {sid} not found in cache.")
        print(f"Available IDs: {', '.join(map(str, sorted(cache.keys())))}")
        return
    
    workdir, session = cache[sid]
    
    # For now, just print the session information
    # In the future, this could be extended to actually activate/open the session
    print(f"📂 Session Details:")
    print(f"   Directory: {workdir}")
    print(f"   Session ID: {session.session_id}")
    print(f"   Start Time: {session.start_time.strftime('%Y-%m-%d %H:%M:%S UTC')}")
    print(f"   Abstract: {session.abstract or 'No abstract available'}")
    print(f"   Tokens: {session.total_tokens:,} | Cost: ${session.total_cost:.3f}")
    if session.models_used:
        print(f"   Models: {', '.join(session.models_used)}")
    print()
    print(f"🔗 To navigate to session directory:")
    print(f"   cd '{workdir}'")
    print()
    print(f"💡 Session file location (if available):")
    print(f"   ~/.claude/sessions/{session.session_id}")


# Color utility functions for terminal output
class Colors:
    """ANSI color codes for terminal output."""
    # Basic colors
    RESET = '\033[0m'
    BOLD = '\033[1m'
    DIM = '\033[2m'
    
    # Foreground colors
    BLACK = '\033[30m'
    RED = '\033[31m'
    GREEN = '\033[32m'
    YELLOW = '\033[33m'
    BLUE = '\033[34m'
    MAGENTA = '\033[35m'
    CYAN = '\033[36m'
    WHITE = '\033[37m'
    
    # Bright foreground colors
    BRIGHT_BLACK = '\033[90m'
    BRIGHT_RED = '\033[91m'
    BRIGHT_GREEN = '\033[92m'
    BRIGHT_YELLOW = '\033[93m'
    BRIGHT_BLUE = '\033[94m'
    BRIGHT_MAGENTA = '\033[95m'
    BRIGHT_CYAN = '\033[96m'
    BRIGHT_WHITE = '\033[97m'
    
    @staticmethod
    def is_supported():
        """Check if terminal supports colors."""
        return (
            hasattr(os.sys.stdout, 'isatty') and os.sys.stdout.isatty() and
            os.getenv('TERM', '').lower() != 'dumb' and
            os.getenv('NO_COLOR', '') == ''
        )


def colorize(text: str, color: str) -> str:
    """Apply color to text if terminal supports it."""
    if Colors.is_supported():
        return f"{color}{text}{Colors.RESET}"
    return text


def format_directory_id(dir_id: int) -> str:
    """Format directory ID with color."""
    return colorize(f"{dir_id:2d}.", Colors.BRIGHT_CYAN + Colors.BOLD)


def format_directory_path(path: str, max_length: int = 0) -> str:
    """Format directory path with color."""
    if max_length > 0:
        path = format_path(path, max_length)
    return colorize(path, Colors.BRIGHT_WHITE + Colors.BOLD)


def format_session_count(active: int, total: int) -> str:
    """Format session count with color."""
    active_str = colorize(str(active), Colors.BRIGHT_GREEN if active > 0 else Colors.BRIGHT_BLACK)
    total_str = colorize(str(total), Colors.CYAN)
    return f"{active_str}/{total_str}"


def format_timestamp(timestamp_str: str) -> str:
    """Format timestamp with color."""
    return colorize(timestamp_str, Colors.YELLOW)


def format_status_indicator(is_active: bool) -> str:
    """Format status indicator with color."""
    if is_active:
        return colorize("🟢 ACTIVE", Colors.BRIGHT_GREEN + Colors.BOLD)
    else:
        return colorize("⚪ INACTIVE", Colors.BRIGHT_BLACK)


def format_tokens(tokens: int) -> str:
    """Format token count with color."""
    return colorize(f"{tokens:,}", Colors.MAGENTA)


def format_cost(cost: float) -> str:
    """Format cost with color."""
    return colorize(f"${cost:.3f}", Colors.BRIGHT_YELLOW)


# Cache functionality for directory listings
CACHE_DIR = os.path.expanduser("~/.ccwork_cache")
CACHE_FILE = os.path.join(CACHE_DIR, "directory_list_cache.json")
CACHE_EXPIRY_MINUTES = 30  # Cache expires after 30 minutes


def ensure_cache_dir():
    """Ensure cache directory exists."""
    os.makedirs(CACHE_DIR, exist_ok=True)


def save_directory_cache(directory_list: List[tuple]) -> None:
    """Save directory list to cache with timestamp."""
    try:
        ensure_cache_dir()
        cache_data = {
            "timestamp": datetime.now().isoformat(),
            "directories": []
        }
        
        # Convert directory list to cacheable format
        for dir_id, stats in directory_list:
            cache_data["directories"].append({
                "id": dir_id,
                "workdir": stats.workdir,
                "active_sessions": stats.active_sessions,
                "session_count": stats.session_count,
                "last_activity": stats.last_activity.isoformat() if stats.last_activity else None,
                "total_tokens": stats.total_tokens,
                "total_cost": stats.total_cost,
                "models_used": stats.models_used
            })
        
        with open(CACHE_FILE, 'w') as f:
            json.dump(cache_data, f, indent=2)
            
    except Exception as e:
        # Cache failure shouldn't break the command
        pass


def load_directory_cache() -> Optional[List[tuple]]:
    """Load directory list from cache if valid and not expired."""
    try:
        if not os.path.exists(CACHE_FILE):
            return None
            
        with open(CACHE_FILE, 'r') as f:
            cache_data = json.load(f)
            
        # Check cache expiry
        cache_time = datetime.fromisoformat(cache_data["timestamp"])
        expiry_time = cache_time + timedelta(minutes=CACHE_EXPIRY_MINUTES)
        
        if datetime.now() > expiry_time:
            return None  # Cache expired
            
        # Reconstruct directory list
        directory_list = []
        for dir_data in cache_data["directories"]:
            # Create a minimal stats object for cache purposes
            from types import SimpleNamespace
            stats = SimpleNamespace(
                workdir=dir_data["workdir"],
                active_sessions=dir_data["active_sessions"],
                session_count=dir_data["session_count"],
                last_activity=datetime.fromisoformat(dir_data["last_activity"]) if dir_data["last_activity"] else None,
                total_tokens=dir_data["total_tokens"],
                total_cost=dir_data["total_cost"],
                models_used=dir_data["models_used"]
            )
            directory_list.append((dir_data["id"], stats))
            
        return directory_list
        
    except Exception as e:
        # Cache failure shouldn't break the command
        return None


def clear_directory_cache() -> None:
    """Clear the directory cache file."""
    try:
        if os.path.exists(CACHE_FILE):
            os.remove(CACHE_FILE)
    except Exception:
        pass


def search_sessions_by_string(collector: WorkdirStatsCollector, search_term: str, workdir_filter: Optional[str] = None) -> None:
    """Search session history using string matching."""
    if workdir_filter:
        print(f"🔍 Searching session history for: '{search_term}' (in {workdir_filter})")
    else:
        print(f"🌐 Searching ALL session history for: '{search_term}' (global search)")
    print("=" * 60)
    
    stats_list = collector.collect_all_workdir_stats()
    
    # Apply workdir filtering if specified
    if workdir_filter:
        stats_list = [s for s in stats_list if s.workdir == workdir_filter or s.workdir == workdir_filter.replace(str(Path.cwd()), '.')]
        if not stats_list:
            print(f"No activity found for workdir: {workdir_filter}")
            return
    
    found_sessions = []
    
    for stats in stats_list:
        sessions = collector.get_workdir_sessions(stats.workdir)
        for session in sessions:
            # Search in first user prompt and abstract
            matches = []
            
            if session.first_user_prompt and search_term.lower() in session.first_user_prompt.lower():
                matches.append("prompt")
            
            if session.abstract and search_term.lower() in session.abstract.lower():
                matches.append("abstract")
            
            if matches:
                found_sessions.append((stats.workdir, session, matches))
    
    if not found_sessions:
        print(f"No sessions found matching '{search_term}'")
        return
    
    print(f"Found {len(found_sessions)} matching sessions:\n")
    
    # Create sessions with IDs and save to cache
    sessions_with_ids = []
    for i, (workdir, session, matches) in enumerate(found_sessions, 1):
        sessions_with_ids.append((i, workdir, session))
    
    save_session_cache(sessions_with_ids)
    
    for i, (workdir, session, matches) in enumerate(found_sessions, 1):
        status = "🟢 ACTIVE" if session.is_active else "⚪ INACTIVE"
        match_info = f" (matched in: {', '.join(matches)})"
        
        print(f"{i:2d}. {status} {workdir}{match_info}")
        print(f"    Session: {session.session_id}")
        print(f"    Time: {session.start_time.strftime('%Y-%m-%d %H:%M:%S UTC')}")
        print(f"    Abstract: {session.abstract or 'No abstract available'}")
        print(f"    Tokens: {session.total_tokens:,} | Cost: ${session.total_cost:.3f}")
        if session.models_used:
            print(f"    Models: {', '.join(session.models_used)}")
        print()


async def search_sessions_by_query_async(collector: WorkdirStatsCollector, query: str, workdir_filter: Optional[str] = None) -> None:
    """Search sessions using semantic/natural language search (async version)."""
    if workdir_filter:
        print(f"🔍 Semantic search for: '{query}' (in {workdir_filter})")
    else:
        print(f"🌐 Semantic search for: '{query}' (global search)")
    print("=" * 60)
    
    # Try to use semantic search if available
    if SEMANTIC_RAG_AVAILABLE:
        try:
            semantic_searcher = create_semantic_searcher()
            if semantic_searcher:
                print("🧠 Using enhanced semantic search with embeddings...")
                
                # Initialize semantic searcher
                success, error = await semantic_searcher.initialize()
                if not success:
                    print(f"⚠️  Semantic search initialization failed: {error}")
                    print("   Falling back to enhanced keyword search...")
                    semantic_searcher = None
                
                if semantic_searcher:
                    # Use semantic search with workdir filtering
                    matches = await semantic_searcher.search_sessions_semantic(collector, query, workdir_filter=workdir_filter)
                    
                    if not matches:
                        print(f"No sessions found matching '{query}'")
                        return
                    
                    print(f"Found {len(matches)} relevant sessions (sorted by semantic similarity):\n")
                    
                    for match in matches:
                        status = "🟢 ACTIVE" if match.session.is_active else "⚪ INACTIVE"
                        relevance = f"(similarity: {match.score:.3f})"
                        match_info = f"(matched in {match.match_source})"
                        
                        print(f"{status} {match.workdir} {relevance} {match_info}")
                        print(f"   Session: {match.session.session_id}")
                        print(f"   Time: {match.session.start_time.strftime('%Y-%m-%d %H:%M:%S UTC')}")
                        print(f"   Abstract: {match.session.abstract or 'No abstract available'}")
                        print(f"   Tokens: {match.session.total_tokens:,} | Cost: ${match.session.total_cost:.3f}")
                        if match.session.models_used:
                            print(f"   Models: {', '.join(match.session.models_used)}")
                        if match.matched_content and len(match.matched_content) > 0:
                            print(f"   Preview: {match.matched_content[:150]}...")
                        print()
                    return
        
        except Exception as e:
            print(f"⚠️  Semantic search failed: {e}")
            print("   Falling back to enhanced keyword search...")
    
    # Fallback to original keyword-based search
    print("🔤 Using enhanced keyword search...")
    search_sessions_by_query_fallback(collector, query, workdir_filter)


def search_sessions_by_query_fallback(collector: WorkdirStatsCollector, query: str, workdir_filter: Optional[str] = None) -> None:
    """Fallback search using enhanced keyword matching."""
    stats_list = collector.collect_all_workdir_stats()
    
    # Apply workdir filtering if specified
    if workdir_filter:
        stats_list = [s for s in stats_list if s.workdir == workdir_filter or s.workdir == workdir_filter.replace(str(Path.cwd()), '.')]
        if not stats_list:
            print(f"No activity found for workdir: {workdir_filter}")
            return
    
    scored_sessions = []
    
    # Convert query to lowercase for comparison
    query_lower = query.lower()
    query_words = re.findall(r'\b\w+\b', query_lower)
    
    for stats in stats_list:
        sessions = collector.get_workdir_sessions(stats.workdir)
        for session in sessions:
            score = 0
            
            # Score based on word matches in prompts and abstracts
            text_to_search = []
            if session.first_user_prompt:
                text_to_search.append(session.first_user_prompt.lower())
            if session.abstract:
                text_to_search.append(session.abstract.lower())
            
            full_text = ' '.join(text_to_search)
            
            if full_text:
                # Count matching words
                for word in query_words:
                    if word in full_text:
                        score += 1
                
                # Bonus for exact phrase matches
                if query_lower in full_text:
                    score += len(query_words)
                
                # Score normalization (higher is better)
                if score > 0:
                    scored_sessions.append((score, stats.workdir, session))
    
    # Sort by score (highest first)
    scored_sessions.sort(key=lambda x: x[0], reverse=True)
    
    if not scored_sessions:
        print(f"No sessions found matching '{query}'")
        return
    
    print(f"Found {len(scored_sessions)} relevant sessions (sorted by relevance):\n")
    
    # Create sessions with IDs and save to cache
    sessions_with_ids = []
    for i, (score, workdir, session) in enumerate(scored_sessions, 1):
        sessions_with_ids.append((i, workdir, session))
    
    save_session_cache(sessions_with_ids)
    
    for i, (score, workdir, session) in enumerate(scored_sessions, 1):
        status = "🟢 ACTIVE" if session.is_active else "⚪ INACTIVE"
        relevance = f"(relevance: {score})"
        
        print(f"{i:2d}. {status} {workdir} {relevance}")
        print(f"    Session: {session.session_id}")
        print(f"    Time: {session.start_time.strftime('%Y-%m-%d %H:%M:%S UTC')}")
        print(f"    Abstract: {session.abstract or 'No abstract available'}")
        print(f"    Tokens: {session.total_tokens:,} | Cost: ${session.total_cost:.3f}")
        if session.models_used:
            print(f"    Models: {', '.join(session.models_used)}")
        print()


def search_sessions_by_query(collector: WorkdirStatsCollector, query: str, workdir_filter: Optional[str] = None) -> None:
    """Search sessions using semantic/natural language search (sync wrapper)."""
    # Run the async version
    asyncio.run(search_sessions_by_query_async(collector, query, workdir_filter))


def get_missing_sessions_details(collector: WorkdirStatsCollector, missing_session_keys: List[str]) -> List[Dict[str, Any]]:
    """Get detailed information about missing sessions and why they failed to index."""
    missing_details = []
    
    # Create a mapping of session keys to session objects
    session_map = {}
    stats_list = collector.collect_all_workdir_stats()
    
    for stats in stats_list:
        sessions = collector.get_workdir_sessions(stats.workdir)
        for session in sessions:
            session_key = f"{stats.workdir}:{session.session_id}"
            session_map[session_key] = {
                'session': session,
                'workdir': stats.workdir
            }
    
    # Get details for each missing session
    for session_key in missing_session_keys:
        if session_key in session_map:
            session_data = session_map[session_key]
            session = session_data['session']
            workdir = session_data['workdir']
            
            # Analyze why this session might have failed to index
            failure_reason = analyze_indexing_failure(session)
            
            missing_details.append({
                'session_id': session.session_id,
                'workdir': workdir,
                'start_time': session.start_time.strftime('%Y-%m-%d %H:%M:%S'),
                'prompt': session.first_user_prompt or "No prompt available",
                'model': ", ".join(session.models_used) if session.models_used else "Unknown",
                'tokens': session.total_tokens,
                'has_abstract': bool(session.abstract),
                'indexing_failure_reason': failure_reason
            })
        else:
            # Session key exists in index but not found in current sessions
            workdir, session_id = session_key.split(':', 1) if ':' in session_key else ('Unknown', session_key)
            missing_details.append({
                'session_id': session_id,
                'workdir': workdir,
                'start_time': "Unknown",
                'prompt': "Session not found in current data",
                'model': "Unknown",
                'tokens': 0,
                'has_abstract': False,
                'indexing_failure_reason': "Session no longer exists in current data"
            })
    
    # Sort by start_time (most recent first)
    missing_details.sort(key=lambda x: x['start_time'], reverse=True)
    return missing_details


def clean_unindexable_sessions(collector: WorkdirStatsCollector) -> None:
    """Clean sessions that cannot be indexed (no prompt or very short prompts)."""
    print("🧹 Scanning for unindexable sessions...")
    print("=" * 50)
    
    # Get all sessions
    stats_list = collector.collect_all_workdir_stats()
    all_sessions = []
    
    for stats in stats_list:
        sessions = collector.get_workdir_sessions(stats.workdir)
        for session in sessions:
            all_sessions.append((session, stats.workdir))
    
    # Find unindexable sessions
    unindexable_sessions = []
    active_unindexable = []
    
    for session, workdir in all_sessions:
        should_clean = False
        reason = ""
        
        # Check if session has no prompt
        if not session.first_user_prompt:
            should_clean = True
            reason = "No user prompt"
        # Check if session has very short prompt (< 10 chars)
        elif len(session.first_user_prompt.strip()) < 10:
            should_clean = True
            reason = f"Very short prompt ({len(session.first_user_prompt.strip())} chars)"
        
        if should_clean:
            if session.is_active:
                active_unindexable.append((session, workdir, reason))
            else:
                unindexable_sessions.append((session, workdir, reason))
    
    # Display what will be cleaned
    total_cleanable = len(unindexable_sessions)
    total_active_unindexable = len(active_unindexable)
    
    print(f"📊 Found {total_cleanable} inactive unindexable sessions")
    if total_active_unindexable > 0:
        print(f"⚠️  Found {total_active_unindexable} active unindexable sessions (will be skipped)")
    
    if total_cleanable == 0:
        print("✅ No unindexable sessions to clean!")
        return
    
    # Show details of sessions to be cleaned
    print(f"\n📝 Sessions to be cleaned:")
    
    for i, (session, workdir, reason) in enumerate(unindexable_sessions[:20]):  # Show first 20
        start_time = session.start_time.strftime('%Y-%m-%d %H:%M:%S')
        prompt_preview = session.first_user_prompt[:50] + "..." if session.first_user_prompt and len(session.first_user_prompt) > 50 else session.first_user_prompt or "No prompt"
        
        print(f"   {i+1:2d}. {session.session_id}")
        print(f"       Workdir: {format_path(workdir)}")
        print(f"       Time: {start_time}")
        print(f"       Reason: {reason}")
        print(f"       Prompt: {prompt_preview}")
        print()
    
    if total_cleanable > 20:
        print(f"   ... and {total_cleanable - 20} more sessions")
        print()
    
    if total_active_unindexable > 0:
        print(f"⚠️  Active unindexable sessions (NOT cleaned):")
        for i, (session, workdir, reason) in enumerate(active_unindexable[:5]):
            start_time = session.start_time.strftime('%Y-%m-%d %H:%M:%S')
            print(f"   {i+1}. {session.session_id} - {reason} ({start_time})")
        
        if total_active_unindexable > 5:
            print(f"   ... and {total_active_unindexable - 5} more active sessions")
        print()
    
    # Ask for confirmation
    try:
        response = input(f"🗑️  Delete {total_cleanable} unindexable sessions? (y/N): ").strip().lower()
        if response not in ['y', 'yes']:
            print("❌ Cleaning cancelled")
            return
    except KeyboardInterrupt:
        print("\n❌ Cleaning cancelled")
        return
    
    # Perform the cleaning
    print("\n🗑️  Cleaning unindexable sessions...")
    cleaned_count = 0
    
    for session, workdir, reason in unindexable_sessions:
        try:
            # This would need to be implemented based on how sessions are stored
            # For now, we'll just report what would be cleaned
            print(f"   Cleaning {session.session_id} ({reason})")
            cleaned_count += 1
        except Exception as e:
            print(f"   ❌ Failed to clean {session.session_id}: {e}")
    
    print(f"\n✅ Cleaned {cleaned_count} unindexable sessions")
    print("💡 Note: This is a dry-run implementation. Actual deletion requires integration with the Claude Code session storage system.")


def analyze_indexing_failure(session: SessionInfo) -> str:
    """Analyze why a session might have failed to index."""
    reasons = []
    
    # Check for empty or very short prompt
    if not session.first_user_prompt:
        reasons.append("No user prompt")
    elif len(session.first_user_prompt.strip()) < 10:
        reasons.append("Very short prompt (<10 chars)")
    
    # Check for empty abstract
    if not session.abstract:
        reasons.append("No session abstract")
    elif len(session.abstract.strip()) < 10:
        reasons.append("Very short abstract (<10 chars)")
    
    # Check for special characters or encoding issues
    if session.first_user_prompt:
        try:
            session.first_user_prompt.encode('utf-8')
        except UnicodeEncodeError:
            reasons.append("Encoding issues in prompt")
    
    if session.abstract:
        try:
            session.abstract.encode('utf-8')
        except UnicodeEncodeError:
            reasons.append("Encoding issues in abstract")
    
    # Check session duration (very short sessions might be incomplete)
    if hasattr(session, 'end_time') and session.end_time and session.start_time:
        duration = (session.end_time - session.start_time).total_seconds()
        if duration < 5:
            reasons.append("Very short session (<5 seconds)")
    
    # Check for very recent sessions (might still be active)
    if session.start_time:
        # Handle timezone-aware datetime comparison
        try:
            if session.start_time.tzinfo is not None:
                # Session has timezone info, make current time timezone-aware
                from datetime import timezone
                now = datetime.now(timezone.utc)
            else:
                # Session is naive, use naive current time
                now = datetime.now()
            
            time_since_start = (now - session.start_time).total_seconds()
            if time_since_start < 300:  # Less than 5 minutes
                reasons.append("Very recent session (might still be active)")
        except Exception:
            # Skip this check if datetime comparison fails
            pass
    
    # Return primary reason or generic if none found
    if reasons:
        return reasons[0]  # Return the most likely reason
    else:
        return "Indexing error during embedding generation"


def display_index_status(collector: WorkdirStatsCollector) -> None:
    """Display detailed semantic search index status."""
    print("📊 Semantic Search Index Status")
    print("=" * 50)
    
    if not SEMANTIC_RAG_AVAILABLE:
        print("❌ Semantic RAG not available")
        print("   Please install dependencies: pip install qdrant-client openai aiohttp python-dotenv numpy")
        return
    
    try:
        semantic_searcher = create_semantic_searcher()
        if not semantic_searcher:
            print("❌ Could not create semantic searcher")
            return
        
        status = semantic_searcher.get_index_status(collector)
        
        # Basic status
        print(f"🗂️  Index File: {status['index_file_path']}")
        
        if status["index_exists"]:
            print(f"✅ Status: Index exists")
            print(f"📁 File Size: {format_file_size(status['index_file_size'])}")
            print(f"🕒 Last Modified: {format_timestamp(status['last_modified'])}")
            print()
            
            # Index statistics
            print("📈 Index Statistics:")
            print(f"   Sessions Indexed: {status['indexed_sessions']:,}")
            print(f"   Total Chunks: {status['indexed_chunks']:,}")
            print(f"   Embedding Dimensions: {status['embedding_dimensions']:,}d")
            print()
            
            # Content type distribution
            print("📋 Content Types:")
            for content_type, count in status['content_type_distribution'].items():
                print(f"   {content_type.capitalize()}: {count:,}")
            print()
            
            # Workdir distribution (top 10)
            print("📂 Top Working Directories:")
            sorted_workdirs = sorted(
                status['workdir_distribution'].items(), 
                key=lambda x: x[1], 
                reverse=True
            )
            for workdir, count in sorted_workdirs[:10]:
                short_path = format_path(workdir)
                print(f"   {short_path}: {count} sessions")
            
            if len(sorted_workdirs) > 10:
                remaining = len(sorted_workdirs) - 10
                total_remaining = sum(count for _, count in sorted_workdirs[10:])
                print(f"   ... and {remaining} more directories ({total_remaining} sessions)")
            print()
            
            # Current vs indexed sessions
            current_sessions = status['current_sessions']
            indexed_sessions = status['indexed_sessions']
            missing_count = len(status['missing_sessions'])
            
            print("🔄 Synchronization Status:")
            print(f"   Current Sessions: {current_sessions:,}")
            print(f"   Indexed Sessions: {indexed_sessions:,}")
            
            if missing_count > 0:
                print(f"   ⚠️  Missing from Index: {missing_count:,}")
                print(f"      Run 'ccwork --index-sessions' to update")
                
                # Show detailed info about missing sessions
                print()
                print("📝 Missing Sessions Details:")
                missing_sessions_details = get_missing_sessions_details(collector, status['missing_sessions'])
                
                for i, session_info in enumerate(missing_sessions_details[:10]):  # Show first 10
                    workdir = session_info['workdir']
                    session_id = session_info['session_id']
                    start_time = session_info['start_time']
                    prompt = session_info['prompt'][:100] + "..." if len(session_info['prompt']) > 100 else session_info['prompt']
                    reason = session_info.get('indexing_failure_reason', 'Unknown')
                    
                    print(f"   {i+1:2d}. Session: {session_id}")
                    print(f"       Workdir: {format_path(workdir)}")
                    print(f"       Time: {start_time}")
                    print(f"       Prompt: {prompt}")
                    print(f"       Reason: {reason}")
                    print()
                
                if missing_count > 10:
                    print(f"   ... and {missing_count - 10} more missing sessions")
                    
            else:
                print(f"   ✅ Index is up to date")
            print()
            
        else:
            print("❌ Status: No index found")
            print(f"📊 Current Sessions: {status['current_sessions']:,}")
            print("💡 Run 'ccwork --index-sessions' to create the index")
            print()
        
        # Configuration
        print("⚙️  Configuration:")
        print(f"   Embedder Provider: {status['configuration']['embedder_provider']}")
        print(f"   Workspace Path: {status['configuration']['workspace_path']}")
        
        if "index_error" in status:
            print()
            print(f"⚠️  Index Error: {status['index_error']}")
            
    except Exception as e:
        print(f"❌ Error getting index status: {e}")


def format_file_size(size_bytes: int) -> str:
    """Format file size in human readable format."""
    if size_bytes < 1024:
        return f"{size_bytes} B"
    elif size_bytes < 1024 * 1024:
        return f"{size_bytes / 1024:.1f} KB"
    elif size_bytes < 1024 * 1024 * 1024:
        return f"{size_bytes / (1024 * 1024):.1f} MB"
    else:
        return f"{size_bytes / (1024 * 1024 * 1024):.1f} GB"


def format_timestamp(iso_timestamp: Optional[str]) -> str:
    """Format ISO timestamp for display."""
    if not iso_timestamp:
        return "Unknown"
    
    try:
        dt = datetime.fromisoformat(iso_timestamp)
        now = datetime.now()
        diff = now - dt
        
        if diff.days > 0:
            return f"{diff.days} days ago ({dt.strftime('%Y-%m-%d %H:%M')})"
        elif diff.seconds > 3600:
            hours = diff.seconds // 3600
            return f"{hours} hours ago ({dt.strftime('%H:%M')})"
        elif diff.seconds > 60:
            minutes = diff.seconds // 60
            return f"{minutes} minutes ago ({dt.strftime('%H:%M')})"
        else:
            return f"Just now ({dt.strftime('%H:%M')})"
    except Exception:
        return iso_timestamp


def format_path(path: str, max_length: int = 60) -> str:
    """Format long paths for display."""
    if len(path) <= max_length:
        return path
    
    # Try to keep the last part of the path
    if path.startswith('/'):
        parts = path.split('/')
        if len(parts) > 2:
            # Show /.../{last_few_parts}
            return f"/.../{'/'.join(parts[-2:])}"
    
    # Fallback: truncate with ellipsis
    return f"...{path[-(max_length-3):]}"


def format_relative_time(dt: datetime) -> str:
    """Format datetime as relative time like '8 hours ago (08-24 13:16)' in local timezone."""
    if not dt:
        return "Unknown"
    
    try:
        # Convert to local timezone for display
        if dt.tzinfo is not None:
            # Convert UTC to local time
            local_dt = dt.astimezone()
            now = datetime.now().astimezone()
        else:
            # Assume it's already in local time
            local_dt = dt
            now = datetime.now()
        
        diff = now - local_dt
        
        # Format the absolute time part in local timezone
        time_str = local_dt.strftime('%m-%d %H:%M')
        
        # Calculate relative time
        if diff.days > 0:
            if diff.days == 1:
                relative = "1 day ago"
            else:
                relative = f"{diff.days} days ago"
        elif diff.seconds >= 3600:
            hours = diff.seconds // 3600
            if hours == 1:
                relative = "1 hour ago"
            else:
                relative = f"{hours} hours ago"
        elif diff.seconds >= 60:
            minutes = diff.seconds // 60
            if minutes == 1:
                relative = "1 minute ago"
            else:
                relative = f"{minutes} minutes ago"
        else:
            relative = "just now"
        
        return f"{relative} ({time_str})"
        
    except Exception:
        # Fallback to local time display
        try:
            if dt.tzinfo is not None:
                local_dt = dt.astimezone()
            else:
                local_dt = dt
            return local_dt.strftime('%m-%d %H:%M')
        except Exception:
            return dt.strftime('%m-%d %H:%M')


def list_directories_with_ids(collector: WorkdirStatsCollector) -> List[tuple]:
    """List all Claude session directories with sequential IDs, sorted by priority."""
    stats_list = collector.collect_all_workdir_stats()
    
    # Sort by priority: active sessions first, then total sessions, then recency
    def sort_priority(stats):
        # Priority 1: Active session count (highest weight)
        active_count = len(stats.active_sessions)
        active_weight = active_count * 100000  # Very high weight for active sessions
        
        # Priority 2: Total session count (medium weight)
        total_weight = stats.session_count * 1000  # Medium weight for total sessions
        
        # Priority 3: Recency (lowest weight)
        last_activity_timestamp = stats.last_activity.timestamp() if stats.last_activity else 0
        recency_weight = last_activity_timestamp / 1000  # Convert to reasonable scale
        
        return active_weight + total_weight + recency_weight
    
    stats_list.sort(key=sort_priority, reverse=True)
    
    directory_list = []
    for i, stats in enumerate(stats_list, 1):
        directory_list.append((i, stats))
    
    return directory_list


def display_directory_list(directory_list: List[tuple], show_all: bool = False) -> None:
    """Display the directory list with IDs in concise format with colors."""
    if not directory_list:
        print("No Claude session directories found.")
        return
    
    # Filter out inactive directories unless show_all is True
    filtered_list = []
    for dir_id, stats in directory_list:
        active_count = len(stats.active_sessions)
        if show_all or active_count > 0:
            filtered_list.append((dir_id, stats))
    
    if not filtered_list:
        if not show_all:
            print("No active Claude session directories found.")
            print("Use --all to see inactive directories as well.")
        else:
            print("No Claude session directories found.")
        return
    
    for dir_id, stats in filtered_list:
        active_count = len(stats.active_sessions)
        total_count = stats.session_count
        
        # Format the main line with colors
        id_colored = colorize(f"{dir_id}.", Colors.BRIGHT_CYAN + Colors.BOLD)
        path_colored = colorize(stats.workdir, Colors.BRIGHT_WHITE)
        print(f"{id_colored} {path_colored}")
        
        # Format the details with colors
        time_info = format_relative_time(stats.last_activity)
        
        session_label = colorize("Session:", Colors.DIM)
        # Color active and total counts differently - total uses standard cyan
        active_colored = colorize(str(active_count), Colors.BRIGHT_GREEN if active_count > 0 else Colors.DIM)
        total_colored = colorize(str(total_count), Colors.CYAN)
        sessions_colored = f"{active_colored}/{total_colored}"
        
        last_label = colorize("Last:", Colors.DIM)
        time_colored = colorize(time_info, Colors.YELLOW if active_count > 0 else Colors.DIM)
        
        print(f"   {session_label} {sessions_colored} | {last_label} {time_colored}")
        print()  # Empty line between entries




def format_workdir_stats(stats_list: List[WorkdirStats], show_inactive: bool = False, collector: Optional[WorkdirStatsCollector] = None, simple_format: bool = False) -> str:
    """Format workdir statistics for display."""
    if not stats_list:
        return "No Claude activity found in any working directories."
    
    if simple_format:
        return format_workdir_stats_simple(stats_list, show_inactive, collector)
    
    output = []
    output.append("=" * 80)
    output.append("CLAUDE CODE WORKDIR STATISTICS")
    output.append("=" * 80)
    output.append("")
    
    active_count = sum(1 for s in stats_list if s.active_sessions)
    total_count = len(stats_list)
    
    output.append(f"Found {total_count} workdirs with Claude activity")
    output.append(f"Active workdirs (last 24 hours): {active_count}")
    output.append("")
    
    for i, stats in enumerate(stats_list):
        if not show_inactive and not stats.active_sessions:
            continue
        
        # Determine status - check for running sessions if collector is available
        status = "⚪ INACTIVE"
        if stats.active_sessions:
            status = "🟢 ACTIVE"
            
            # Check if any sessions are currently running (last 30 minutes)
            if collector:
                sessions = collector.get_workdir_sessions(stats.workdir)
                running_sessions = [s for s in sessions if s.is_running]
                if running_sessions:
                    status = "🔴 RUNNING"
            
        output.append(f"{i+1}. {status} {stats.workdir}")
        output.append(f"   Last Activity: {stats.last_activity.strftime('%Y-%m-%d %H:%M:%S UTC') if stats.last_activity else 'Unknown'}")
        
        # Enhanced session count display
        session_info = f"   Sessions: {stats.session_count} | Active: {len(stats.active_sessions)}"
        if collector and stats.active_sessions:
            sessions = collector.get_workdir_sessions(stats.workdir)
            running_sessions = [s for s in sessions if s.is_running]
            if running_sessions:
                session_info += f" | Running: {len(running_sessions)}"
        output.append(session_info)
        output.append(f"   Tokens: {stats.total_tokens:,} (In: {stats.input_tokens:,}, Out: {stats.output_tokens:,})")
        if stats.cache_creation_tokens or stats.cache_read_tokens:
            output.append(f"   Cache: Created {stats.cache_creation_tokens:,}, Read {stats.cache_read_tokens:,}")
        output.append(f"   Cost: ${stats.total_cost:.3f}")
        if stats.models_used:
            output.append(f"   Models: {', '.join(stats.models_used)}")
        output.append("")
    
    return "\n".join(output)


def format_relative_time(dt: datetime) -> str:
    """Format datetime as relative time string."""
    if not dt:
        return "unknown time"
    
    now = datetime.now(timezone.utc)
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=timezone.utc)
    
    diff = now - dt
    total_seconds = diff.total_seconds()
    
    if total_seconds < 60:
        return "just now"
    elif total_seconds < 3600:
        minutes = int(total_seconds / 60)
        time_str = f"{minutes} minute{'s' if minutes != 1 else ''} ago"
    elif total_seconds < 86400:
        hours = int(total_seconds / 3600)
        time_str = f"{hours} hour{'s' if hours != 1 else ''} ago"
    else:
        days = int(total_seconds / 86400)
        time_str = f"{days} day{'s' if days != 1 else ''} ago"
    
    # Add formatted date for context
    formatted_date = dt.strftime("(%m-%d %H:%M)")
    return f"{time_str} {formatted_date}"


def format_workdir_stats_simple(stats_list: List[WorkdirStats], show_inactive: bool = False, collector: Optional[WorkdirStatsCollector] = None) -> str:
    """Format workdir statistics in simple format similar to cdwork."""
    output = []
    
    for i, stats in enumerate(stats_list):
        if not show_inactive and not stats.active_sessions:
            continue
        
        # Format relative time with color
        time_info = format_relative_time(stats.last_activity)
        time_colored = colorize(time_info, Colors.YELLOW if stats.active_sessions else Colors.DIM)
        
        # Session count with running indicator and colors
        active_count = len(stats.active_sessions)
        running_count = 0
        
        if collector and stats.active_sessions:
            sessions = collector.get_workdir_sessions(stats.workdir)
            running_sessions = [s for s in sessions if s.is_running]
            running_count = len(running_sessions)
        
        # Colored session display - use bright colors only for directories with active sessions
        if active_count > 0:
            active_colored = colorize(str(active_count), Colors.BRIGHT_GREEN)
            total_colored = colorize(str(stats.session_count), Colors.CYAN)
        else:
            # No active sessions - use dim colors for both numbers
            active_colored = colorize(str(active_count), Colors.DIM)
            total_colored = colorize(str(stats.session_count), Colors.DIM)
        
        session_display = f"{active_colored}/{total_colored}"
        
        # Colored workdir path
        dir_id_colored = colorize(f"{i+1}.", Colors.BRIGHT_CYAN + Colors.BOLD)
        path_colored = colorize(stats.workdir, Colors.BRIGHT_WHITE)
        
        output.append(f"{dir_id_colored} {path_colored}")
        output.append(f"   Session: {session_display} | Last: {time_colored}")
        
        # Add individual session details if collector is available
        if collector:
            sessions = collector.get_workdir_sessions(stats.workdir)
            # Sort sessions by recency and show sessions from last 24 hours
            recent_sessions = []
            for s in sessions:
                # Check if session has activity in last 24 hours
                last_activity = s.end_time or s.start_time
                if last_activity:
                    twenty_four_hours_ago = datetime.now(timezone.utc) - timedelta(hours=24)
                    if last_activity > twenty_four_hours_ago:
                        recent_sessions.append(s)
            
            recent_sessions.sort(key=lambda x: x.end_time or x.start_time, reverse=True)
            
            for j, session in enumerate(recent_sessions[:5]):  # Show max 5 sessions
                session_num = colorize(f"{j+1}.", Colors.CYAN)
                
                # Get or generate session description
                description = get_session_description_for_workdir(session, stats.workdir)
                if not description:
                    description = session.abstract or 'No description available'
                
                # Add status to same line as description
                if session.is_running:
                    status = colorize("(run)", Colors.BRIGHT_RED)
                    desc_with_status = f"{description} {status}"
                elif session.is_active:
                    status = colorize("(active)", Colors.BRIGHT_GREEN)
                    desc_with_status = f"{description} {status}"
                else:
                    # Session exists in 24h window but not active (5h), no status needed
                    desc_with_status = description
                
                desc_colored = colorize(desc_with_status, Colors.WHITE)
                output.append(f"     {session_num} {desc_colored}")
            
            if len(recent_sessions) > 5:
                more_colored = colorize(f"... and {len(recent_sessions) - 5} more sessions", Colors.DIM)
                output.append(f"     {more_colored}")
        
        output.append("")
    
    return "\n".join(output)


def get_session_description_for_workdir(session: SessionInfo, workdir: str) -> Optional[str]:
    """Get a brief description for a session using first/last lines of the prompt."""
    if not session.first_user_prompt:
        return session.abstract
    
    prompt = session.first_user_prompt.strip()
    if not prompt:
        return session.abstract
    
    # Split into lines and remove empty lines
    lines = [line.strip() for line in prompt.split('\n') if line.strip()]
    
    if len(lines) <= 1:
        # Single line or empty, return as is (truncated if too long)
        return prompt[:150] + "..." if len(prompt) > 150 else prompt
    
    elif len(lines) == 2:
        # Two lines, show both
        return f"{lines[0]} ... {lines[1]}"
    
    else:
        # Multiple lines, show first and last
        first_line = lines[0]
        last_line = lines[-1]
        combined = f"{first_line} ... {last_line}"
        
        # Truncate if too long
        if len(combined) > 150:
            # Try to truncate the last line first
            available_space = 150 - len(first_line) - 7  # 7 for " ... "
            if available_space > 20:
                last_line_truncated = last_line[:available_space] + "..."
                combined = f"{first_line} ... {last_line_truncated}"
            else:
                # Truncate the whole thing
                combined = combined[:147] + "..."
        
        return combined


def show_sessions_with_workdir_ids(collector: 'WorkdirStatsCollector') -> None:
    """Show all sessions with workdir-id.session-id format."""
    workdir_stats = collector.collect_all_workdir_stats()
    
    print("🔍 All Sessions with Workdir-Session IDs:")
    print("=" * 60)
    print()
    
    for workdir_idx, stats in enumerate(workdir_stats, 1):
        if stats.session_count == 0:
            continue
            
        # Get detailed session info for this workdir
        sessions = collector.get_workdir_sessions(stats.workdir)
        
        # Show workdir header
        print(f"{workdir_idx}. {stats.workdir}")
        print(f"   Sessions: {len(sessions)}")
        print()
        
        # Show each session with ID
        for session_idx, session in enumerate(sessions, 1):
            session_id = f"{workdir_idx}.{session_idx}"
            time_str = session.start_time.strftime('%Y-%m-%d %H:%M:%S') if session.start_time else 'Unknown'
            
            # Show session abstract or first line of content
            preview = session.abstract if session.abstract else "No description"
            if len(preview) > 80:
                preview = preview[:77] + "..."
            
            print(f"   {session_id} | {time_str} | {preview}")
        
        print()


def show_session_prompts(session_id: str, collector: 'WorkdirStatsCollector') -> None:
    """Show all user prompts for a specific session."""
    try:
        # Parse session ID (format: workdir_id.session_id)
        workdir_id, session_idx = session_id.split('.')
        workdir_id = int(workdir_id) - 1  # Convert to 0-based index
        session_idx = int(session_idx) - 1  # Convert to 0-based index
    except (ValueError, AttributeError):
        print(f"❌ Invalid session ID format: {session_id}")
        print("Format should be: workdir-id.session-id (e.g., 2.1)")
        return
    
    workdir_stats = collector.collect_all_workdir_stats()
    
    if workdir_id >= len(workdir_stats):
        print(f"❌ Workdir ID {workdir_id + 1} not found.")
        return
    
    stats = workdir_stats[workdir_id]
    sessions = collector.get_workdir_sessions(stats.workdir)
    
    if session_idx >= len(sessions):
        print(f"❌ Session ID {session_idx + 1} not found in workdir {workdir_id + 1}.")
        return
    
    session = sessions[session_idx]
    
    print(f"📝 Session {session_id} Prompts:")
    print(f"Workdir: {stats.workdir}")
    print(f"Session: {session.session_id}")
    print(f"Start Time: {session.start_time.strftime('%Y-%m-%d %H:%M:%S UTC') if session.start_time else 'Unknown'}")
    print("=" * 80)
    print()
    
    # Try to read session file and extract prompts
    try:
        from pathlib import Path
        import json
        
        # Find the project directory for this workdir
        project_dir_name = stats.workdir.replace('/', '-')
        if not project_dir_name.startswith('-'):
            project_dir_name = '-' + project_dir_name
        session_file = Path(collector.claude_dir) / "projects" / project_dir_name / f"{session.session_id}.jsonl"
        if not session_file.exists():
            print(f"❌ Session file not found: {session_file}")
            return
        
        user_prompts = []
        
        with open(session_file, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                    
                try:
                    entry = json.loads(line)
                    
                    # Check if this is a user message
                    if entry.get('type') == 'user' and 'message' in entry:
                        message = entry['message']
                        if message.get('role') == 'user':
                            content = message.get('content', '')
                            
                            # Handle both string and list content
                            if isinstance(content, list):
                                # If content is a list, extract text from each item
                                content_parts = []
                                for item in content:
                                    if isinstance(item, dict):
                                        if item.get('type') == 'tool_result':
                                            content_parts.append(f"[Tool Result: {item.get('content', '')}]")
                                        else:
                                            content_parts.append(str(item.get('content', item.get('text', str(item)))))
                                    else:
                                        content_parts.append(str(item))
                                content = '\n'.join(content_parts)
                            elif not isinstance(content, str):
                                content = str(content)
                            
                            content = content.strip()
                            if content:
                                user_prompts.append({
                                    'timestamp': entry.get('timestamp', ''),
                                    'content': content
                                })
                except json.JSONDecodeError:
                    continue
        
        if not user_prompts:
            print("No user prompts found in this session.")
            return
        
        # Display prompts
        for i, prompt in enumerate(user_prompts, 1):
            print(f"Prompt {i}:")
            if prompt['timestamp']:
                # Format timestamp if available
                try:
                    from datetime import datetime
                    dt = datetime.fromisoformat(prompt['timestamp'].replace('Z', '+00:00'))
                    time_str = dt.strftime('%H:%M:%S')
                    print(f"Time: {time_str}")
                except:
                    print(f"Time: {prompt['timestamp']}")
            
            # Show content with proper formatting
            content = prompt['content']
            if len(content) > 500:
                content = content[:497] + "..."
            
            print(content)
            print("-" * 40)
            print()
            
    except Exception as e:
        print(f"❌ Error reading session file: {e}")


def enter_session_by_workdir_id(session_id: str, collector: 'WorkdirStatsCollector') -> None:
    """Enter/activate a session by workdir-id.session-id format."""
    try:
        # Parse session ID (format: workdir_id.session_id) 
        workdir_id, session_idx = session_id.split('.')
        workdir_id = int(workdir_id) - 1  # Convert to 0-based index
        session_idx = int(session_idx) - 1  # Convert to 0-based index
    except (ValueError, AttributeError):
        print(f"❌ Invalid session ID format: {session_id}")
        print("Format should be: workdir-id.session-id (e.g., 2.1)")
        return
    
    workdir_stats = collector.collect_all_workdir_stats()
    
    if workdir_id >= len(workdir_stats):
        print(f"❌ Workdir ID {workdir_id + 1} not found.")
        return
    
    stats = workdir_stats[workdir_id]
    sessions = collector.get_workdir_sessions(stats.workdir)
    
    if session_idx >= len(sessions):
        print(f"❌ Session ID {session_idx + 1} not found in workdir {workdir_id + 1}.")
        return
    
    session = sessions[session_idx]
    
    # Display session information 
    print(f"🔗 Entering Session {session_id}:")
    print(f"📂 Directory: {stats.workdir}")
    print(f"🆔 Session ID: {session.session_id}")
    print(f"⏰ Start Time: {session.start_time.strftime('%Y-%m-%d %H:%M:%S UTC') if session.start_time else 'Unknown'}")
    print(f"📝 Abstract: {session.abstract or 'No abstract available'}")
    print(f"💰 Tokens: {session.total_tokens:,} | Cost: ${session.total_cost:.3f}")
    if session.models_used:
        print(f"🤖 Models: {', '.join(session.models_used)}")
    print()
    print(f"📁 To navigate to session directory:")
    print(f"   cd '{stats.workdir}'")
    print()
    print(f"📄 Session file location:")
    print(f"   {collector.claude_dir}/sessions/{session.session_id}")


def main():
    """Main entry point."""
    parser = argparse.ArgumentParser(
        description="Analyze Claude Code usage statistics by working directory"
    )
    parser.add_argument(
        "--claude-dir", 
        help="Path to .claude directory (default: ~/.claude)"
    )
    parser.add_argument(
        "--hours-back", 
        type=int, 
        help="Only show activity from last N hours"
    )
    parser.add_argument(
        "--show-inactive", 
        action="store_true", 
        help="Show inactive workdirs (no activity in last 5 hours)"
    )
    parser.add_argument(
        "--workdir", 
        help="Show details for specific workdir (supports relative paths like '.' for current directory)"
    )
    parser.add_argument(
        "--json", 
        action="store_true", 
        help="Output in JSON format"
    )
    # Create mutually exclusive group for server actions
    server_group = parser.add_mutually_exclusive_group()
    server_group.add_argument(
        "--serve", 
        action="store_true", 
        help="Start web server for frontend interface"
    )
    server_group.add_argument(
        "--stop", 
        action="store_true", 
        help="Stop running web server"
    )
    parser.add_argument(
        "--force", 
        action="store_true", 
        help="Force start server even if port appears to be in use"
    )
    parser.add_argument(
        "--port", 
        type=int, 
        default=8080, 
        help="Port for web server (default: 8080)"
    )
    parser.add_argument(
        "--host", 
        default="localhost", 
        help="Host for web server (default: localhost)"
    )
    parser.add_argument(
        "--debug", 
        action="store_true", 
        help="Enable debug logging"
    )
    parser.add_argument(
        "--default", 
        action="store_true", 
        help="Save current configuration as default or use saved default configuration"
    )
    
    # Search functionality
    parser.add_argument(
        "--search", 
        type=str, 
        metavar="STRING",
        help="Search session history using string matching (searches prompts and abstracts). Use --global to search all workdirs."
    )
    parser.add_argument(
        "--query", 
        type=str, 
        metavar="SENTENCE",
        help="Semantic search for sessions using natural language. Use --global to search all workdirs."
    )
    parser.add_argument(
        "--global", "-g",
        action="store_true",
        help="Search all sessions across all workdirs (default: current workdir only)"
    )
    parser.add_argument(
        "--index-sessions",
        action="store_true",
        help="Create or update semantic search index for all sessions"
    )
    parser.add_argument(
        "--force-reindex",
        action="store_true", 
        help="Force complete reindexing of all sessions (use with --index-sessions)"
    )
    parser.add_argument(
        "--index-status",
        action="store_true",
        help="Show detailed status and statistics of the semantic search index"
    )
    parser.add_argument(
        "--clean",
        action="store_true",
        help="Remove sessions that cannot be indexed (no prompt or very short prompts <10 chars)"
    )
    parser.add_argument(
        "--list", "-l",
        action="store_true",
        help="List working directories with individual sessions (last 24 hours)"
    )
    parser.add_argument(
        "--list-dir", "-d",
        action="store_true",
        help="List all Claude session directories with IDs for selection"
    )
    parser.add_argument(
        "--all", "-a",
        action="store_true",
        help="Include inactive directories when listing (use with -l or --list-dir)"
    )
    parser.add_argument(
        "--verbose", "-v",
        action="store_true",
        help="Show detailed information (session counts and timestamps) when listing directories"
    )
    parser.add_argument(
        "--enter-session", "-e",
        type=str,
        metavar="SESSION_ID", 
        help="Enter/activate a specific Claude session by its ID (format: workdir-id.session-id, e.g., 2.1)"
    )
    parser.add_argument(
        "--enter-dir",
        type=str,
        metavar="DIR_ID",
        help="Enter a specific directory by its ID (used by cdwork shell function)"
    )
    parser.add_argument(
        "--show-dir",
        action="store_true",
        help="List all sessions with workdir-id.session-id format (e.g., 1.1, 1.2, 2.1, etc.)"
    )
    parser.add_argument(
        "--show-session",
        type=str,
        metavar="SESSION_ID",
        help="Show all user prompts for a specific session (format: workdir-id.session-id, e.g., 2.1)"
    )
    
    args = parser.parse_args()
    
    # Handle configuration management
    from .config import load_default_config, apply_default_config, save_default_config, has_non_default_args, show_current_config
    
    # If --default is used alone, show current config
    if args.default and not has_non_default_args(args):
        show_current_config()
        return 0
    
    # If --default is used with other args, save the configuration
    if args.default and has_non_default_args(args):
        save_default_config(args)
        # Continue with the command execution
    
    # If no arguments provided, try to load and apply default configuration
    if not has_non_default_args(args) and not args.default:
        default_config = load_default_config()
        if default_config:
            apply_default_config(args, default_config)
        else:
            # No default config and no arguments, show help
            parser.print_help()
            return 0
    
    logging.basicConfig(
        level=logging.DEBUG if args.debug else logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    
    try:
        if args.stop:
            # Stop web server
            from .utils import stop_server
            if stop_server(args.port):
                print(f"✅ Successfully stopped server on port {args.port}")
                return 0
            else:
                print(f"❌ No server found running on port {args.port}")
                return 1
        
        if args.serve:
            # Start web server
            from .server import start_server
            start_server(
                claude_dir=args.claude_dir,
                hours_back=args.hours_back,
                host=args.host,
                port=args.port,
                debug=args.debug,
                force=args.force
            )
            return 0
        
        collector = WorkdirStatsCollector(
            claude_dir=args.claude_dir,
            hours_back=args.hours_back
        )
        
        # Handle --enter-session functionality for sessions
        if args.enter_session:
            enter_session_by_workdir_id(args.enter_session, collector)
            return 0
        
        # Handle --enter-dir functionality for directories
        if args.enter_dir:
            # Try to use cached directory list first
            directory_list = load_directory_cache()
            if directory_list is None:
                # No valid cache, generate fresh list and save to cache
                directory_list = list_directories_with_ids(collector)
                save_directory_cache(directory_list)
            
            # Enter directory by ID
            try:
                target_id = int(args.enter_dir)
            except ValueError:
                print(f"❌ Invalid directory ID: {args.enter_dir}. Must be a number.")
                return 1
            
            # Find the directory with the given ID
            target_dir = None
            for did, stats in directory_list:
                if did == target_id:
                    target_dir = stats.workdir
                    break
            
            if target_dir is None:
                print(f"❌ Directory ID {target_id} not found.")
                print("Use --list-dir to see available directories.")
                return 1
            
            # Print the cd command for the shell function to execute
            print(f"cd '{target_dir}'")
            return 0
        
        # Handle --list-dir functionality  
        if args.list_dir:
            # Try to use cached directory list first
            directory_list = load_directory_cache()
            if directory_list is None:
                # No valid cache, generate fresh list and save to cache
                directory_list = list_directories_with_ids(collector)
                save_directory_cache(directory_list)
            
            # Use the --all flag to determine whether to show inactive directories
            show_all = getattr(args, 'all', False)
            display_directory_list(directory_list, show_all=show_all)
            return 0
        
        # Handle --show-dir functionality
        if args.show_dir:
            show_sessions_with_workdir_ids(collector)
            return 0
        
        # Handle --show-session functionality
        if args.show_session:
            show_session_prompts(args.show_session, collector)
            return 0
        
        # Handle session cleaning
        if args.clean:
            clean_unindexable_sessions(collector)
            return 0
        
        # Handle index status
        if args.index_status:
            display_index_status(collector)
            return 0
        
        # Handle indexing functionality
        if args.index_sessions:
            if SEMANTIC_RAG_AVAILABLE:
                try:
                    semantic_searcher = create_semantic_searcher()
                    if semantic_searcher:
                        print("🚀 Starting semantic search indexing...")
                        
                        async def do_indexing():
                            success, error = await semantic_searcher.initialize()
                            if success:
                                await semantic_searcher.index_sessions(collector, force_reindex=args.force_reindex)
                                print("✅ Semantic search indexing completed!")
                            else:
                                print(f"❌ Failed to initialize semantic searcher: {error}")
                        
                        asyncio.run(do_indexing())
                    else:
                        print("❌ Could not create semantic searcher")
                except Exception as e:
                    print(f"❌ Indexing failed: {e}")
            else:
                print("❌ Semantic search not available. Please install semantic-rag dependencies.")
            return 0
        
        # Handle search functionality
        if args.search or args.query:
            # Apply workdir filtering based on flags
            workdir_filter = None  # Default to global search
            
            # Check if global search is requested
            global_search = getattr(args, 'global', False)
            
            if not global_search:
                # If --global is not specified, default to current directory
                if args.workdir:
                    workdir_filter = str(Path(args.workdir).resolve())
                else:
                    workdir_filter = str(Path.cwd())
            elif args.workdir:
                # If --workdir is explicitly specified, use it even with --global
                workdir_filter = str(Path(args.workdir).resolve())
            # If --global is specified and no --workdir, workdir_filter stays None (global search)
            
            if args.search:
                search_sessions_by_string(collector, args.search, workdir_filter)
            elif args.query:
                search_sessions_by_query(collector, args.query, workdir_filter)
            return 0
        
        stats_list = collector.collect_all_workdir_stats()
        
        if args.workdir:
            # Resolve the workdir to absolute path for comparison
            target_workdir = str(Path(args.workdir).resolve())
            
            # Filter to specific workdir (match both absolute and provided path)
            stats_list = [s for s in stats_list if s.workdir == target_workdir or s.workdir == args.workdir]
            if not stats_list:
                print(f"No activity found for workdir: {args.workdir}")
                print(f"Resolved path: {target_workdir}")
                
                # Show available workdirs for debugging
                all_stats = collector.collect_all_workdir_stats()
                if all_stats:
                    print("\nAvailable workdirs:")
                    for stats in all_stats:
                        print(f"  {stats.workdir}")
                return
        
        if args.json:
            # Output as JSON
            json_data = []
            for stats in stats_list:
                json_data.append({
                    'workdir': stats.workdir,
                    'active_sessions': stats.active_sessions,
                    'total_tokens': stats.total_tokens,
                    'input_tokens': stats.input_tokens,
                    'output_tokens': stats.output_tokens,
                    'cache_creation_tokens': stats.cache_creation_tokens,
                    'cache_read_tokens': stats.cache_read_tokens,
                    'total_cost': stats.total_cost,
                    'last_activity': stats.last_activity.isoformat() if stats.last_activity else None,
                    'session_count': stats.session_count,
                    'models_used': stats.models_used
                })
            print(json.dumps(json_data, indent=2))
        else:
            # Human-readable output
            # Use simple format if --list is specified, otherwise use detailed format
            simple_format = hasattr(args, 'list') and args.list
            
            # Check if --all flag is used, which should override show_inactive
            show_inactive = args.show_inactive or getattr(args, 'all', False)
            print(format_workdir_stats(stats_list, show_inactive=show_inactive, collector=collector, simple_format=simple_format))
            
    except Exception as e:
        logging.error(f"Error: {e}")
        if args.debug:
            raise
        return 1
    
    return 0


def main_cdwork():
    """Main entry point for cdwork command - simplified directory navigation."""
    import sys
    import argparse
    from pathlib import Path
    
    parser = argparse.ArgumentParser(
        description="Navigate to Claude session directory by ID",
        usage="cdwork [DIR_ID]  # Use 'cdwork' without args to list directories"
    )
    parser.add_argument(
        "dir_id", 
        nargs="?",
        type=int,
        help="Directory ID to navigate to (omit to list directories)"
    )
    parser.add_argument(
        "--claude-dir", 
        help="Path to .claude directory (default: ~/.claude)"
    )
    parser.add_argument(
        "--hours-back", 
        type=int, 
        help="Only show activity from last N hours"
    )
    parser.add_argument(
        "--refresh", "-r",
        action="store_true",
        help="Force refresh the directory list cache"
    )
    parser.add_argument(
        "--clear-cache",
        action="store_true",
        help="Clear the directory list cache and exit"
    )
    
    args = parser.parse_args()
    
    # Handle cache clearing
    if args.clear_cache:
        clear_directory_cache()
        print("✅ Directory cache cleared")
        return 0
    
    collector = WorkdirStatsCollector(
        claude_dir=args.claude_dir,
        hours_back=args.hours_back
    )
    
    # Try to use cached directory list first (unless refresh is requested)
    directory_list = None if args.refresh else load_directory_cache()
    if directory_list is None:
        # No valid cache or refresh requested, generate fresh list and save to cache
        directory_list = list_directories_with_ids(collector)
        save_directory_cache(directory_list)
    
    if args.dir_id is not None:
        # Navigate to specific directory
        target_dir = None
        for did, stats in directory_list:
            if did == args.dir_id:
                target_dir = stats.workdir
                break
        
        if target_dir is None:
            print(f"❌ Directory ID {args.dir_id} not found.")
            print("Available directories:")
            for did, stats in directory_list:
                print(f"   {did}: {format_path(stats.workdir)}")
            return 1
        
        # Output cd command for shell function to use
        print(f"cd '{target_dir}'")
        return 0
    else:
        # List directories when no ID provided
        display_directory_list(directory_list)
        print()
        print("💡 To actually change directories, create a shell function:")
        print("   function cdw() { eval \"$(cdwork \"$@\")\"; }")
        print("   Then use: cdw 2")
        return 0


if __name__ == "__main__":
    exit(main())