#!/usr/bin/env python3
"""
Claude Code Workdir Statistics Tool

This tool analyzes ~/.claude directory to extract statistics about all active
working directories where Claude CLI has been used.
"""

import argparse
import json
import logging
import sys
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Any


@dataclass
class WorkdirStats:
    """Aggregated Claude usage statistics for a single working directory.

    Instances are built by WorkdirStatsCollector from per-file session stats;
    token and cost fields are sums over all JSONL transcripts of the workdir.
    """
    workdir: str                      # original working directory path, e.g. '/home/user/project'
    active_sessions: List[str]        # session IDs considered active (activity in last 5 hours)
    total_tokens: int                 # sum of all token categories below
    input_tokens: int                 # prompt tokens sent to the model
    output_tokens: int                # completion tokens produced by the model
    cache_creation_tokens: int        # tokens written to the prompt cache
    cache_read_tokens: int            # tokens served from the prompt cache
    total_cost: float                 # rough estimated USD cost (see SessionScanner pricing)
    last_activity: Optional[datetime] # most recent entry timestamp; None if no timestamp was parseable
    session_count: int                # number of distinct session IDs seen
    models_used: List[str]            # distinct model identifiers seen in assistant messages


class WorkdirPathParser:
    """Converts Claude project directory names back to original working directory paths."""

    # Number of leading JSONL lines to inspect when looking for a 'cwd' field.
    # Some transcripts begin with entries (e.g. summaries) that carry no cwd.
    _CWD_SCAN_LIMIT = 10

    @staticmethod
    def parse_project_dir_name(project_dir_name: str) -> str:
        """Convert project directory name back to original workdir path.

        Note: this decoding is lossy — a dash that was part of an original
        path component is indistinguishable from a path separator, so prefer
        get_workdir_from_jsonl() when a transcript is available.

        Args:
            project_dir_name: Directory name like '-home-user-project'

        Returns:
            Original path like '/home/user/project'
        """
        if not project_dir_name.startswith('-'):
            return project_dir_name

        # Remove leading dash and convert remaining dashes to path separators
        path_parts = project_dir_name[1:].split('-')
        return '/' + '/'.join(path_parts)

    @staticmethod
    def get_workdir_from_jsonl(jsonl_file: Path) -> Optional[str]:
        """Extract workdir from a JSONL file by scanning early entries' 'cwd' field.

        Scans up to _CWD_SCAN_LIMIT lines (not just the first), skipping blank
        or malformed lines, so transcripts whose first entry lacks 'cwd' are
        still resolved.

        Args:
            jsonl_file: Path to JSONL file

        Returns:
            Working directory path or None if not found
        """
        try:
            with open(jsonl_file, 'r', encoding='utf-8') as f:
                for _ in range(WorkdirPathParser._CWD_SCAN_LIMIT):
                    raw = f.readline()
                    if not raw:  # EOF
                        break
                    line = raw.strip()
                    if not line:
                        continue
                    try:
                        data = json.loads(line)
                    except json.JSONDecodeError:
                        continue  # tolerate an isolated corrupt line
                    cwd = data.get('cwd')
                    if cwd:
                        return cwd
        except OSError as e:
            logging.debug(f"Could not extract cwd from {jsonl_file}: {e}")
        return None


class SessionScanner:
    """Scans JSONL files for active sessions and session data."""

    def __init__(self, hours_back: Optional[int] = None):
        """Initialize scanner with optional time filter.

        Args:
            hours_back: Only consider sessions from last N hours (None for all)
        """
        self.hours_back = hours_back
        self.cutoff_time: Optional[datetime] = None
        if hours_back:
            self.cutoff_time = datetime.now(timezone.utc) - timedelta(hours=hours_back)

    @staticmethod
    def _parse_timestamp(timestamp_str: str) -> Optional[datetime]:
        """Parse an ISO-8601 timestamp ('Z' suffix tolerated); None on failure."""
        try:
            return datetime.fromisoformat(timestamp_str.replace('Z', '+00:00'))
        except ValueError:
            return None

    def scan_jsonl_file(self, jsonl_file: Path) -> Tuple[bool, Dict[str, Any]]:
        """Scan a JSONL file for active sessions and collect statistics.

        Args:
            jsonl_file: Path to JSONL file to scan

        Returns:
            Tuple of (has_active_session, session_stats). session_stats holds
            token counters, an estimated cost, the latest entry timestamp, and
            session/model id lists (sets converted to lists for JSON output).
        """
        has_active = False
        stats: Dict[str, Any] = {
            'total_tokens': 0,
            'input_tokens': 0,
            'output_tokens': 0,
            'cache_creation_tokens': 0,
            'cache_read_tokens': 0,
            'total_cost': 0.0,
            'last_activity': None,
            'session_ids': set(),
            'models_used': set(),
            'active_session_ids': set()
        }

        try:
            with open(jsonl_file, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue

                    try:
                        data = json.loads(line)
                    except json.JSONDecodeError:
                        continue

                    # Parse the timestamp once; with a cutoff configured, skip
                    # entries that are older than it or unparseable.
                    timestamp = None
                    if 'timestamp' in data:
                        timestamp = self._parse_timestamp(data['timestamp'])
                        if self.cutoff_time and (timestamp is None or
                                                 timestamp < self.cutoff_time):
                            continue

                    # Track session IDs
                    session_id = data.get('sessionId')
                    if session_id:
                        stats['session_ids'].add(session_id)

                    # Update last activity
                    if timestamp and (not stats['last_activity'] or
                                      timestamp > stats['last_activity']):
                        stats['last_activity'] = timestamp

                    # Process assistant messages with usage data
                    if data.get('type') == 'assistant' and 'message' in data:
                        message = data['message']
                        # .get() guards against an explicit "usage": null,
                        # which the previous 'usage' in message check let through.
                        usage = message.get('usage')
                        if usage:
                            stats['models_used'].add(message.get('model', 'unknown'))
                            stats['input_tokens'] += usage.get('input_tokens', 0)
                            stats['output_tokens'] += usage.get('output_tokens', 0)
                            stats['cache_creation_tokens'] += usage.get('cache_creation_input_tokens', 0)
                            stats['cache_read_tokens'] += usage.get('cache_read_input_tokens', 0)

                    # Note: The concept of "active session" in the context of JSONL files is different
                    # from the monitor's real-time "isActive" blocks. Here we consider any recent
                    # activity as potentially active.

        except OSError as e:
            logging.debug(f"Could not read {jsonl_file}: {e}")

        # Derived totals computed once after the scan (previously recomputed
        # on every assistant message; the final values are identical).
        stats['total_tokens'] = (stats['input_tokens'] + stats['output_tokens'] +
                                 stats['cache_creation_tokens'] + stats['cache_read_tokens'])

        # Estimate cost (rough calculation - would need proper pricing)
        input_cost = stats['input_tokens'] * 0.0015 / 1000  # $1.50 per 1M input tokens
        output_cost = stats['output_tokens'] * 0.0075 / 1000  # $7.50 per 1M output tokens
        cache_creation_cost = stats['cache_creation_tokens'] * 0.00375 / 1000  # Cache creation cost
        cache_read_cost = stats['cache_read_tokens'] * 0.00015 / 1000  # Cache read cost
        stats['total_cost'] = input_cost + output_cost + cache_creation_cost + cache_read_cost

        # Convert sets to lists for JSON serialization
        stats['session_ids'] = list(stats['session_ids'])
        stats['models_used'] = list(stats['models_used'])
        stats['active_session_ids'] = list(stats['active_session_ids'])

        # Consider a session "active" if there was activity in the last 5 hours
        # (matching Claude's 5-hour session window)
        five_hours_ago = datetime.now(timezone.utc) - timedelta(hours=5)
        if stats['last_activity'] and stats['last_activity'] > five_hours_ago:
            has_active = True
            stats['active_session_ids'] = stats['session_ids']

        return has_active, stats


class WorkdirStatsCollector:
    """Main class for collecting workdir statistics."""

    # Numeric fields summed when merging per-file stats; set fields unioned.
    _NUMERIC_KEYS = ('total_tokens', 'input_tokens', 'output_tokens',
                     'cache_creation_tokens', 'cache_read_tokens', 'total_cost')
    _SET_KEYS = ('session_ids', 'models_used', 'active_session_ids')

    def __init__(self, claude_dir: Optional[str] = None, hours_back: Optional[int] = None):
        """Initialize collector.

        Args:
            claude_dir: Path to .claude directory (defaults to ~/.claude)
            hours_back: Only consider activity from last N hours
        """
        base = Path(claude_dir) if claude_dir else Path.home() / '.claude'
        self.claude_dir = base
        self.projects_dir = base / 'projects'
        self.scanner = SessionScanner(hours_back=hours_back)
        self.path_parser = WorkdirPathParser()

    def collect_all_workdir_stats(self) -> List[WorkdirStats]:
        """Collect statistics for all workdirs with Claude activity.

        Returns:
            List of WorkdirStats objects, most recently active first.
        """
        if not self.projects_dir.exists():
            logging.warning(f"Projects directory not found: {self.projects_dir}")
            return []

        results: List[WorkdirStats] = []
        for entry in self.projects_dir.iterdir():
            if not entry.is_dir():
                continue

            workdir = self._get_workdir_for_project(entry)
            if not workdir:
                continue

            merged = self._scan_project_directory(entry)
            if not merged:
                continue
            if not (merged['session_ids'] or merged.get('active_session_ids')):
                continue

            results.append(WorkdirStats(
                workdir=workdir,
                active_sessions=merged.get('active_session_ids', []),
                total_tokens=merged.get('total_tokens', 0),
                input_tokens=merged.get('input_tokens', 0),
                output_tokens=merged.get('output_tokens', 0),
                cache_creation_tokens=merged.get('cache_creation_tokens', 0),
                cache_read_tokens=merged.get('cache_read_tokens', 0),
                total_cost=merged.get('total_cost', 0.0),
                last_activity=merged.get('last_activity'),
                session_count=len(merged.get('session_ids', [])),
                models_used=merged.get('models_used', [])
            ))

        # Most recent activity first; entries without a timestamp sort last.
        epoch = datetime.min.replace(tzinfo=timezone.utc)
        results.sort(key=lambda s: s.last_activity or epoch, reverse=True)
        return results

    def _get_workdir_for_project(self, project_dir: Path) -> Optional[str]:
        """Get the working directory path for a project directory.

        Prefers the cwd recorded inside a JSONL transcript; falls back to
        decoding the project directory name.
        """
        for transcript in project_dir.glob('*.jsonl'):
            cwd = self.path_parser.get_workdir_from_jsonl(transcript)
            if cwd:
                return cwd
        return self.path_parser.parse_project_dir_name(project_dir.name)

    def _scan_project_directory(self, project_dir: Path) -> Optional[Dict[str, Any]]:
        """Scan all JSONL files in a project directory and combine stats."""
        merged: Dict[str, Any] = {key: 0 for key in self._NUMERIC_KEYS}
        merged['total_cost'] = 0.0
        merged['last_activity'] = None
        for key in self._SET_KEYS:
            merged[key] = set()

        saw_activity = False
        for transcript in project_dir.glob('*.jsonl'):
            file_active, file_stats = self.scanner.scan_jsonl_file(transcript)
            if not file_stats['session_ids']:
                continue
            saw_activity = True

            # Sum numeric counters across files.
            for key in self._NUMERIC_KEYS:
                merged[key] += file_stats.get(key, 0)

            # Keep the most recent activity timestamp seen so far.
            latest = file_stats['last_activity']
            if latest and (merged['last_activity'] is None or latest > merged['last_activity']):
                merged['last_activity'] = latest

            merged['session_ids'].update(file_stats['session_ids'])
            merged['models_used'].update(file_stats['models_used'])
            if file_active:
                merged['active_session_ids'].update(file_stats['active_session_ids'])

        if not saw_activity:
            return None

        # Convert sets to lists for JSON serialization downstream.
        for key in self._SET_KEYS:
            merged[key] = list(merged[key])
        return merged


def format_workdir_stats(stats_list: List[WorkdirStats], show_inactive: bool = False) -> str:
    """Format workdir statistics for display."""
    if not stats_list:
        return "No Claude activity found in any working directories."
    
    output = []
    output.append("=" * 80)
    output.append("CLAUDE CODE WORKDIR STATISTICS")
    output.append("=" * 80)
    output.append("")
    
    active_count = sum(1 for s in stats_list if s.active_sessions)
    total_count = len(stats_list)
    
    output.append(f"Found {total_count} workdirs with Claude activity")
    output.append(f"Active workdirs (last 5 hours): {active_count}")
    output.append("")
    
    for i, stats in enumerate(stats_list):
        if not show_inactive and not stats.active_sessions:
            continue
            
        status = "🟢 ACTIVE" if stats.active_sessions else "⚪ INACTIVE"
        output.append(f"{i+1}. {status} {stats.workdir}")
        output.append(f"   Last Activity: {stats.last_activity.strftime('%Y-%m-%d %H:%M:%S UTC') if stats.last_activity else 'Unknown'}")
        output.append(f"   Sessions: {stats.session_count} | Active: {len(stats.active_sessions)}")
        output.append(f"   Tokens: {stats.total_tokens:,} (In: {stats.input_tokens:,}, Out: {stats.output_tokens:,})")
        if stats.cache_creation_tokens or stats.cache_read_tokens:
            output.append(f"   Cache: Created {stats.cache_creation_tokens:,}, Read {stats.cache_read_tokens:,}")
        output.append(f"   Cost: ${stats.total_cost:.3f}")
        if stats.models_used:
            output.append(f"   Models: {', '.join(stats.models_used)}")
        output.append("")
    
    return "\n".join(output)


def main():
    """Parse CLI options, collect workdir statistics, and print the report.

    Returns 0 on success, 1 on error (None on the no-match early exit,
    which the shell treats as success).
    """
    arg_parser = argparse.ArgumentParser(
        description="Analyze Claude Code usage statistics by working directory"
    )
    arg_parser.add_argument(
        "--claude-dir",
        help="Path to .claude directory (default: ~/.claude)")
    arg_parser.add_argument(
        "--hours-back",
        type=int,
        help="Only show activity from last N hours")
    arg_parser.add_argument(
        "--show-inactive",
        action="store_true",
        help="Show inactive workdirs (no activity in last 5 hours)")
    arg_parser.add_argument(
        "--workdir",
        help="Show details for specific workdir")
    arg_parser.add_argument(
        "--json",
        action="store_true",
        help="Output in JSON format")
    arg_parser.add_argument(
        "--debug",
        action="store_true",
        help="Enable debug logging")
    opts = arg_parser.parse_args()

    logging.basicConfig(
        level=logging.DEBUG if opts.debug else logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )

    try:
        collector = WorkdirStatsCollector(
            claude_dir=opts.claude_dir,
            hours_back=opts.hours_back
        )
        all_stats = collector.collect_all_workdir_stats()

        # Optional narrowing to a single workdir.
        if opts.workdir:
            all_stats = [s for s in all_stats if s.workdir == opts.workdir]
            if not all_stats:
                print(f"No activity found for workdir: {opts.workdir}")
                return

        if opts.json:
            # Machine-readable output.
            payload = [
                {
                    'workdir': s.workdir,
                    'active_sessions': s.active_sessions,
                    'total_tokens': s.total_tokens,
                    'input_tokens': s.input_tokens,
                    'output_tokens': s.output_tokens,
                    'cache_creation_tokens': s.cache_creation_tokens,
                    'cache_read_tokens': s.cache_read_tokens,
                    'total_cost': s.total_cost,
                    'last_activity': s.last_activity.isoformat() if s.last_activity else None,
                    'session_count': s.session_count,
                    'models_used': s.models_used,
                }
                for s in all_stats
            ]
            print(json.dumps(payload, indent=2))
        else:
            # Human-readable output.
            print(format_workdir_stats(all_stats, show_inactive=opts.show_inactive))

    except Exception as e:
        logging.error(f"Error: {e}")
        if opts.debug:
            raise
        return 1

    return 0


if __name__ == "__main__":
    # sys.exit is the documented way to terminate a script with a status code;
    # the bare exit() builtin is added by the site module and not guaranteed
    # to exist in all execution environments (e.g. with python -S or frozen apps).
    sys.exit(main())