"""
Core classes and functionality for Claude Code workdir statistics.
"""

import json
import logging
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Any
from dataclasses import dataclass


@dataclass
class SessionInfo:
    """Information about a specific session."""
    session_id: str                    # Unique session identifier (JSONL `sessionId` field)
    start_time: datetime               # Earliest timestamp observed for the session
    end_time: Optional[datetime]       # Latest timestamp observed (None if no parseable timestamps)
    is_active: bool                    # Activity within the last 5 hours
    is_running: bool  # Very recent activity (last 30 minutes)
    total_tokens: int                  # Sum of the four token categories below
    input_tokens: int                  # Accumulated `input_tokens` from assistant usage records
    output_tokens: int                 # Accumulated `output_tokens`
    cache_creation_tokens: int         # Accumulated `cache_creation_input_tokens`
    cache_read_tokens: int             # Accumulated `cache_read_input_tokens`
    total_cost: float                  # Estimated cost in USD (simplified flat-rate pricing)
    models_used: List[str]             # Distinct model names seen in assistant messages
    first_user_prompt: Optional[str]   # First non-system user message text, if any
    abstract: Optional[str]            # Short summary derived from the first user prompt


@dataclass
class WorkdirStats:
    """Statistics for a specific working directory."""
    workdir: str                  # Original working directory path (e.g. '/home/user/project')
    active_sessions: List[str]    # Session IDs with activity in the last 5 hours
    total_tokens: int             # Sum of the four token categories below
    input_tokens: int             # Accumulated `input_tokens` from assistant usage records
    output_tokens: int            # Accumulated `output_tokens`
    cache_creation_tokens: int    # Accumulated `cache_creation_input_tokens`
    cache_read_tokens: int        # Accumulated `cache_read_input_tokens`
    total_cost: float             # Estimated cost in USD (simplified flat-rate pricing)
    last_activity: datetime       # Most recent timestamp across the workdir's JSONL files
    session_count: int            # Number of distinct session IDs observed
    models_used: List[str]        # Distinct model names seen in assistant messages


class WorkdirPathParser:
    """Converts Claude project directory names back to original working directory paths."""

    # Only probe the first few records when looking for a `cwd` field; session
    # files can be large and cwd appears near the top when present at all.
    _MAX_PROBE_LINES = 20

    @staticmethod
    def parse_project_dir_name(project_dir_name: str) -> str:
        """Convert project directory name back to original workdir path.

        Args:
            project_dir_name: Directory name like '-home-user-project'

        Returns:
            Original path like '/home/user/project'

        Note:
            The mapping is lossy: path components that themselves contain
            dashes cannot be distinguished from separators, so callers should
            prefer ``get_workdir_from_jsonl`` when a JSONL file is available.
        """
        if not project_dir_name.startswith('-'):
            return project_dir_name

        # Remove leading dash and convert remaining dashes to path separators
        path_parts = project_dir_name[1:].split('-')
        return '/' + '/'.join(path_parts)

    @staticmethod
    def get_workdir_from_jsonl(jsonl_file: Path) -> Optional[str]:
        """Extract workdir from a JSONL file by reading early entries' cwd field.

        Scans up to ``_MAX_PROBE_LINES`` records (skipping blank or malformed
        lines) because the first record is not guaranteed to carry ``cwd``
        (e.g. summary entries).

        Args:
            jsonl_file: Path to JSONL file

        Returns:
            Working directory path or None if not found
        """
        try:
            with open(jsonl_file, 'r', encoding='utf-8') as f:
                for line_number, line in enumerate(f):
                    if line_number >= WorkdirPathParser._MAX_PROBE_LINES:
                        break
                    line = line.strip()
                    if not line:
                        continue
                    try:
                        data = json.loads(line)
                    except json.JSONDecodeError:
                        continue  # tolerate a malformed line; keep probing
                    cwd = data.get('cwd')
                    if cwd:
                        return cwd
        except OSError as e:
            logging.debug(f"Could not extract cwd from {jsonl_file}: {e}")
        return None


class SessionScanner:
    """Scans JSONL files for active sessions and session data."""

    # Simplified flat-rate per-token pricing (USD). Real pricing varies by
    # model, so ``total_cost`` is only a rough estimate.
    _INPUT_COST = 0.0015 / 1000            # $1.50 per 1M input tokens
    _OUTPUT_COST = 0.0075 / 1000           # $7.50 per 1M output tokens
    _CACHE_CREATION_COST = 0.00375 / 1000  # Cache creation cost
    _CACHE_READ_COST = 0.00015 / 1000      # Cache read cost

    def __init__(self, hours_back: Optional[int] = None):
        """Initialize scanner with optional time filter.

        Args:
            hours_back: Only consider sessions from last N hours (None for all)
        """
        self.hours_back = hours_back
        self.cutoff_time: Optional[datetime] = None
        # NOTE: hours_back == 0 is treated the same as None (no filter),
        # preserving the original truthiness check.
        if hours_back:
            self.cutoff_time = datetime.now(timezone.utc) - timedelta(hours=hours_back)

    @staticmethod
    def _parse_timestamp(record: Dict[str, Any]) -> Optional[datetime]:
        """Parse the record's ISO-8601 ``timestamp`` field.

        Normalizes a trailing 'Z' to an explicit '+00:00' offset. Returns
        None when the field is absent or malformed.
        """
        raw = record.get('timestamp')
        if not raw:
            return None
        try:
            return datetime.fromisoformat(raw.replace('Z', '+00:00'))
        except ValueError:
            return None

    def scan_jsonl_file(self, jsonl_file: Path) -> Tuple[bool, Dict[str, Any]]:
        """Scan a JSONL file for active sessions and collect statistics.

        Args:
            jsonl_file: Path to JSONL file to scan

        Returns:
            Tuple of (has_active_session, session_stats). Set-valued entries
            in session_stats are converted to lists for JSON serialization.
        """
        stats: Dict[str, Any] = {
            'total_tokens': 0,
            'input_tokens': 0,
            'output_tokens': 0,
            'cache_creation_tokens': 0,
            'cache_read_tokens': 0,
            'total_cost': 0.0,
            'last_activity': None,
            'session_ids': set(),
            'models_used': set(),
            'active_session_ids': set()
        }

        try:
            with open(jsonl_file, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue

                    try:
                        data = json.loads(line)
                    except json.JSONDecodeError:
                        continue  # skip malformed lines, keep scanning

                    # Parse once per record (previously parsed twice: once for
                    # the cutoff filter and again for last_activity).
                    timestamp = self._parse_timestamp(data)

                    # Time filter: when a cutoff is set, drop records whose
                    # timestamp is older than the cutoff or unparseable.
                    if self.cutoff_time and 'timestamp' in data:
                        if timestamp is None or timestamp < self.cutoff_time:
                            continue

                    # Track session IDs
                    session_id = data.get('sessionId')
                    if session_id:
                        stats['session_ids'].add(session_id)

                    # Track the most recent activity seen in this file
                    if timestamp and (stats['last_activity'] is None or
                                      timestamp > stats['last_activity']):
                        stats['last_activity'] = timestamp

                    # Accumulate token usage from assistant messages
                    if data.get('type') == 'assistant' and 'message' in data:
                        message = data['message']
                        if 'usage' in message:
                            usage = message['usage']
                            stats['models_used'].add(message.get('model', 'unknown'))
                            stats['input_tokens'] += usage.get('input_tokens', 0)
                            stats['output_tokens'] += usage.get('output_tokens', 0)
                            stats['cache_creation_tokens'] += usage.get('cache_creation_input_tokens', 0)
                            stats['cache_read_tokens'] += usage.get('cache_read_input_tokens', 0)

                    # Note: The concept of "active session" in the context of JSONL files is different
                    # from the monitor's real-time "isActive" blocks. Here we consider any recent
                    # activity as potentially active.

        except IOError as e:
            logging.debug(f"Could not read {jsonl_file}: {e}")

        # Derive the total and the cost estimate once, after accumulation
        # (previously recomputed on every assistant message inside the loop).
        stats['total_tokens'] = (stats['input_tokens'] + stats['output_tokens'] +
                                 stats['cache_creation_tokens'] + stats['cache_read_tokens'])
        stats['total_cost'] = (stats['input_tokens'] * self._INPUT_COST +
                               stats['output_tokens'] * self._OUTPUT_COST +
                               stats['cache_creation_tokens'] * self._CACHE_CREATION_COST +
                               stats['cache_read_tokens'] * self._CACHE_READ_COST)

        # Convert sets to lists for JSON serialization
        stats['session_ids'] = list(stats['session_ids'])
        stats['models_used'] = list(stats['models_used'])
        stats['active_session_ids'] = list(stats['active_session_ids'])

        # Consider a session "active" if there was activity in the last 5 hours
        # (this is for the workdir stats summary, individual sessions use 24h window for display)
        has_active = False
        five_hours_ago = datetime.now(timezone.utc) - timedelta(hours=5)
        if stats['last_activity'] and stats['last_activity'] > five_hours_ago:
            has_active = True
            stats['active_session_ids'] = stats['session_ids']

        return has_active, stats


class WorkdirStatsCollector:
    """Main class for collecting workdir statistics."""
    
    def __init__(self, claude_dir: Optional[str] = None, hours_back: Optional[int] = None):
        """Initialize collector.
        
        Args:
            claude_dir: Path to .claude directory (defaults to ~/.claude)
            hours_back: Only consider activity from last N hours
        """
        self.claude_dir = Path(claude_dir) if claude_dir else Path.home() / '.claude'
        self.projects_dir = self.claude_dir / 'projects'
        self.scanner = SessionScanner(hours_back=hours_back)
        self.path_parser = WorkdirPathParser()
    
    def collect_all_workdir_stats(self) -> List[WorkdirStats]:
        """Collect statistics for all workdirs with Claude activity.
        
        Returns:
            List of WorkdirStats objects
        """
        workdir_stats = []
        
        if not self.projects_dir.exists():
            logging.warning(f"Projects directory not found: {self.projects_dir}")
            return workdir_stats
        
        for project_dir in self.projects_dir.iterdir():
            if not project_dir.is_dir():
                continue
            
            # Get the actual workdir path
            workdir = self._get_workdir_for_project(project_dir)
            if not workdir:
                continue
            
            # Scan all JSONL files in the project directory
            combined_stats = self._scan_project_directory(project_dir)
            
            if combined_stats and (combined_stats['session_ids'] or 
                                  combined_stats.get('active_session_ids')):
                stats = WorkdirStats(
                    workdir=workdir,
                    active_sessions=combined_stats.get('active_session_ids', []),
                    total_tokens=combined_stats.get('total_tokens', 0),
                    input_tokens=combined_stats.get('input_tokens', 0),
                    output_tokens=combined_stats.get('output_tokens', 0),
                    cache_creation_tokens=combined_stats.get('cache_creation_tokens', 0),
                    cache_read_tokens=combined_stats.get('cache_read_tokens', 0),
                    total_cost=combined_stats.get('total_cost', 0.0),
                    last_activity=combined_stats.get('last_activity'),
                    session_count=len(combined_stats.get('session_ids', [])),
                    models_used=combined_stats.get('models_used', [])
                )
                workdir_stats.append(stats)
        
        # Sort by last activity (most recent first)
        workdir_stats.sort(key=lambda x: x.last_activity or datetime.min.replace(tzinfo=timezone.utc), 
                          reverse=True)
        
        return workdir_stats
    
    def _get_workdir_for_project(self, project_dir: Path) -> Optional[str]:
        """Get the working directory path for a project directory."""
        # First try to parse from directory name
        workdir = self.path_parser.parse_project_dir_name(project_dir.name)
        
        # Verify by checking JSONL files for actual cwd
        for jsonl_file in project_dir.glob('*.jsonl'):
            actual_workdir = self.path_parser.get_workdir_from_jsonl(jsonl_file)
            if actual_workdir:
                return actual_workdir
        
        return workdir
    
    def _scan_project_directory(self, project_dir: Path) -> Optional[Dict[str, Any]]:
        """Scan all JSONL files in a project directory and combine stats."""
        combined_stats = {
            'total_tokens': 0,
            'input_tokens': 0,
            'output_tokens': 0,
            'cache_creation_tokens': 0,
            'cache_read_tokens': 0,
            'total_cost': 0.0,
            'last_activity': None,
            'session_ids': set(),
            'models_used': set(),
            'active_session_ids': set()
        }
        
        has_any_activity = False
        
        for jsonl_file in project_dir.glob('*.jsonl'):
            has_active, file_stats = self.scanner.scan_jsonl_file(jsonl_file)
            
            if file_stats['session_ids']:
                has_any_activity = True
                
                # Combine numeric stats
                for key in ['total_tokens', 'input_tokens', 'output_tokens', 
                           'cache_creation_tokens', 'cache_read_tokens', 'total_cost']:
                    combined_stats[key] += file_stats.get(key, 0)
                
                # Update last activity
                if file_stats['last_activity']:
                    if (not combined_stats['last_activity'] or 
                        file_stats['last_activity'] > combined_stats['last_activity']):
                        combined_stats['last_activity'] = file_stats['last_activity']
                
                # Combine sets
                combined_stats['session_ids'].update(file_stats['session_ids'])
                combined_stats['models_used'].update(file_stats['models_used'])
                
                if has_active:
                    combined_stats['active_session_ids'].update(file_stats['active_session_ids'])
        
        if not has_any_activity:
            return None
        
        # Convert sets to lists
        for key in ['session_ids', 'models_used', 'active_session_ids']:
            combined_stats[key] = list(combined_stats[key])
        
        return combined_stats
    
    def get_workdir_sessions(self, workdir: str) -> List[SessionInfo]:
        """Get detailed session information for a specific workdir."""
        sessions = {}
        
        # Find the project directory for this workdir
        project_dir = None
        for proj_dir in self.projects_dir.iterdir():
            if not proj_dir.is_dir():
                continue
            
            actual_workdir = self._get_workdir_for_project(proj_dir)
            if actual_workdir == workdir:
                project_dir = proj_dir
                break
        
        if not project_dir:
            return []
        
        # Scan all JSONL files for sessions
        for jsonl_file in project_dir.glob('*.jsonl'):
            self._extract_sessions_from_file(jsonl_file, sessions)
        
        # Convert to SessionInfo objects and sort by start time
        session_list = []
        for session_id, session_data in sessions.items():
            session_info = SessionInfo(
                session_id=session_id,
                start_time=session_data['start_time'],
                end_time=session_data.get('end_time'),
                is_active=session_data['is_active'],
                is_running=session_data['is_running'],
                total_tokens=session_data['total_tokens'],
                input_tokens=session_data['input_tokens'],
                output_tokens=session_data['output_tokens'],
                cache_creation_tokens=session_data['cache_creation_tokens'],
                cache_read_tokens=session_data['cache_read_tokens'],
                total_cost=session_data['total_cost'],
                models_used=list(session_data['models_used']),
                first_user_prompt=session_data.get('first_user_prompt'),
                abstract=session_data.get('abstract')
            )
            session_list.append(session_info)
        
        # Sort by start time (most recent first)
        session_list.sort(key=lambda x: x.start_time, reverse=True)
        return session_list
    
    def _extract_sessions_from_file(self, jsonl_file: Path, sessions: Dict[str, Dict]):
        """Extract session data from a JSONL file."""
        try:
            with open(jsonl_file, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    
                    try:
                        data = json.loads(line)
                        session_id = data.get('sessionId')
                        if not session_id:
                            continue
                        
                        # Initialize session if not exists
                        if session_id not in sessions:
                            sessions[session_id] = {
                                'start_time': None,
                                'end_time': None,
                                'exists_in_window': False,
                                'is_active': False,
                                'is_running': False,
                                'total_tokens': 0,
                                'input_tokens': 0,
                                'output_tokens': 0,
                                'cache_creation_tokens': 0,
                                'cache_read_tokens': 0,
                                'total_cost': 0.0,
                                'models_used': set(),
                                'first_user_prompt': None,
                                'abstract': None
                            }
                        
                        session = sessions[session_id]
                        
                        # Parse timestamp
                        timestamp = None
                        if 'timestamp' in data:
                            try:
                                timestamp = datetime.fromisoformat(data['timestamp'].replace('Z', '+00:00'))
                            except ValueError:
                                pass
                        
                        # Update start/end times
                        if timestamp:
                            if not session['start_time'] or timestamp < session['start_time']:
                                session['start_time'] = timestamp
                            if not session['end_time'] or timestamp > session['end_time']:
                                session['end_time'] = timestamp
                        
                        # Check session activity levels
                        twenty_four_hours_ago = datetime.now(timezone.utc) - timedelta(hours=24)
                        five_hours_ago = datetime.now(timezone.utc) - timedelta(hours=5)
                        thirty_minutes_ago = datetime.now(timezone.utc) - timedelta(minutes=30)
                        
                        if timestamp and timestamp > twenty_four_hours_ago:
                            # Session exists within 24 hours (for list display)
                            session['exists_in_window'] = True
                        
                        if timestamp and timestamp > five_hours_ago:
                            # Session is truly "active" (recent activity)
                            session['is_active'] = True
                            
                        # Check if session is running (activity in last 30 minutes)
                        if timestamp and timestamp > thirty_minutes_ago:
                            session['is_running'] = True
                        
                        # Extract first user prompt using claude-code-exporter method
                        if data.get('message') and data['message'].get('role') == 'user' and not session['first_user_prompt']:
                            message_data = data['message']
                            user_content = message_data.get('content', '')
                            
                            # Handle different content formats like claude-code-exporter
                            if isinstance(user_content, list):
                                # Handle content array format (multimodal messages)
                                text_parts = []
                                for item in user_content:
                                    if isinstance(item, dict) and item.get('type') == 'text':
                                        text_parts.append(item.get('text', ''))
                                    elif isinstance(item, str):
                                        text_parts.append(item)
                                user_content = ' '.join(text_parts)
                            elif not isinstance(user_content, str):
                                # Skip non-string content that we can't process
                                continue
                            
                            # Filter out system-generated content like claude-code-exporter does
                            if user_content and len(user_content.strip()) > 10:
                                if not self._is_system_generated_content(user_content):
                                    session['first_user_prompt'] = user_content
                                    session['abstract'] = self._generate_abstract(user_content)
                        
                        # Process assistant messages for usage stats
                        if data.get('type') == 'assistant' and 'message' in data:
                            message = data['message']
                            if 'usage' in message:
                                usage = message['usage']
                                
                                # Track model
                                model = message.get('model', 'unknown')
                                session['models_used'].add(model)
                                
                                # Accumulate token usage
                                session['input_tokens'] += usage.get('input_tokens', 0)
                                session['output_tokens'] += usage.get('output_tokens', 0)
                                session['cache_creation_tokens'] += usage.get('cache_creation_input_tokens', 0)
                                session['cache_read_tokens'] += usage.get('cache_read_input_tokens', 0)
                                
                                # Calculate total tokens
                                session['total_tokens'] = (
                                    session['input_tokens'] + session['output_tokens'] + 
                                    session['cache_creation_tokens'] + session['cache_read_tokens']
                                )
                                
                                # Estimate cost
                                input_cost = session['input_tokens'] * 0.0015 / 1000
                                output_cost = session['output_tokens'] * 0.0075 / 1000
                                cache_creation_cost = session['cache_creation_tokens'] * 0.00375 / 1000
                                cache_read_cost = session['cache_read_tokens'] * 0.00015 / 1000
                                session['total_cost'] = input_cost + output_cost + cache_creation_cost + cache_read_cost
                        
                    except json.JSONDecodeError:
                        continue
                        
        except IOError as e:
            logging.debug(f"Could not read {jsonl_file}: {e}")
    
    def _generate_abstract(self, user_prompt: str) -> str:
        """Generate a brief abstract from the first user prompt."""
        # Clean and truncate the prompt
        cleaned = user_prompt.strip()
        
        # Remove common prefixes
        prefixes_to_remove = [
            "please", "can you", "could you", "help me", "i need", "i want", "i would like"
        ]
        
        cleaned_lower = cleaned.lower()
        for prefix in prefixes_to_remove:
            if cleaned_lower.startswith(prefix):
                cleaned = cleaned[len(prefix):].lstrip()
                break
        
        # Take first sentence or first 100 characters
        sentences = cleaned.split('. ')
        if len(sentences) > 0 and len(sentences[0]) > 20:
            abstract = sentences[0].strip()
            if not abstract.endswith('.'):
                abstract += '.'
        else:
            abstract = cleaned[:100].strip()
            if len(cleaned) > 100:
                abstract += '...'
        
        # Capitalize first letter
        if abstract:
            abstract = abstract[0].upper() + abstract[1:]
        
        return abstract or "No description available"
    
    def _is_system_generated_content(self, content: str) -> bool:
        """Check if content is system-generated based on claude-code-exporter patterns."""
        if not isinstance(content, str):
            return True
        
        # Patterns from claude-code-exporter that indicate system-generated content
        skip_patterns = [
            'Caveat: The messages below were generated by the user while running local commands',
            '<command-name>',
            '<local-command-stdout>',
            '<local-command-stderr>',
            '<function_calls>',
            '<invoke',
            '</invoke>',
            'system-reminder>',
            '<system-reminder>',
            '<tool_use_error>',
            'Error:',
            '```bash',
            '```python'
        ]
        
        # Check if content contains any skip patterns
        content_lower = content.lower()
        for pattern in skip_patterns:
            if pattern.lower() in content_lower:
                return True
        
        # Check if content looks like tool results (JSON-like structures)
        trimmed = content.strip()
        if (trimmed.startswith('[') and trimmed.endswith(']')) or \
           (trimmed.startswith('{') and trimmed.endswith('}')):
            return True
        
        return False