"""
Semantic search functionality for ccwork using semantic-rag-py.
"""

import asyncio
import os
import json
import hashlib
from typing import List, Optional, Tuple, Dict, Any
from dataclasses import dataclass
from pathlib import Path
from datetime import datetime

# Import the semantic_rag package
try:
    from semantic_rag import (
        SemanticSearcher,
        SemanticRagConfig,
        SearchResult,
        CategorizedSearchResults
    )
    SEMANTIC_RAG_AVAILABLE = True
except ImportError as e:
    # Set defaults for when semantic_rag is not available
    SEMANTIC_RAG_AVAILABLE = False
    SemanticSearcher = None
    SemanticRagConfig = None
    SearchResult = None
    CategorizedSearchResults = None

from .core import WorkdirStatsCollector, SessionInfo


@dataclass
class SemanticSessionMatch:
    """A semantic search match for a session."""
    session: SessionInfo   # the matched session's info record
    workdir: str           # working directory the session belongs to
    score: float           # relevance score, 0-1 range (higher is better)
    matched_content: str   # snippet (truncated) of the text that matched
    match_source: str  # where the match came from, e.g. 'prompt' or 'abstract'


class SemanticSessionSearcher:
    """Semantic search for Claude Code sessions using embeddings."""
    
    def __init__(self, config: Optional[Any] = None):
        """Set up the searcher, failing fast when semantic-rag is missing.

        Args:
            config: Optional pre-built SemanticRagConfig; when omitted, the
                configuration is loaded from environment variables.

        Raises:
            RuntimeError: If the semantic_rag package (or its config class)
                could not be imported.
        """
        if not SEMANTIC_RAG_AVAILABLE:
            raise RuntimeError("Semantic RAG package not available. Please install semantic-rag-py dependencies.")
        if SemanticRagConfig is None:
            raise RuntimeError("SemanticRagConfig not available")

        self.config = config if config else SemanticRagConfig.from_env()
        self.searcher = SemanticSearcher(self.config)

        # On-disk workspace where session embeddings are cached.
        self.sessions_workspace = self._get_sessions_workspace()
    
    def _get_sessions_workspace(self) -> str:
        """Return (creating it if needed) the cache dir for session embeddings."""
        # Keyed under the user's home directory so the cache is shared
        # across projects and survives individual workdir deletions.
        workspace = Path.home() / ".ccwork_semantic_cache" / "sessions"
        workspace.mkdir(parents=True, exist_ok=True)
        return str(workspace)
    
    async def initialize(self) -> Tuple[bool, Optional[str]]:
        """Prepare the embedding workspace and validate configuration.

        Returns:
            (True, None) on success, otherwise (False, error message).
        """
        try:
            await self.searcher.initialize_workspace(self.sessions_workspace)
            valid, error = await self.searcher.validate_configuration()
        except Exception as e:
            return False, f"Initialization failed: {e}"

        if not valid:
            return False, f"Configuration validation failed: {error}"
        return True, None
    
    async def index_sessions(self, collector: WorkdirStatsCollector, force_reindex: bool = False) -> None:
        """Index all sessions for semantic search using vector embeddings.

        Builds (or incrementally updates) a JSON index file in the sessions
        workspace.  The index maps "workdir:session_id" keys to parallel
        lists of embedding vectors and serialized chunk metadata; each
        session contributes up to two chunks (first user prompt, abstract).

        Args:
            collector: Source of workdir stats and per-workdir sessions.
            force_reindex: When True, re-embed every session even if it is
                already present in the index.
        """
        print("🔄 Indexing sessions for semantic search...")
        
        # Gather every (workdir, session) pair known to the collector.
        stats_list = collector.collect_all_workdir_stats()
        all_sessions = []
        
        for stats in stats_list:
            sessions = collector.get_workdir_sessions(stats.workdir)
            for session in sessions:
                all_sessions.append((stats.workdir, session))
        
        if not all_sessions:
            print("No sessions found to index.")
            return
        
        # Load the existing index (if any) so already-embedded sessions can
        # be skipped below; a corrupt file simply means a full rebuild.
        index_file = Path(self.sessions_workspace) / "session_index.json"
        existing_index = {}
        
        if index_file.exists() and not force_reindex:
            try:
                with open(index_file, 'r') as f:
                    existing_index = json.load(f)
                print(f"📚 Found existing index with {len(existing_index)} sessions")
            except Exception as e:
                print(f"⚠️ Could not load existing index: {e}")
                existing_index = {}
        
        # Prepare session data for embedding.  Invariant: entry i of
        # sessions_to_embed corresponds to the i-th metadata dict appended
        # here that carries 'chunk_index' — the embedding-assignment loop
        # below relies on that exact ordering.
        sessions_to_embed = []
        session_metadata = []
        
        for workdir, session in all_sessions:
            session_key = f"{workdir}:{session.session_id}"
            
            # Check if session needs re-embedding
            if not force_reindex and session_key in existing_index:
                # Already indexed: carry over its serialized metadata.  These
                # dicts have no 'chunk_index', so they are skipped when the
                # new embeddings are assigned below.
                index_entry = existing_index[session_key]
                session_metadata.extend(index_entry['metadata'])
                continue
            
            # Chunk 1: the first user prompt, if present.
            if session.first_user_prompt:
                sessions_to_embed.append(session.first_user_prompt)
                session_metadata.append({
                    'session_key': session_key,
                    'session': session,
                    'workdir': workdir,
                    'content_type': 'prompt',
                    # Preview is truncated for display; the full text is embedded.
                    'content': session.first_user_prompt[:200] + ("..." if len(session.first_user_prompt) > 200 else ""),
                    'chunk_index': len(sessions_to_embed) - 1
                })
            
            # Chunk 2: the session abstract, if present.
            if session.abstract:
                sessions_to_embed.append(session.abstract)
                session_metadata.append({
                    'session_key': session_key,
                    'session': session,
                    'workdir': workdir,
                    'content_type': 'abstract',
                    'content': session.abstract[:200] + ("..." if len(session.abstract) > 200 else ""),
                    'chunk_index': len(sessions_to_embed) - 1
                })
        
        # Generate embeddings for all new chunks in one batched call.
        new_embeddings = []
        if sessions_to_embed:
            print(f"🧠 Generating embeddings for {len(sessions_to_embed)} new session chunks...")
            try:
                embedding_response = await self.searcher.embedder.create_embeddings(
                    sessions_to_embed, doc_type="text", task="retrieval_document"
                )
                
                if embedding_response.embeddings:
                    new_embeddings = embedding_response.embeddings
                    print(f"✅ Generated {len(new_embeddings)} embeddings")
                else:
                    # Nothing to index without embeddings; leave the previous
                    # index untouched.
                    print("❌ Failed to generate embeddings")
                    return
                    
            except Exception as e:
                print(f"❌ Error generating embeddings: {e}")
                return
        
        # Merge the new embeddings into a copy of the existing index.
        # Metadata entries carrying 'chunk_index' were appended in the same
        # order as sessions_to_embed, so a running counter pairs each entry
        # with its embedding.
        updated_index = existing_index.copy()
        embedding_idx = 0
        
        for metadata in session_metadata:
            if 'chunk_index' in metadata:  # New embedding
                session_key = metadata['session_key']
                
                if session_key not in updated_index:
                    updated_index[session_key] = {
                        'embeddings': [],
                        'metadata': []
                    }
                
                # Add embedding and metadata (the SessionInfo object is kept
                # live here and only flattened just before writing to disk).
                updated_index[session_key]['embeddings'].append(new_embeddings[embedding_idx])
                updated_index[session_key]['metadata'].append({
                    'session': metadata['session'],
                    'workdir': metadata['workdir'],
                    'content_type': metadata['content_type'],
                    'content': metadata['content']
                })
                embedding_idx += 1
        
        # Save updated index
        try:
            # Convert sessions to a JSON-serializable format
            serializable_index = {}
            for session_key, data in updated_index.items():
                serializable_index[session_key] = {
                    'embeddings': data['embeddings'],
                    'metadata': []
                }
                
                for meta in data['metadata']:
                    # Handle both new metadata (has 'session' object) and existing metadata (already serialized)
                    if hasattr(meta, 'get') and 'session' in meta and hasattr(meta['session'], 'session_id'):
                        # New metadata with SessionInfo object: flatten the
                        # fields needed to rebuild a SessionInfo at search time.
                        serializable_meta = {
                            'workdir': meta['workdir'],
                            'content_type': meta['content_type'],
                            'content': meta['content'],
                            'session_id': meta['session'].session_id,
                            'start_time': meta['session'].start_time.isoformat(),
                            'total_tokens': meta['session'].total_tokens,
                            'total_cost': meta['session'].total_cost,
                            'models_used': meta['session'].models_used or [],
                            'abstract': meta['session'].abstract,
                            'is_active': meta['session'].is_active
                        }
                    else:
                        # Already serialized metadata, just copy it
                        serializable_meta = meta
                    
                    serializable_index[session_key]['metadata'].append(serializable_meta)
            
            with open(index_file, 'w') as f:
                json.dump(serializable_index, f)
            
            print(f"💾 Saved session index with {len(updated_index)} sessions to {index_file}")
            
        except Exception as e:
            # Best-effort persistence: an unwritable index only costs a
            # re-embed next run, so don't fail the whole operation.
            print(f"⚠️ Could not save index: {e}")
        
        print("✅ Session indexing completed!")
    
    async def search_sessions_semantic(
        self, 
        collector: WorkdirStatsCollector,
        query: str,
        max_results: int = 10,
        workdir_filter: Optional[str] = None
    ) -> List[SemanticSessionMatch]:
        """Hybrid session search: embeddings for inactive, keywords for active.

        Inactive sessions are scored by cosine similarity between the query
        embedding and precomputed chunk embeddings from the on-disk index;
        active sessions (whose content may still change) are scored with
        keyword heuristics instead.  Falls back to pure keyword search when
        the index cannot be built/loaded or the embedding call fails.

        Args:
            collector: Source of workdir stats and per-workdir sessions.
            query: Free-text search query.
            max_results: Maximum number of combined matches to return.
            workdir_filter: When given, restrict matches to this workdir
                (matched verbatim or with the CWD prefix collapsed to '.').

        Returns:
            Up to max_results SemanticSessionMatch objects, best first.
        """
        print(f"🔍 Performing hybrid search (embedding for inactive + keyword for active) for: '{query}'")
        
        # Load the precomputed index, building it on demand the first time.
        index_file = Path(self.sessions_workspace) / "session_index.json"
        
        if not index_file.exists():
            print("📚 No precomputed index found. Creating index first...")
            await self.index_sessions(collector)
            
            if not index_file.exists():
                print("❌ Failed to create index, falling back to keyword search")
                return await self._fallback_keyword_search(collector, query, max_results, workdir_filter)
        
        # Load the index
        try:
            with open(index_file, 'r') as f:
                session_index = json.load(f)
            print(f"📚 Loaded index with {len(session_index)} sessions")
        except Exception as e:
            print(f"❌ Could not load index: {e}")
            return await self._fallback_keyword_search(collector, query, max_results, workdir_filter)
        
        try:
            # Step 1: Get all current sessions and separate active vs inactive
            all_sessions = []
            stats_list = collector.collect_all_workdir_stats()
            
            for stats in stats_list:
                # Filter accepts the workdir verbatim or in its CWD-relative
                # ('.'-prefixed) form.
                if workdir_filter and stats.workdir != workdir_filter and stats.workdir != workdir_filter.replace(str(Path.cwd()), '.'):
                    continue
                sessions = collector.get_workdir_sessions(stats.workdir)
                for session in sessions:
                    all_sessions.append((session, stats.workdir))
            
            # Separate active and inactive sessions
            active_sessions = [(s, w) for s, w in all_sessions if s.is_active]
            inactive_sessions = [(s, w) for s, w in all_sessions if not s.is_active]
            
            print(f"📊 Found {len(inactive_sessions)} inactive sessions (using embeddings) + {len(active_sessions)} active sessions (using keywords)")
            
            # Step 2: Search inactive sessions using embeddings (precomputed index)
            semantic_matches = []
            
            # Generate query embedding for inactive sessions
            print(f"🧠 Generating query embedding for inactive sessions...")
            query_embedding_response = await self.searcher.embedder.create_embeddings(
                [query], doc_type="text", task="retrieval_query"
            )
            
            if not query_embedding_response.embeddings:
                raise Exception("Failed to generate query embedding")
            
            query_embedding = query_embedding_response.embeddings[0]
            
            # Calculate similarity scores using precomputed embeddings (inactive sessions only)
            import numpy as np
            
            query_vec = np.array(query_embedding)
            scores_and_metadata = []
            
            # Search through indexed sessions (all should be inactive when indexed)
            for session_key, session_data in session_index.items():
                # Keys are "workdir:session_id".  NOTE(review): this split
                # assumes workdir paths contain no ':' — confirm on Windows.
                workdir = session_key.split(':', 1)[0]
                
                # Apply workdir filtering if specified
                if workdir_filter and workdir != workdir_filter and workdir != workdir_filter.replace(str(Path.cwd()), '.'):
                    continue
                
                # Skip if this session is now active (shouldn't happen but safety check)
                session_id = session_key.split(':', 1)[1]
                is_now_active = any(s.session_id == session_id and s.is_active for s, _ in all_sessions)
                if is_now_active:
                    continue
                
                # Compare the query against every chunk embedding for this
                # session, keeping only the best-scoring chunk.
                session_embeddings = session_data['embeddings']
                session_metadata = session_data['metadata']
                
                best_similarity = 0.0
                best_metadata = None
                
                for i, session_embedding in enumerate(session_embeddings):
                    session_vec = np.array(session_embedding)
                    
                    # Calculate cosine similarity
                    dot_product = np.dot(query_vec, session_vec)
                    norms = np.linalg.norm(query_vec) * np.linalg.norm(session_vec)
                    
                    if norms > 0:
                        cosine_similarity = dot_product / norms
                        # Convert to 0-1 range (cosine similarity is -1 to 1)
                        similarity_score = (cosine_similarity + 1) / 2
                    else:
                        similarity_score = 0.0
                    
                    if similarity_score > best_similarity:
                        best_similarity = similarity_score
                        best_metadata = session_metadata[i]
                
                # Only include reasonably relevant results
                if best_similarity > 0.3:
                    scores_and_metadata.append((best_similarity, best_metadata))
            
            # Sort by similarity score (highest first)
            scores_and_metadata.sort(key=lambda x: x[0], reverse=True)
            
            # Create results from inactive sessions (embedding-based).  When
            # active sessions also compete, each side gets half the budget.
            inactive_results = []
            for score, metadata in scores_and_metadata[:max_results//2 if active_sessions else max_results]:
                # Reconstruct session object from metadata
                from .core import SessionInfo
                session = SessionInfo(
                    session_id=metadata['session_id'],
                    start_time=datetime.fromisoformat(metadata['start_time']),
                    end_time=None,  # Not stored in index
                    is_active=False,  # These are inactive sessions
                    total_tokens=metadata['total_tokens'],
                    input_tokens=0,  # Not stored in index for space
                    output_tokens=0,  # Not stored in index for space  
                    cache_creation_tokens=0,  # Not stored in index for space
                    cache_read_tokens=0,  # Not stored in index for space
                    total_cost=metadata['total_cost'],
                    models_used=metadata['models_used'],
                    first_user_prompt="",  # Not stored in index for space
                    abstract=metadata.get('abstract')
                )
                
                inactive_results.append(SemanticSessionMatch(
                    session=session,
                    workdir=metadata['workdir'],
                    score=score,
                    matched_content=metadata['content'],
                    match_source=f"🔍 {metadata['content_type']} (embedding)"
                ))
            
            # Step 3: Search active sessions using keyword matching
            active_results = []
            if active_sessions:
                print(f"🔤 Searching {len(active_sessions)} active sessions using keyword matching...")
                
                query_lower = query.lower()
                query_words = set(query_lower.split())
                
                for session, workdir in active_sessions:
                    if not session.first_user_prompt and not session.abstract:
                        continue
                    
                    # Calculate keyword-based score
                    score = self._calculate_keyword_score(query_lower, query_words, session)
                    
                    if score > 0.1:  # Lower threshold for active sessions
                        matched_content = session.first_user_prompt or session.abstract or ""
                        active_results.append(SemanticSessionMatch(
                            session=session,
                            workdir=workdir,
                            score=score * 0.8,  # Slightly lower score to indicate keyword-based
                            matched_content=matched_content[:200],  # Truncate for display
                            match_source="🔤 prompt (keyword)"
                        ))
                
                # Sort active results by score
                active_results.sort(key=lambda x: x.score, reverse=True)
                active_results = active_results[:max_results//2 if inactive_results else max_results]
            
            # Step 4: Combine and return results
            all_results = inactive_results + active_results
            all_results.sort(key=lambda x: x.score, reverse=True)
            final_results = all_results[:max_results]
            
            print(f"✅ Found {len(inactive_results)} inactive (embedding) + {len(active_results)} active (keyword) = {len(final_results)} total results")
            return final_results
            
        except Exception as e:
            print(f"⚠️ Index-based search failed: {e}")
            print("Falling back to enhanced keyword search...")
            return await self._fallback_keyword_search(collector, query, max_results, workdir_filter)
    
    async def _fallback_keyword_search(
        self,
        collector: WorkdirStatsCollector,
        query: str,
        max_results: int = 10,
        workdir_filter: Optional[str] = None
    ) -> List[SemanticSessionMatch]:
        """Fallback to enhanced keyword search when embeddings fail.

        Scores each session's first prompt and abstract using exact-phrase
        hits, per-word hits, substring overlaps and a small heuristic
        semantic bonus, then returns the top-scoring matches.

        Args:
            collector: Source of workdir stats and per-workdir sessions.
            query: Free-text search query.
            max_results: Maximum number of matches to return.
            workdir_filter: When given, restrict results to this workdir
                (matched verbatim or with the CWD prefix collapsed to '.').

        Returns:
            Up to max_results SemanticSessionMatch objects, best first.
        """
        stats_list = collector.collect_all_workdir_stats()

        # Apply workdir filtering if specified.  Path is already imported at
        # module level; compute the CWD-relative form of the filter once
        # instead of per comparison.
        if workdir_filter:
            relative_filter = workdir_filter.replace(str(Path.cwd()), '.')
            stats_list = [s for s in stats_list
                          if s.workdir == workdir_filter or s.workdir == relative_filter]

        scored_sessions = []

        # Query terms for keyword matching; words of <= 2 chars are too noisy.
        query_lower = query.lower()
        query_words = [word.strip() for word in query_lower.split() if len(word.strip()) > 2]

        for stats in stats_list:
            sessions = collector.get_workdir_sessions(stats.workdir)
            for session in sessions:
                # Candidate text sources: first prompt and abstract (lowercased).
                text_sources = []
                if session.first_user_prompt:
                    text_sources.append(("prompt", session.first_user_prompt.lower()))
                if session.abstract:
                    text_sources.append(("abstract", session.abstract.lower()))

                best_match_score = 0
                best_match_content = ""
                best_match_source = ""

                for source_type, text in text_sources:
                    current_score = 0

                    # Exact phrase match (highest score)
                    if query_lower in text:
                        current_score += len(query_words) * 3

                    # Individual word matches
                    for word in query_words:
                        if word in text:
                            current_score += 1

                        # Partial matches (substring), only for longer words
                        if len(word) > 4:
                            for text_word in text.split():
                                if word in text_word or text_word in word:
                                    current_score += 0.5

                    # Bonus for related-term co-occurrence (basic heuristic)
                    current_score += self._calculate_semantic_bonus(query_lower, text, query_words)

                    if current_score > best_match_score:
                        best_match_score = current_score
                        best_match_content = text[:200] + ("..." if len(text) > 200 else "")
                        best_match_source = source_type

                if best_match_score > 0:
                    # Normalize to 0-1, capped below 1.0 so keyword matches
                    # never look like perfect embedding matches.
                    max_possible_score = len(query_words) * 4 + 2  # exact + per-word + bonus headroom
                    normalized_score = min(best_match_score / max_possible_score, 0.95)

                    scored_sessions.append(SemanticSessionMatch(
                        session=session,
                        workdir=stats.workdir,
                        score=normalized_score,
                        matched_content=best_match_content,
                        match_source=best_match_source
                    ))

        # Highest score first, truncated to the requested count.
        scored_sessions.sort(key=lambda x: x.score, reverse=True)
        return scored_sessions[:max_results]
    
    def _calculate_semantic_bonus(self, query: str, text: str, query_words: List[str]) -> float:
        """Heuristic bonus for related-term co-occurrence.

        For every query word that belongs to one of the hand-curated topic
        groups below, add 0.3 for each *other* word from the same group that
        appears in the text.
        """
        # Hand-curated topic vocabularies, used as a poor man's thesaurus.
        semantic_groups = {
            'auth': ['login', 'authentication', 'signin', 'password', 'token', 'session', 'user'],
            'database': ['db', 'sql', 'query', 'table', 'connection', 'orm', 'schema'],
            'api': ['endpoint', 'rest', 'graphql', 'request', 'response', 'http', 'server'],
            'ui': ['interface', 'component', 'render', 'display', 'view', 'template', 'frontend'],
            'error': ['bug', 'exception', 'crash', 'fail', 'problem', 'issue', 'debug'],
            'test': ['testing', 'unittest', 'spec', 'mock', 'assert', 'coverage'],
            'config': ['configuration', 'settings', 'environment', 'env', 'setup', 'init'],
            'deploy': ['deployment', 'build', 'release', 'production', 'staging', 'docker'],
        }

        bonus = 0.0
        for word in query_words:
            for related_words in semantic_groups.values():
                if word not in related_words:
                    continue
                # Reward co-occurring siblings from the same topic group.
                for candidate in related_words:
                    if candidate != word and candidate in text:
                        bonus += 0.3

        return bonus
    
    def get_index_status(self, collector: WorkdirStatsCollector) -> Dict[str, Any]:
        """Get detailed status information about the semantic search index.

        Args:
            collector: Source of workdir stats, used to compare the on-disk
                index against the sessions currently present.

        Returns:
            Dict with index-file info, chunk/embedding statistics, current
            session count and which session keys are missing from the index.
            Parse/analysis problems are reported under 'index_error'.
        """
        index_file = Path(self.sessions_workspace) / "session_index.json"

        status: Dict[str, Any] = {
            "index_exists": False,
            "index_file_path": str(index_file),
            "index_file_size": 0,
            "indexed_sessions": 0,
            "indexed_chunks": 0,
            "embedding_dimensions": 0,
            "last_modified": None,
            "current_sessions": 0,
            "missing_sessions": [],
            "workdir_distribution": {},
            "content_type_distribution": {"prompt": 0, "abstract": 0},
            "semantic_rag_available": SEMANTIC_RAG_AVAILABLE,
            "configuration": {
                "embedder_provider": self.config.embedder_provider if hasattr(self, 'config') else "unknown",
                "workspace_path": self.sessions_workspace
            }
        }

        # Parse the index file at most once and reuse the result below.
        # (The previous implementation re-opened and re-parsed the same file
        # a second time to compute missing sessions.)
        index_data: Optional[Dict[str, Any]] = None

        if index_file.exists():
            stat_result = index_file.stat()
            status["index_exists"] = True
            status["index_file_size"] = stat_result.st_size
            status["last_modified"] = datetime.fromtimestamp(stat_result.st_mtime).isoformat()

            # Load and analyze index
            try:
                with open(index_file, 'r') as f:
                    index_data = json.load(f)

                status["indexed_sessions"] = len(index_data)

                total_chunks = 0
                embedding_dims = None

                for session_key, session_data in index_data.items():
                    # Session keys are "workdir:session_id".
                    workdir = session_key.split(':', 1)[0]

                    # Count workdir distribution
                    status["workdir_distribution"][workdir] = status["workdir_distribution"].get(workdir, 0) + 1

                    # Count chunks and inspect embedding dimensionality
                    embeddings = session_data.get('embeddings', [])
                    total_chunks += len(embeddings)
                    if embedding_dims is None and embeddings:
                        embedding_dims = len(embeddings[0])

                    # Count content types (prompt vs abstract)
                    for metadata in session_data.get('metadata', []):
                        content_type = metadata.get('content_type', 'unknown')
                        if content_type in status["content_type_distribution"]:
                            status["content_type_distribution"][content_type] += 1

                status["indexed_chunks"] = total_chunks
                status["embedding_dimensions"] = embedding_dims or 0

            except Exception as e:
                status["index_error"] = str(e)

        # Get current session keys for comparison against the index.
        current_sessions = []
        for stats in collector.collect_all_workdir_stats():
            for session in collector.get_workdir_sessions(stats.workdir):
                current_sessions.append(f"{stats.workdir}:{session.session_id}")

        status["current_sessions"] = len(current_sessions)

        # Sessions present now but absent from the (successfully parsed) index.
        if status["index_exists"] and "index_error" not in status and index_data is not None:
            missing_sessions = set(current_sessions) - set(index_data.keys())
            status["missing_sessions"] = list(missing_sessions)
        else:
            status["missing_sessions"] = current_sessions

        return status

    def _calculate_keyword_score(self, query_lower: str, query_words: set, session: SessionInfo) -> float:
        """Score an active session against the query using keywords only.

        Combines exact-phrase presence (0.5), the fraction of query words
        appearing verbatim (up to 0.3), and partial substring matches
        (0.1 each), capped at 1.0.
        """
        fragments = []
        if session.first_user_prompt:
            fragments.append(session.first_user_prompt.lower())
        if session.abstract:
            fragments.append(session.abstract.lower())

        if not fragments:
            return 0.0

        haystack = " ".join(fragments)
        score = 0.0

        # Exact phrase match carries the highest weight.
        if query_lower in haystack:
            score += 0.5

        # Fraction of query words that appear verbatim.
        haystack_words = set(haystack.split())
        if query_words:
            overlap = query_words & haystack_words
            score += (len(overlap) / len(query_words)) * 0.3

        # Partial substring matches catch typos and inflected forms
        # (at most one bonus per query word).
        for needle in query_words:
            if len(needle) > 3 and any(needle in candidate for candidate in haystack_words):
                score += 0.1

        return min(score, 1.0)  # Cap at 1.0


def create_semantic_searcher() -> Optional[SemanticSessionSearcher]:
    """Factory: build a SemanticSessionSearcher, or return None when the
    semantic-rag dependencies are unavailable or construction fails.
    """
    if not SEMANTIC_RAG_AVAILABLE or SemanticRagConfig is None:
        return None

    try:
        return SemanticSessionSearcher(SemanticRagConfig.from_env())
    except Exception as e:
        print(f"Failed to create semantic searcher: {e}")
        return None