"""RAG indexer for code repositories."""

import json
import hashlib
from pathlib import Path
from typing import List, Dict, Any, Optional
from datetime import datetime

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.schema import Document
from rich.console import Console
from rich.progress import Progress, SpinnerColumn, TextColumn

from .config import Config
from .code_parser import CodeParser

console = Console()

class CodeChunk:
    """A chunk of source text together with the metadata describing its origin."""

    def __init__(self, content: str, metadata: Dict[str, Any]):
        self.content = content
        self.metadata = metadata
        # Stable identifier derived from file path + content hash.
        self.chunk_id = self._generate_id()

    def _generate_id(self) -> str:
        """Build a deterministic ID of the form ``<file_path>:<md5-of-content>``.

        Falls back to an empty path prefix when the metadata carries no
        ``file_path`` key.
        """
        path_part = self.metadata.get('file_path', '')
        digest = hashlib.md5(self.content.encode()).hexdigest()
        return ':'.join((path_part, digest))

class RAGIndexer:
    """Indexes code repositories for RAG queries.

    Wraps a persistent Chroma vector store (HuggingFace embeddings) and, for
    each source file, produces two kinds of chunks:

    * structured chunks for the functions, classes and imports reported by
      ``CodeParser``, and
    * plain overlapping text chunks of the whole file for general context.
    """

    def __init__(self):
        self.embeddings = HuggingFaceEmbeddings(model_name=Config.EMBEDDING_MODEL)
        self.vector_store = Chroma(
            persist_directory=Config.CHROMA_PERSIST_DIR,
            embedding_function=self.embeddings
        )
        self.parser = CodeParser()
        # Generic splitter for whole-file context chunks; separators are tried
        # from coarsest (blank line) to finest (single character).
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=Config.CHUNK_SIZE,
            chunk_overlap=Config.CHUNK_OVERLAP,
            separators=["\n\n", "\n", " ", ""]
        )

    def index_repository(self, repo_path: Path, repo_url: str) -> int:
        """
        Index an entire repository.

        Args:
            repo_path: Local path to the repository
            repo_url: Original GitHub URL

        Returns:
            Number of chunks indexed
        """
        console.print(f"[blue]Indexing repository: {repo_url}[/blue]")

        all_chunks: List[CodeChunk] = []

        with Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            console=console,
        ) as progress:
            task = progress.add_task("Processing files...", total=None)

            # Find all source files, then size the progress bar to match.
            source_files = self._find_source_files(repo_path)
            progress.update(task, total=len(source_files))

            for file_path in source_files:
                try:
                    all_chunks.extend(
                        self._process_file(file_path, repo_path, repo_url)
                    )
                except Exception as e:
                    console.print(f"[red]Error processing {file_path}: {e}[/red]")
                finally:
                    # Advance even on failure so the bar can reach 100%.
                    progress.advance(task)

        if all_chunks:
            # Convert to LangChain documents and persist in the vector store.
            documents = [
                Document(page_content=chunk.content, metadata=chunk.metadata)
                for chunk in all_chunks
            ]
            self.vector_store.add_documents(documents)
            console.print(f"[green]Indexed {len(all_chunks)} chunks[/green]")

        return len(all_chunks)

    def _find_source_files(self, repo_path: Path) -> List[Path]:
        """Find all indexable source files in the repository.

        Collects files matching the configured extensions, de-duplicates and
        sorts them for deterministic ordering, skips anything located under a
        hidden directory (e.g. ``.git``, ``.venv``), and drops files larger
        than ``Config.MAX_FILE_SIZE``.
        """
        candidates = set()
        for extension in Config.SUPPORTED_EXTENSIONS:
            candidates.update(repo_path.rglob(f"*{extension}"))

        filtered_files: List[Path] = []
        for file_path in sorted(candidates):
            # Ignore files inside hidden directories (repo internals such as
            # .git, or vendored environments like .venv). Hidden *files* at
            # any level are still accepted.
            relative = file_path.relative_to(repo_path)
            if any(part.startswith('.') for part in relative.parts[:-1]):
                continue
            try:
                if file_path.stat().st_size <= Config.MAX_FILE_SIZE:
                    filtered_files.append(file_path)
            except OSError:
                # File vanished or is unreadable; skip it.
                continue

        return filtered_files

    def _process_file(self, file_path: Path, repo_path: Path, repo_url: str) -> List[CodeChunk]:
        """Process a single source file into structured and text chunks.

        Returns an empty list (and logs) on any failure — e.g. a file that is
        not valid UTF-8 — so one bad file never aborts the whole index run.
        """
        try:
            content = file_path.read_text(encoding='utf-8')
            relative_path = file_path.relative_to(repo_path)
            parsed_data = self.parser.parse_file(file_path)

            chunks: List[CodeChunk] = []

            # Structured chunks only when the parser understood the file.
            if parsed_data:
                chunks.extend(
                    self._structured_chunks(content, parsed_data, relative_path, repo_url)
                )

            # Always add whole-file text chunks for general context.
            chunks.extend(
                self._text_chunks(content, file_path, relative_path, repo_url)
            )

            return chunks

        except Exception as e:
            console.print(f"[red]Error processing {file_path}: {e}[/red]")
            return []

    def _structured_chunks(
        self,
        content: str,
        parsed_data: Dict[str, Any],
        relative_path: Path,
        repo_url: str,
    ) -> List[CodeChunk]:
        """Build chunks for the functions, classes and imports found by the parser."""
        language = parsed_data['language']
        chunks: List[CodeChunk] = []

        for func in parsed_data.get('functions', []):
            metadata = {
                'type': 'function',
                'name': func['name'],
                'file_path': str(relative_path),
                'repo_url': repo_url,
                'language': language,
                'start_line': func['start_line'],
                'end_line': func['end_line'],
                'docstring': func.get('docstring', ''),
                # Lists are JSON-encoded so metadata values stay scalar.
                'params': json.dumps(func.get('params', []))
            }
            chunks.append(
                CodeChunk(self._extract_function_content(content, func), metadata)
            )

        for cls in parsed_data.get('classes', []):
            metadata = {
                'type': 'class',
                'name': cls['name'],
                'file_path': str(relative_path),
                'repo_url': repo_url,
                'language': language,
                'start_line': cls['start_line'],
                'end_line': cls['end_line'],
                'docstring': cls.get('docstring', ''),
                'methods': json.dumps([m['name'] for m in cls.get('methods', [])])
            }
            chunks.append(
                CodeChunk(self._extract_class_content(content, cls), metadata)
            )

        for imp in parsed_data.get('imports', []):
            metadata = {
                'type': 'import',
                'statement': imp['statement'],
                'file_path': str(relative_path),
                'repo_url': repo_url,
                'language': language,
                'line': imp['line']
            }
            chunks.append(CodeChunk(imp['statement'], metadata))

        return chunks

    def _text_chunks(
        self,
        content: str,
        file_path: Path,
        relative_path: Path,
        repo_url: str,
    ) -> List[CodeChunk]:
        """Split the whole file into overlapping text chunks for context."""
        file_metadata = {
            'type': 'file',
            'file_path': str(relative_path),
            'repo_url': repo_url,
            'language': file_path.suffix.lstrip('.'),
            'size': len(content)
        }
        return [
            CodeChunk(chunk_text, dict(file_metadata, chunk_index=i))
            for i, chunk_text in enumerate(self.text_splitter.split_text(content))
        ]

    @staticmethod
    def _slice_lines(content: str, start_line: int, end_line: int) -> str:
        """Return lines ``start_line``..``end_line`` (1-based, inclusive)."""
        return '\n'.join(content.split('\n')[start_line - 1:end_line])

    def _extract_function_content(self, content: str, func: Dict[str, Any]) -> str:
        """Extract the full content of a function."""
        return self._slice_lines(content, func['start_line'], func['end_line'])

    def _extract_class_content(self, content: str, cls: Dict[str, Any]) -> str:
        """Extract the full content of a class."""
        return self._slice_lines(content, cls['start_line'], cls['end_line'])

    def query_similar_chunks(self, query: str, k: int = 5, filter_dict: Optional[Dict] = None) -> List[Document]:
        """
        Find similar code chunks based on a query.

        Args:
            query: Natural language query
            k: Number of results to return
            filter_dict: Optional filters for metadata

        Returns:
            List of relevant documents (empty on error)
        """
        try:
            kwargs: Dict[str, Any] = {"k": k}
            if filter_dict:
                kwargs["filter"] = filter_dict

            return self.vector_store.similarity_search(query, **kwargs)

        except Exception as e:
            console.print(f"[red]Error querying vector store: {e}[/red]")
            return []

    def _search_by_type(self, query: str, chunk_type: str, repo_url: Optional[str]) -> List[Document]:
        """Shared implementation for the typed search helpers below."""
        filter_dict: Dict[str, Any] = {"type": chunk_type}
        if repo_url:
            filter_dict["repo_url"] = repo_url
        return self.query_similar_chunks(query, k=10, filter_dict=filter_dict)

    def search_functions(self, function_name: str, repo_url: Optional[str] = None) -> List[Document]:
        """Search for functions by name, optionally scoped to one repository."""
        return self._search_by_type(f"function {function_name}", "function", repo_url)

    def search_classes(self, class_name: str, repo_url: Optional[str] = None) -> List[Document]:
        """Search for classes by name, optionally scoped to one repository."""
        return self._search_by_type(f"class {class_name}", "class", repo_url)

    def search_imports(self, module_name: str, repo_url: Optional[str] = None) -> List[Document]:
        """Search for import statements, optionally scoped to one repository."""
        return self._search_by_type(f"import {module_name}", "import", repo_url)

    def get_repository_stats(self, repo_url: str) -> Dict[str, Any]:
        """Get indexing statistics for a repository.

        Returns ``{"indexed": False, "chunks": 0}`` when the repository has
        no chunks or the store cannot be read.
        """
        try:
            all_docs = self.vector_store.get()

            # Count matching chunks via metadata only; no need to carry the
            # document texts along just to count them.
            count = sum(
                1 for metadata in all_docs['metadatas']
                if metadata.get('repo_url') == repo_url
            )

            if not count:
                return {"indexed": False, "chunks": 0}

            return {
                "indexed": True,
                "chunks": count,
                "repo_url": repo_url
            }

        except Exception as e:
            # Best effort: report the failure instead of swallowing it
            # silently, but still answer "not indexed".
            console.print(f"[red]Error reading vector store: {e}[/red]")
            return {"indexed": False, "chunks": 0}

    def remove_repository(self, repo_url: str) -> bool:
        """Remove all chunks for a repository.

        Returns True if any chunks were deleted, False otherwise.
        """
        try:
            all_docs = self.vector_store.get()

            # Select the store-assigned IDs of every chunk from this repo.
            ids_to_delete = [
                doc_id for doc_id, metadata in zip(all_docs['ids'], all_docs['metadatas'])
                if metadata.get('repo_url') == repo_url
            ]

            if ids_to_delete:
                self.vector_store.delete(ids_to_delete)
                console.print(f"[green]Removed {len(ids_to_delete)} chunks for {repo_url}[/green]")
                return True

            return False

        except Exception as e:
            console.print(f"[red]Error removing repository: {e}[/red]")
            return False