#!/usr/bin/env python3
"""
search-text.py - A powerful text search tool for local documents and research papers.

This script enables searching through text files and research papers with both keyword-based and semantic search capabilities.
It is designed to work seamlessly with the SERP Search project structure, allowing users to search within project-specific directories.
Key features include:
- Keyword search with context highlighting
- Semantic search using embeddings for conceptual matching
- Optional reranking of results for improved relevance
- Support for project-specific directories (project/proj_name/papers)
- Customizable chunking and output options

Use this tool to quickly find relevant information in your local document repository, especially useful for academic research and literature reviews.
"""
import os
import argparse
import re
import hashlib
import pickle
from pathlib import Path
import textwrap
import sys

# Print environment information so runs are easy to reproduce and debug:
# Python/platform always; torch/CUDA details only when PyTorch is installed.
print(f"Python version: {sys.version}")
print(f"System platform: {sys.platform}")
try:
    import torch
    print(f"PyTorch version: {torch.__version__}")
    print(f"CUDA available: {torch.cuda.is_available()}")
    if torch.cuda.is_available():
        print(f"CUDA version: {torch.version.cuda}")
        print(f"GPU: {torch.cuda.get_device_name(0)}")
except ImportError:
    print("PyTorch not installed")

# For semantic search: probe the optional dependency and set a feature flag
# that the search functions check before using embeddings.
try:
    from sentence_transformers import SentenceTransformer, CrossEncoder
    import numpy as np
    HAVE_EMBEDDING = True
    print("Successfully imported sentence_transformers module")
except ImportError as e:
    print(f"Warning: sentence_transformers import failed: {e}")
    HAVE_EMBEDDING = False

# Check if reranking is available.
# NOTE(review): CrossEncoder is already imported by the probe above, so
# HAVE_RERANKER should always equal HAVE_EMBEDDING — confirm whether a
# separate probe is still needed.
try:
    from sentence_transformers import CrossEncoder
    HAVE_RERANKER = True
    print("Successfully imported CrossEncoder for reranking")
except ImportError as e:
    print(f"Warning: CrossEncoder import failed: {e}")
    HAVE_RERANKER = False

def load_text_files(directory, specific_file=None):
    """Load text files, either a single file or a whole directory tree.

    Args:
        directory: Directory to scan recursively for text files.
        specific_file: When given, load only this file and ignore *directory*.

    Returns:
        Dict mapping pathlib.Path -> file contents (str). Empty dict on any
        error (missing path, unreadable file).
    """
    # Single-file mode takes precedence over directory scanning.
    if specific_file:
        target = Path(specific_file)
        if not target.exists():
            print(f"Error: File {target} not found")
            return {}
        try:
            with open(target, "r", encoding="utf-8") as handle:
                return {target: handle.read()}
        except Exception as exc:
            print(f"Error reading {target}: {exc}")
            return {}

    root = Path(directory)
    if not root.exists():
        print(f"Error: {root} directory not found")
        return {}

    # Extensions accepted without content sniffing; anything else falls back
    # to the is_text_file() heuristic.
    known_suffixes = {'.txt', '.md', '.html', '.json', '.csv', '.xml', '.py', '.js', '.css', '.tex'}
    loaded = {}

    # Recursive walk over every entry below the root directory.
    for candidate in root.glob('**/*'):
        if not candidate.is_file():
            continue
        if candidate.suffix.lower() not in known_suffixes and not is_text_file(candidate):
            continue
        try:
            with open(candidate, "r", encoding="utf-8") as handle:
                loaded[candidate] = handle.read()
                print(f"Loaded: {candidate}")
        except UnicodeDecodeError:
            # Not valid UTF-8 after all; silently skip it.
            continue
        except Exception as exc:
            print(f"Error reading {candidate}: {exc}")

    return loaded

def is_text_file(file_path):
    """Heuristically decide whether *file_path* contains UTF-8 text.

    Reads the first 8 KB of the file: a NUL byte marks it as binary, and the
    sample must decode cleanly as UTF-8.

    Args:
        file_path: Path to the file to inspect.

    Returns:
        True if the sample looks like UTF-8 text; False otherwise, including
        when the file cannot be read at all.
    """
    try:
        with open(file_path, 'rb') as f:
            sample = f.read(8192)
    except OSError:
        # Missing file, permission error, etc.: treat as not-text.
        # (The original bare `except:` also swallowed KeyboardInterrupt.)
        return False

    # NUL bytes are a strong indicator of binary content.
    if b'\x00' in sample:
        return False

    try:
        sample.decode('utf-8')
    except UnicodeDecodeError:
        return False
    return True

def keyword_search(text_files, query, context_size=100, phrase_mode=True):
    """
    Search for keywords in text files and return matches with context.

    Args:
        text_files: Dictionary of file paths to file contents
        query: Search query string
        context_size: Number of characters of context on each side of a match
        phrase_mode: If True, treat multi-word query as a single phrase;
            otherwise split into individual terms

    Returns:
        List of {"file_path": ..., "matches": [...]} dicts; matches within a
        file are sorted by position and each carries term, position, a context
        snippet with the match wrapped in **...**, and a fixed score of 1.0.
    """
    results = []

    # A multi-word query is either one exact phrase or a bag of terms.
    if phrase_mode and ' ' in query.strip():
        query_terms = [query.strip()]
    else:
        query_terms = query.split()

    for file_path, content in text_files.items():
        file_matches = []

        for term in query_terms:
            # Literal, case-insensitive matching (regex metachars escaped).
            pattern = re.compile(re.escape(term), re.IGNORECASE)
            for match in pattern.finditer(content):
                start_pos = max(0, match.start() - context_size)
                end_pos = min(len(content), match.end() + context_size)

                context = content[start_pos:end_pos]

                # Highlight only the matched occurrence by offset. The old
                # str.replace() approach also wrapped every other copy of the
                # matched text that happened to fall inside the window.
                rel_start = match.start() - start_pos
                rel_end = match.end() - start_pos
                highlighted_context = (
                    context[:rel_start] + "**" + match.group(0) + "**" + context[rel_end:]
                )

                file_matches.append({
                    "term": term,
                    "position": match.start(),
                    "context": highlighted_context,
                    "score": 1.0  # Keyword matches are unranked: constant score
                })

        if file_matches:
            # Sort by position to maintain document order
            file_matches.sort(key=lambda x: x["position"])
            results.append({
                "file_path": file_path,
                "matches": file_matches
            })

    return results

def split_markdown_into_chunks(content, max_chunk_size=512):
    """
    Split markdown content into chunks based on sections.

    - Content after an ACKNOWLEDGMENT or REFERENCES header is discarded.
    - Short sections (<= max_chunk_size chars) are kept as single chunks.
    - Longer sections are split on paragraph boundaries, carrying the last
      sentence of the previous chunk forward as overlap.
    - Text appearing before the first header is kept as its own leading
      section (previously it was silently dropped by the header regex).

    Args:
        content: Markdown text to split.
        max_chunk_size: Target maximum chunk length in characters.

    Returns:
        (chunks, chunk_positions): parallel lists of chunk strings and their
        character offsets within the filtered content (str.find result; -1
        when a rebuilt chunk no longer appears verbatim).
    """
    # Headers that mark the start of back-matter we do not want to index.
    acknowledgment_pattern = re.compile(r'#+\s*acknowledgm?ents?', re.IGNORECASE)
    references_pattern = re.compile(r'#+\s*references?', re.IGNORECASE)

    ack_match = acknowledgment_pattern.search(content)
    ref_match = references_pattern.search(content)

    # Cut at whichever back-matter section appears first.
    cutoff_pos = len(content)
    if ack_match:
        cutoff_pos = ack_match.start()
    if ref_match and (not ack_match or ref_match.start() < cutoff_pos):
        cutoff_pos = ref_match.start()

    filtered_content = content[:cutoff_pos].strip()

    # Guard: if filtering removed everything, fall back to the raw content.
    if not filtered_content and cutoff_pos < len(content):
        print("Warning: Entire content would be filtered out. Using full content instead.")
        filtered_content = content

    # Split on markdown headers; each section runs up to the next header.
    section_pattern = re.compile(r'(#{1,6}\s+.*?)(?=\n#{1,6}\s+|\Z)', re.DOTALL)
    sections = section_pattern.findall(filtered_content)

    if sections:
        # Bug fix: the header-anchored regex cannot capture text that comes
        # before the first header, so recover that preamble explicitly.
        first_header = section_pattern.search(filtered_content)
        if first_header and first_header.start() > 0:
            preamble = filtered_content[:first_header.start()].strip()
            if preamble:
                sections.insert(0, preamble)
    else:
        # No headers at all: treat the whole document as one section.
        sections = [filtered_content]

    chunks = []
    chunk_positions = []

    for section in sections:
        if len(section) <= max_chunk_size:
            # Short section, keep as a single chunk.
            chunks.append(section)
            chunk_positions.append(filtered_content.find(section))
        else:
            # Long section: split on blank lines, preserving whole paragraphs.
            paragraphs = re.split(r'\n\s*\n', section)
            current_chunk = []
            current_length = 0
            last_sentence = ""

            for para in paragraphs:
                # Flush the current chunk before it would exceed the limit.
                if current_length + len(para) > max_chunk_size and current_chunk:
                    chunk_text = '\n\n'.join(current_chunk)
                    chunks.append(chunk_text)
                    chunk_positions.append(filtered_content.find(chunk_text))

                    # Seed the next chunk with the previous chunk's last
                    # sentence so context carries across the boundary.
                    if last_sentence:
                        current_chunk = [last_sentence]
                        current_length = len(last_sentence)
                    else:
                        current_chunk = []
                        current_length = 0

                # Remember the final sentence of this paragraph for overlap.
                sentences = re.split(r'(?<=[.!?])\s+', para)
                if sentences:
                    last_sentence = sentences[-1]

                current_chunk.append(para)
                current_length += len(para) + 2  # +2 for the joining newlines

            # Add the final chunk if there's anything left.
            if current_chunk:
                chunk_text = '\n\n'.join(current_chunk)
                chunks.append(chunk_text)
                chunk_positions.append(filtered_content.find(chunk_text))

    return chunks, chunk_positions

def embedding_search(text_files, query, chunk_size=512, overlap=50, similarity_threshold=0.3, clean=False, batch_size=32):
    """Search for semantically similar text using embeddings.

    Encodes the query and every document chunk with the BAAI/bge-m3
    SentenceTransformer model, then reports chunks whose cosine similarity
    to the query meets the threshold. Per-file embeddings are cached on disk
    under ./embedding_cache.

    Args:
        text_files: Mapping of pathlib.Path -> file contents (str).
        query: Natural-language search query.
        chunk_size: Maximum chunk length in characters.
        overlap: Character overlap between consecutive chunks (non-markdown files).
        similarity_threshold: Minimum cosine similarity for a chunk to be reported.
        clean: If True, ignore cached embeddings and recompute them.
        batch_size: Number of chunks per model.encode() call.

    Returns:
        List of {"file_path": Path, "matches": [...]} dicts, one per file with
        at least one chunk above threshold; matches sorted by score descending.
    """
    if not HAVE_EMBEDDING:
        print("Error: sentence_transformers is not installed. Run 'pip install sentence-transformers'")
        return []
    
    # The model is bound to a module-level name so other code can reach it.
    global model
    
    # Initialize results list
    results = []
    
    # Load model (unconditionally; re-loaded on every call to this function)
    print(f"Loading embedding model 'BAAI/bge-m3'...")
    model = SentenceTransformer("BAAI/bge-m3")
    
    # Compute query embedding
    query_embedding = model.encode(query)
    
    # Create a cache directory if it doesn't exist
    cache_dir = Path("embedding_cache")
    cache_dir.mkdir(exist_ok=True)
    
    # NOTE(review): content_hash is computed but never used below — cache keys
    # are built per-file instead. Candidate for removal.
    content_hash = hashlib.md5(str(sorted([(str(k), len(v)) for k, v in text_files.items()])).encode()).hexdigest()
    
    # Process each file
    for file_path, content in text_files.items():
        print(f"Processing {file_path.name} with semantic search...")
        file_matches = []
        
        # Use markdown-specific chunking for markdown files
        if file_path.suffix.lower() == '.md':
            chunks, chunk_positions = split_markdown_into_chunks(content, chunk_size)
        else:
            # Standard sliding-window chunking for non-markdown files
            chunks = []
            chunk_positions = []
            
            for i in range(0, len(content) - overlap, chunk_size - overlap):
                chunk = content[i:i + chunk_size]
                if len(chunk.strip()) > 0:  # Skip empty chunks
                    chunks.append(chunk)
                    chunk_positions.append(i)
        
        if not chunks:
            continue
        
        # Cache key combines file name, a hash of the full path, and content
        # length, so renamed or edited files miss the cache.
        cache_file = cache_dir / f"{file_path.name}_{hashlib.md5(str(file_path).encode()).hexdigest()}_{len(content)}.pkl"
        
        # Try to load cached embeddings if available and clean flag is not set
        chunk_embeddings = None
        if not clean and cache_file.exists():
            try:
                with open(cache_file, 'rb') as f:
                    cached_data = pickle.load(f)
                    if cached_data.get('chunks') == chunks:  # Verify chunks match
                        print(f"Using cached embeddings for {file_path.name}")
                        chunk_embeddings = cached_data.get('embeddings')
            except Exception as e:
                print(f"Error loading cache: {e}")
        
        # If no valid cache or clean flag is set, calculate embeddings in batches
        if chunk_embeddings is None:
            print(f"Calculating embeddings for {file_path.name} in batches (batch size: {batch_size})...")
            
            # Process chunks in batches to avoid memory issues
            chunk_embeddings = []
            for i in range(0, len(chunks), batch_size):
                batch = chunks[i:i + batch_size]
                print(f"  Processing batch {i//batch_size + 1}/{(len(chunks) + batch_size - 1)//batch_size} ({len(batch)} chunks)")
                try:
                    batch_embeddings = model.encode(batch)
                    chunk_embeddings.extend(batch_embeddings)
                except Exception as e:
                    # Any encode failure aborts the whole search.
                    print(f"Error encoding batch: {e}")
                    return []
            
            # Convert to numpy array
            chunk_embeddings = np.array(chunk_embeddings)
            
            # Save to cache (best-effort; failure only logs)
            try:
                with open(cache_file, 'wb') as f:
                    pickle.dump({'chunks': chunks, 'embeddings': chunk_embeddings}, f)
            except Exception as e:
                print(f"Error saving cache: {e}")
            
        # Cosine similarity between the query and every chunk embedding.
        similarities = np.dot(chunk_embeddings, query_embedding) / (
            np.linalg.norm(chunk_embeddings, axis=1) * np.linalg.norm(query_embedding)
        )
        
        # Attempted cleanup of the model inside the per-file loop.
        # NOTE(review): `model` is declared global above, so it does not appear
        # in locals() and the `del model` branch never fires — the model is
        # never actually released here. Confirm the intended cleanup point.
        try:
            import torch
            import gc
            # Delete references to model to free memory
            if 'model' in locals():
                del model
            gc.collect()
            torch.cuda.empty_cache()
        except (ImportError, AttributeError):
            pass
        
        # Find chunks with similarity above threshold
        for i, similarity in enumerate(similarities):
            if similarity >= similarity_threshold:
                file_matches.append({
                    "term": "semantic match",
                    "position": chunk_positions[i],
                    "context": chunks[i],
                    "score": float(similarity)
                })
        
        if file_matches:
            # Sort by semantic similarity score (highest first)
            file_matches.sort(key=lambda x: x["score"], reverse=True)
            results.append({
                "file_path": file_path,
                "matches": file_matches
            })
    
    return results

def rerank_results(text_files, query, embedding_results, similarity_threshold=0.5, batch_size=16, max_chunks=300):
    """
    Rerank search results using bge-reranker-v2-m3 model with hybrid scoring.
    
    This function uses the bge-reranker-v2-m3 model to rerank search results from the embedding search.
    It combines two signals with equal weights (0.5 each):
    1. Semantic similarity from the reranker model (50%)
    2. Keyword matching presence in the text (50%)
    
    The hybrid scoring helps to balance semantic understanding with the presence of specific keywords.
    
    Args:
        text_files: Dictionary of file paths to file contents
            (NOTE(review): currently unused by this function).
        query: Search query string
        embedding_results: Results from the embedding search; mutated in place
            (scores updated, reranked/semantic_score/keyword_score keys added).
        similarity_threshold: Threshold for semantic similarity
            (NOTE(review): currently unused by this function).
        batch_size: Batch size for processing reranker inputs
        max_chunks: Maximum number of chunks to rerank (default: 300)
        
    Returns:
        Updated search results with reranked scores
    """
    # Free GPU memory used by embedding model
    try:
        import torch
        import gc
        print("Cleaning up GPU memory before reranking...")
        gc.collect()
        torch.cuda.empty_cache()
    except (ImportError, AttributeError):
        print("GPU memory cleanup not available (torch not installed or no CUDA)")
    
    if not HAVE_RERANKER:
        print("Error: sentence_transformers is not installed or doesn't support CrossEncoder. Run 'pip install sentence-transformers'")
        return embedding_results
    
    # Return original results if there are no results to rerank
    if not embedding_results:
        return embedding_results
    
    print(f"Loading reranker model 'BAAI/bge-reranker-v2-m3'...")
    reranker = CrossEncoder('BAAI/bge-reranker-v2-m3')

    # Add another memory cleanup after loading the model
    try:
        import torch
        import gc
        gc.collect()
        torch.cuda.empty_cache()
    except (ImportError, AttributeError):
        pass
    
    # Extract all unique chunks across all files for reranking
    all_chunks = []
    chunk_info = []  # Store (file_idx, match_idx, score, context) for each chunk
    
    # Flatten the nested results so chunks can be ranked globally.
    for file_idx, file_result in enumerate(embedding_results):
        file_path = file_result["file_path"]
        matches = file_result["matches"]
        
        for match_idx, match in enumerate(matches):
            context = match["context"]
            score = match["score"]
            chunk_info.append((file_idx, match_idx, score, context))
    
    # Sort chunks by their initial score (highest first)
    chunk_info.sort(key=lambda x: x[2], reverse=True)
    
    # Limit to max_chunks so reranking cost stays bounded.
    if len(chunk_info) > max_chunks:
        print(f"Limiting reranking to top {max_chunks} chunks (out of {len(chunk_info)} total)")
        chunk_info = chunk_info[:max_chunks]
    
    # Create the chunks and mapping for reranking.
    # NOTE(review): chunk_to_file_map is keyed by (query, context) — if two
    # different matches carry identical context text, the map keeps only the
    # last one, and both reranker scores update the same match. Confirm
    # whether duplicate contexts can occur here.
    chunk_to_file_map = {}
    for file_idx, match_idx, score, context in chunk_info:
        chunk_pair = (query, context)
        all_chunks.append(chunk_pair)
        chunk_to_file_map[chunk_pair] = (file_idx, match_idx)
    
    # If we have chunks to rerank
    if all_chunks:
        print(f"Applying reranking to {len(all_chunks)} chunks using hybrid scoring (batch size: {batch_size})...")
        print(f"  - 50% semantic score from bge-reranker-v2-m3")
        print(f"  - 50% keyword matching score")
        
        # Process in batches to avoid memory issues
        reranker_scores = []
        for i in range(0, len(all_chunks), batch_size):
            batch = all_chunks[i:i + batch_size]
            batch_scores = reranker.predict(batch)
            reranker_scores.extend(batch_scores)
        
        # Add keyword search scores for hybrid ranking
        for i, chunk_pair in enumerate(all_chunks):
            _, context = chunk_pair
            file_idx, match_idx = chunk_to_file_map[chunk_pair]
            
            # Get original semantic score (read but not used in the hybrid formula)
            original_score = embedding_results[file_idx]["matches"][match_idx]["score"]
            
            # Keyword score = fraction of query keywords found in the context;
            # words of length <= 2 are ignored unless that filters out everything.
            keywords = [kw.lower() for kw in query.lower().split() if len(kw) > 2]  # Skip very short words
            if not keywords:  # If no keywords (after filtering), use all words
                keywords = [kw.lower() for kw in query.lower().split()]
                
            context_lower = context.lower()
            
            if keywords:
                matched_keywords = sum(1 for keyword in keywords if keyword in context_lower)
                keyword_score = matched_keywords / len(keywords)
            else:
                keyword_score = 0.0
            
            # Hybrid score: 50% semantic (reranker), 50% keyword
            hybrid_score = (reranker_scores[i] * 0.5) + (keyword_score * 0.5)
            
            # Update the score in the original results (in-place mutation)
            embedding_results[file_idx]["matches"][match_idx]["score"] = float(hybrid_score)
            embedding_results[file_idx]["matches"][match_idx]["reranked"] = True
            embedding_results[file_idx]["matches"][match_idx]["semantic_score"] = float(reranker_scores[i])
            embedding_results[file_idx]["matches"][match_idx]["keyword_score"] = float(keyword_score)
        
        # Re-sort matches by new scores
        for file_result in embedding_results:
            file_result["matches"].sort(key=lambda x: x["score"], reverse=True)

    # Final memory cleanup after reranking
    try:
        import torch
        import gc
        gc.collect()
        torch.cuda.empty_cache()
    except (ImportError, AttributeError):
        pass

    return embedding_results

def display_results(results, output_file=None, max_results=20):
    """Display search results in a human-readable format.

    Prints every shown match to stdout and, when output_file is given, writes
    the same text there as well.

    Args:
        results: List of {"file_path": ..., "matches": [...]} dicts as
            produced by keyword_search / embedding_search / rerank_results.
        output_file: Optional path; the displayed text is also written here.
        max_results: Global cap on the number of matches displayed.
    """
    if not results:
        print("No matches found.")
        return

    # Accumulates everything printed so it can be mirrored to the output file.
    output_content = ""

    def add_to_output(text):
        # Echo to stdout and buffer for the optional output file.
        nonlocal output_content
        print(text)
        if output_file:
            output_content += text + "\n"

    total_results_shown = 0

    for file_result in results:
        file_path = file_result["file_path"]
        matches = file_result["matches"]

        # How many matches from this file still fit under the global limit.
        results_to_show = min(len(matches), max_results - total_results_shown)
        if results_to_show <= 0:
            break

        shown_matches = matches[:results_to_show]

        add_to_output(f"\nFile: {file_path}")
        # Bug fix: report the file's total match count. The old message used
        # len(shown_matches), which always equals results_to_show, so it
        # claimed "Found N" where N was merely the displayed count.
        add_to_output(f"Found {len(matches)} matches (showing {results_to_show})")
        add_to_output("-" * 80)

        for i, match in enumerate(shown_matches, 1):
            # Wrap long contexts for readability.
            wrapped_context = textwrap.fill(match["context"], width=100)

            # Reranked matches carry extra score components worth surfacing.
            if match.get("reranked"):
                semantic_score = match.get("semantic_score", 0.0)
                keyword_score = match.get("keyword_score", 0.0)
                add_to_output(f"Match {i} (hybrid score: {match['score']:.2f}, semantic: {semantic_score:.2f}, keyword: {keyword_score:.2f}):")
            else:
                add_to_output(f"Match {i} (score: {match['score']:.2f}):")

            add_to_output(wrapped_context)
            add_to_output("-" * 80)

        total_results_shown += results_to_show
        if total_results_shown >= max_results:
            add_to_output(f"\nShowing {total_results_shown} of {sum(len(fr['matches']) for fr in results)} total matches (limit: {max_results})")
            break

    # Mirror everything shown to the output file, if requested.
    if output_file:
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write(output_content)
        print(f"\nResults written to {output_file}")

def preview_chunks(text_files, chunk_size=512, overlap=50, output_file=None):
    """Preview chunks for each file without performing embedding search.

    Prints a truncated (100-char) preview of every chunk to the console and,
    when output_file is given, writes the full chunk text there too.

    Args:
        text_files: Mapping of pathlib.Path -> file contents (str).
        chunk_size: Maximum chunk length in characters.
        overlap: Character overlap between consecutive chunks (non-markdown).
        output_file: Optional path for saving the full, untruncated chunks.
    """
    # If output_file is specified, open it for writing; on failure fall back
    # to console-only preview.
    output_file_handle = None
    if output_file:
        try:
            output_file_handle = open(output_file, 'w', encoding='utf-8')
            print(f"Writing full chunks to {output_file}")
        except Exception as e:
            print(f"Error opening output file {output_file}: {e}")
            output_file = None
    else:
        print("Note: No output file specified. Use --output filename.txt to save chunks to a file.")
    
    try:
        for file_path, content in text_files.items():
            print(f"\nFile: {file_path}")
            print("=" * 80)
            
            if output_file_handle:
                output_file_handle.write(f"File: {file_path}\n")
                output_file_handle.write("=" * 80 + "\n")
            
            # Use markdown-specific chunking for markdown files
            if file_path.suffix.lower() == '.md':
                chunks, chunk_positions = split_markdown_into_chunks(content, chunk_size)
            else:
                # Standard sliding-window chunking for non-markdown files
                chunks = []
                chunk_positions = []
                
                for i in range(0, len(content) - overlap, chunk_size - overlap):
                    chunk = content[i:i + chunk_size]
                    if len(chunk.strip()) > 0:  # Skip empty chunks
                        chunks.append(chunk)
                        chunk_positions.append(i)
            
            if not chunks:
                print("  No chunks created.")
                if output_file_handle:
                    output_file_handle.write("  No chunks created.\n\n")
                continue
                
            print(f"  Total chunks: {len(chunks)}\n")
            if output_file_handle:
                output_file_handle.write(f"  Total chunks: {len(chunks)}\n\n")
            
            for i, (chunk, position) in enumerate(zip(chunks, chunk_positions)):
                print(f"  Chunk #{i+1} (position: {position}, length: {len(chunk)})")
                print("-" * 80)
                # Print first 100 chars as preview in the console
                preview = chunk[:100].replace('\n', ' ').strip() + "..." if len(chunk) > 100 else chunk
                print(f"  {preview}")
                print("-" * 80 + "\n")
                
                # Write full chunk to output file if specified
                if output_file_handle:
                    output_file_handle.write(f"  Chunk #{i+1} (position: {position}, length: {len(chunk)})\n")
                    output_file_handle.write("-" * 80 + "\n")
                    output_file_handle.write(f"{chunk}\n")
                    output_file_handle.write("-" * 80 + "\n\n")
    finally:
        # Close output file if it was opened
        if output_file_handle:
            output_file_handle.close()
            print(f"\nFull chunks have been written to {output_file}")
        # NOTE(review): this branch appears unreachable — a failed open()
        # above resets output_file to None, so output_file can only be truthy
        # here when the handle was opened. Confirm before removing.
        elif output_file:
            print(f"\nWarning: Failed to write chunks to {output_file}")

def main():
    """Command-line entry point: parse arguments, load files, run the search.

    Flow: parse args -> resolve project directory -> load text files ->
    either preview chunking, run embedding search (optionally reranked),
    or run keyword search -> display results.
    """
    parser = argparse.ArgumentParser(description='Search for context in text files.')
    
    # Fixed all help strings to avoid any format specifiers (%, {}, etc.)
    parser.add_argument('query', type=str, help='Search query', nargs='?')
    parser.add_argument('--embedding', action='store_true', 
                        help='Enable semantic search using embeddings')
    parser.add_argument('--reranker', action='store_true',
                        help='Enable reranking of search results using bge-reranker-v2-m3 with hybrid scoring (50 percent semantic, 50 percent keyword matching)')
    parser.add_argument('--no-reranker', action='store_true',
                        help='[DEPRECATED] Reranking is now disabled by default with --embedding. Use --reranker to enable it.')
    parser.add_argument('--dir', type=str, default='text',
                        help='Directory containing text files (default: text)')
    parser.add_argument('--file', type=str, 
                        help='Process a specific file instead of a directory')
    parser.add_argument('--threshold', type=float, default=0.3,
                        help='Similarity threshold for embedding search (default: 0.3)')
    parser.add_argument('--chunk-size', type=int, default=512,
                        help='Maximum chunk size for text splitting (default: 512)')
    parser.add_argument('--preview', action='store_true',
                        help='Preview chunking results without performing search')
    parser.add_argument('--output', type=str,
                        help='Save full chunks to specified file when in preview mode')
    parser.add_argument('--output-results', type=str,
                        help='Save search results to specified file')
    parser.add_argument('--no-phrase-mode', action='store_true',
                        help='Split search query into individual terms instead of treating as a phrase')
    parser.add_argument('--clean', action='store_true',
                        help='Rebuild embeddings instead of using cached ones')
    parser.add_argument('--batch-size', type=int, default=32,
                        help='Batch size for embedding processing (default: 32)')
    parser.add_argument('--reranker-batch-size', type=int, default=16,
                        help='Batch size for reranker processing (default: 16, use smaller values if out of memory)')
    parser.add_argument('--max-rerank-chunks', type=int, default=300,
                        help='Maximum number of chunks to rerank (default: 300)')
    parser.add_argument('-n', '--num-results', type=int, default=20,
                        help='Maximum number of results to display (default: 20)')
    parser.add_argument('--project', type=str, default='default',
                        help='Project name to define directory structure as project/proj_name (default: default)')
    
    args = parser.parse_args()
    
    # The query is optional only in preview mode.
    # NOTE(review): the '--help' check below appears unreachable — argparse
    # handles --help/-h itself and exits inside parse_args(). Confirm before
    # removing.
    if args.query is None and not args.preview:
        if '--help' in sys.argv or '-h' in sys.argv:
            parser.print_help()
            return
        else:
            parser.error('the following arguments are required: query')
    
    # Define project directory structure: project/<name>/papers
    project_base_dir = os.path.join('project', args.project)
    project_papers_dir = os.path.join(project_base_dir, 'papers')
    
    # Override the default directory with the project-specific directory only
    # when --dir was left at its default and no single file was requested.
    if args.dir == 'text' and not args.file:
        args.dir = project_papers_dir
        print(f"Using project directory for text files: {args.dir}")
    
    # When embedding is enabled, reranking is disabled by default, use --reranker to enable it
    use_reranker = args.embedding and args.reranker

    # Show warning if deprecated --no-reranker flag is used
    if args.no_reranker:
        print("Warning: --no-reranker flag is deprecated as reranking is now disabled by default with --embedding.")
        print("To enable reranking, use --reranker instead.")
        # Force reranking off if the flag is explicitly used
        use_reranker = False

    if args.embedding and use_reranker:
        print("Using reranking with 'bge-reranker-v2-m3' model")
    
    # Load text files - either a specific file or all files in directory
    text_files = load_text_files(args.dir, specific_file=args.file)
    
    if not text_files:
        if args.file:
            print(f"Could not load file: {args.file}")
        else:
            print(f"No text files found in {args.dir}")
        return
    
    print(f"Loaded {len(text_files)} text file(s)")
    
    # If preview mode is enabled, show chunks and exit
    if args.preview:
        preview_chunks(text_files, chunk_size=args.chunk_size, overlap=50, output_file=args.output)
        return
    
    # Perform search: semantic (with optional reranking) or keyword.
    if args.embedding:
        if not HAVE_EMBEDDING:
            print("Error: Embedding search requires sentence_transformers module.")
            print("Install it with: pip install sentence-transformers")
            return
        
        results = embedding_search(text_files, args.query, 
                                  chunk_size=args.chunk_size, 
                                  similarity_threshold=args.threshold,
                                  clean=args.clean,
                                  batch_size=args.batch_size)
        
        # If reranker is enabled, rerank the results
        if use_reranker and results:
            if not HAVE_RERANKER:
                print("Error: Reranking requires sentence_transformers with CrossEncoder support.")
                print("Install it with: pip install -U sentence-transformers")
            else:
                # Clean up memory from embedding model before reranking
                try:
                    import torch
                    import gc
                    print("Cleaning up GPU memory after embedding search...")
                    gc.collect()
                    torch.cuda.empty_cache()
                except (ImportError, AttributeError):
                    print("GPU memory cleanup not available (torch not installed or no CUDA)")
                
                results = rerank_results(text_files, args.query, results, 
                                       similarity_threshold=args.threshold,
                                       batch_size=args.reranker_batch_size,
                                       max_chunks=args.max_rerank_chunks)
    else:
        # Use keyword search with phrase mode unless disabled
        results = keyword_search(text_files, args.query, phrase_mode=not args.no_phrase_mode)
    
    # Display results with limit
    display_results(results, output_file=args.output_results, max_results=args.num_results)

if __name__ == "__main__":
    main()