"""Retriever module for academic paper search."""
from typing import Dict, List, Optional, Set
import re
import requests
from pydantic import BaseModel

from config import RAG_SERVER_URL, DEFAULT_TOP_K, MAX_CHUNKS_PER_DOC

class PaperChunk(BaseModel):
    """Paper chunk data model.

    One retrieved text chunk plus the identity of its source paper;
    returned by every AcademicRetriever search method.
    """
    # Identifier of the source paper on the RAG server (stringified by callers).
    paper_id: str
    # Human-readable title of the source paper.
    paper_title: str
    # Identifier of this chunk within the paper (stringified by callers).
    chunk_id: str
    # Raw text content of the chunk.
    chunk_text: str

class PaperSections(BaseModel):
    """Paper sections data model.

    Structured view of a paper produced by
    AcademicRetriever.get_paper_sections; a section field is left empty
    ("") when that section could not be extracted from the chunks.
    """
    paper_id: str
    paper_title: str
    abstract: str = ""
    introduction: str = ""
    related_works: str = ""

class AcademicRetriever:
    """Academic paper retriever backed by a custom RAG HTTP server.

    All public methods issue GET requests against ``base_url`` and parse
    the JSON results into PaperChunk / PaperSections models.
    """

    def __init__(self, base_url: str = RAG_SERVER_URL):
        """Store the server root URL, stripping any trailing slash."""
        self.base_url = base_url.rstrip("/")

    def _make_request(self, endpoint: str, params: Dict) -> List[Dict]:
        """GET ``{base_url}/{endpoint}`` and return the decoded JSON list.

        Raises:
            requests.HTTPError: on a non-2xx response (the original code
                silently tried to JSON-decode error pages).
            requests.Timeout: if the server does not answer within 30s
                (the original request could hang forever).
        """
        url = f"{self.base_url}/{endpoint}"
        response = requests.get(url, params=params, timeout=30)
        response.raise_for_status()
        return response.json()

    @staticmethod
    def _parse_flat_result(result: Dict, paper_id: Optional[str] = None) -> PaperChunk:
        """Build a PaperChunk from a flat result record.

        ``paper_id`` overrides the record's own "paper_id" field for
        endpoints that do not echo it back (query_by_paper_id).
        """
        return PaperChunk(
            paper_id=paper_id if paper_id is not None else str(result["paper_id"]),
            paper_title=result["paper_title"],
            chunk_id=str(result["chunk_id"]),
            chunk_text=result["chunk_text"],
        )

    def search_papers(self, query: str, top_k: int = DEFAULT_TOP_K) -> List[PaperChunk]:
        """Search papers by semantic similarity.

        NOTE: unlike the other endpoints, this one nests its fields
        under an "entity" key, so it is parsed inline.
        """
        results = self._make_request("search_papers", {"query": query, "top_k": top_k})
        return [
            PaperChunk(
                paper_id=str(result["id"]),
                paper_title=result["entity"]["paper_title"],
                chunk_id=str(result["entity"]["chunk_id"]),
                chunk_text=result["entity"]["chunk_text"],
            )
            for result in results
        ]

    def get_paper_chunks(self, paper_id: str, top_k: int = MAX_CHUNKS_PER_DOC) -> List[PaperChunk]:
        """Get up to ``top_k`` chunks belonging to one specific paper."""
        results = self._make_request(
            "query_by_paper_id", {"paper_id": paper_id, "top_k": top_k}
        )
        return [self._parse_flat_result(result, paper_id=paper_id) for result in results]

    def search_by_title(self, title: str, exact_match: bool = True) -> List[PaperChunk]:
        """Search papers by exact title, or by title substring."""
        endpoint = "query_by_title" if exact_match else "query_by_title_contain"
        results = self._make_request(endpoint, {"title": title})
        return [self._parse_flat_result(result) for result in results]

    def search_by_chunk(self, chunk: str, top_k: int = DEFAULT_TOP_K) -> List[PaperChunk]:
        """Search papers whose chunk content contains ``chunk``."""
        results = self._make_request(
            "query_by_chunk_contain", {"chunk": chunk, "top_k": top_k}
        )
        return [self._parse_flat_result(result) for result in results]

    def _extract_section(self, text: str, section_name: str) -> str:
        """Extract the ``section_name`` section from ``text``, or "".

        PDF extraction sometimes detaches the heading's first letter
        (e.g. "I ntroduction").  BUG FIX: the original hard-coded "I ",
        which only worked for "Introduction"; the split form is now built
        from the section's own first letter ("A bstract", "R elated Work").

        NOTE(review): like the original, splitting without maxsplit means
        a repeated heading truncates the section at its second occurrence.
        """
        split_form = f'{section_name[0]} {section_name[1:]}'
        patterns = [
            re.compile(re.escape(section_name), re.IGNORECASE),
            re.compile(re.escape(split_form), re.IGNORECASE),
        ]
        for pattern in patterns:
            parts = pattern.split(text)
            if len(parts) > 1:
                # Re-attach a clean heading so downstream text is uniform.
                return f'{section_name}\n{parts[1]}'
        return ""

    def get_paper_sections(self, paper_id: str) -> Optional[PaperSections]:
        """Get structured sections from a paper.

        Returns None when the paper has no chunks.  (Annotation fixed:
        the original claimed a non-optional PaperSections while
        returning None on the empty path.)
        """
        chunks = self.get_paper_chunks(paper_id)
        if not chunks:
            return None

        paper_title = chunks[0].paper_title
        abstract = introduction = related_works = ""

        for chunk in chunks:
            text = chunk.chunk_text
            # Keep the first non-empty extraction for each section.
            if not abstract:
                abstract = self._extract_section(text, "Abstract")
            if not introduction:
                introduction = self._extract_section(text, "Introduction")
            if not related_works:
                related_works = self._extract_section(text, "Related Work")

        return PaperSections(
            paper_id=paper_id,
            paper_title=paper_title,
            abstract=abstract,
            introduction=introduction,
            related_works=related_works,
        )

    def get_relevant_papers(self, topic: str) -> List[PaperSections]:
        """Get relevant papers with their sections for a topic.

        Combines three strategies — semantic search, title-substring
        search, and chunk-content search — dedupes by paper id, then
        fetches structured sections for each unique paper.
        """
        paper_ids: Set[str] = set()
        searches = (
            self.search_papers(topic),
            self.search_by_title(topic, exact_match=False),
            self.search_by_chunk(topic),
        )
        for chunks in searches:
            paper_ids.update(chunk.paper_id for chunk in chunks)

        papers = []
        for paper_id in paper_ids:
            paper_sections = self.get_paper_sections(paper_id)
            if paper_sections:
                papers.append(paper_sections)
        return papers

    def consolidate_chunks(self, chunks: List[PaperChunk], max_chars: int = 100000) -> List[str]:
        """Pack chunk texts into batch strings of at most ~``max_chars``.

        BUG FIX: the original appended a leading empty string when the
        very first chunk alone exceeded ``max_chars``; empty batches are
        no longer emitted.  A single oversized chunk still forms its own
        batch (it cannot be split further here).
        """
        consolidated: List[str] = []
        current_text = ""
        for chunk in chunks:
            chunk_text = (
                f"paper_title: {chunk.paper_title}\n"
                f"chunk_id: {chunk.chunk_id}\n"
                f"{chunk.chunk_text}\n\n"
            )
            if current_text and len(current_text) + len(chunk_text) > max_chars:
                consolidated.append(current_text)
                current_text = chunk_text
            else:
                current_text += chunk_text
        if current_text:
            consolidated.append(current_text)
        return consolidated

    def consolidate_sections(
        self, papers: List[PaperSections], section_type: str, max_chars: int = 100000
    ) -> List[str]:
        """Consolidate one named section from multiple papers.

        ``section_type`` is a PaperSections attribute name ("abstract",
        "introduction" or "related_works"); papers without that section
        are skipped.  GENERALIZED: the hard-coded 100000-char limit is
        now a ``max_chars`` parameter (default unchanged).  BUG FIX: no
        empty batch is emitted when the first section alone exceeds the
        limit (same defect as consolidate_chunks).
        """
        result: List[str] = []
        current = ""
        for paper in papers:
            section_text = getattr(paper, section_type, "")
            if not section_text:
                continue
            new_text = (
                f"paper_title: {paper.paper_title}\n"
                f"{section_text}\n\n"
            )
            if current and len(current) + len(new_text) > max_chars:
                result.append(current)
                current = new_text
            else:
                current += new_text
        if current:
            result.append(current)
        return result

# Create singleton instance
# Module-level shared instance built with the default RAG_SERVER_URL
# from config; constructed at import time.
retriever = AcademicRetriever() 