# /Users/xing/agentic_jira/src/knowledge/rag.py
import os
from typing import List, Dict, Any
from config import config
import logging
from elasticsearch import Elasticsearch
from sentence_transformers import SentenceTransformer
import hashlib

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class RAGKnowledgeBase:
    """
    RAG knowledge base backed by Elasticsearch as the vector store.

    Loads .txt/.md documents from the configured knowledge directory, embeds
    them with a SentenceTransformer model, indexes content + embedding in
    Elasticsearch, and answers questions with cosine-similarity vector search.
    Kept in its own module so different agents can reuse it.
    """

    # Fallback vector size used only when no embedding model could be loaded.
    _DEFAULT_DIMS = 384

    def __init__(self):
        self.enabled = config.rag_enabled
        self.knowledge_path = config.knowledge_base_path
        self.es_host = config.elasticsearch_host
        self.index_name = config.elasticsearch_index
        self.embedding_model_name = config.embedding_model

        # Lazily populated by the _initialize_* helpers below.
        self.es_client = None
        self.embedding_model = None

        logger.info(f"RAGKnowledgeBase initialized. Enabled: {self.enabled}")
        logger.info(f"Knowledge base path: {self.knowledge_path}")
        logger.info(f"Elasticsearch host: {self.es_host}")
        logger.info(f"Embedding model: {self.embedding_model_name}")

        if self.enabled:
            # BUGFIX: load the embedding model BEFORE connecting to
            # Elasticsearch. Index creation needs the model's real embedding
            # dimension; the previous order created the index before the model
            # existed, so it always used the 384 fallback and broke indexing
            # for models with a different dimension.
            self._initialize_embedding_model()
            self._initialize_elasticsearch()
            self.load_knowledge_base()

    def _initialize_elasticsearch(self):
        """Connect to Elasticsearch and (re)create the vector index.

        On any failure the whole knowledge base is disabled rather than left
        half-initialized.
        """
        try:
            # Elasticsearch client requires a full URL; prepend a scheme if
            # the configured host lacks one.
            if not self.es_host.startswith(('http://', 'https://')):
                es_url = f"http://{self.es_host}"
            else:
                es_url = self.es_host

            self.es_client = Elasticsearch([es_url])
            logger.info("Connected to Elasticsearch successfully")

            # Create index with vector mapping if it doesn't exist
            self._create_index_if_not_exists()
        except Exception as e:
            logger.error(f"Failed to initialize Elasticsearch: {e}")
            self.enabled = False

    def _initialize_embedding_model(self):
        """Load the sentence-embedding model named in config.

        Prefers ModelScope; on any ModelScope failure (including ModelScope
        not being installed) falls back to HuggingFace via the hf-mirror
        endpoint. Disables the knowledge base if both paths fail.

        NOTE: the original code had a separate ``except ImportError`` branch,
        but it was unreachable — the modelscope import happens inside the
        inner ``try`` whose ``except Exception`` already catches ImportError.
        """
        try:
            logger.info(f"Initializing {self.embedding_model_name} model from ModelScope")
            try:
                from modelscope.hub.snapshot_download import snapshot_download
                model_dir = snapshot_download(self.embedding_model_name)
                self.embedding_model = SentenceTransformer(model_dir)
                logger.info(f"Successfully initialized {self.embedding_model_name} model from ModelScope")
            except Exception as model_scope_error:
                # Covers ModelScope missing (ImportError) and download errors.
                logger.warning(f"Failed to load {self.embedding_model_name} from ModelScope: {model_scope_error}")
                # Fallback to HuggingFace with mirror
                os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
                self.embedding_model = SentenceTransformer(self.embedding_model_name)
        except Exception as e:
            logger.error(f"Failed to initialize {self.embedding_model_name} model: {e}")
            self.enabled = False

    def _create_index_if_not_exists(self):
        """(Re)create the Elasticsearch index with a dense_vector mapping.

        Despite the name, an existing index is deleted first: the mapping's
        vector dimension must match the current embedding model, and stale
        indexes from a previous model would cause dimension mismatches.
        """
        try:
            # Use the model's real dimension when available; fall back to 384.
            dims = self._DEFAULT_DIMS
            if self.embedding_model:
                dims = self.embedding_model.get_sentence_embedding_dimension()
                logger.info(f"Detected embedding dimension: {dims}")

            # Delete existing index if it exists (to avoid dimension mismatch)
            if self.es_client.indices.exists(index=self.index_name):
                logger.info(f"Deleting existing Elasticsearch index: {self.index_name}")
                self.es_client.indices.delete(index=self.index_name)

            mapping = {
                "mappings": {
                    "properties": {
                        "document_id": {"type": "keyword"},
                        "content": {"type": "text"},
                        "embedding": {
                            "type": "dense_vector",
                            "dims": dims,
                            "index": True,
                            "similarity": "cosine"
                        }
                    }
                }
            }
            self.es_client.indices.create(index=self.index_name, body=mapping)
            logger.info(f"Created Elasticsearch index: {self.index_name} with {dims} dimensions")
        except Exception as e:
            logger.error(f"Failed to create index: {e}")

    def _get_document_hash(self, content: str) -> str:
        """Return an MD5 hex digest of *content* (content-change detection, not security)."""
        return hashlib.md5(content.encode()).hexdigest()

    def _embed(self, text: str) -> List[float]:
        """Embed *text* with the loaded model, or return a zero vector.

        The zero-vector fallback keeps indexing/search from crashing when the
        model failed to load. (The original duplicated this fallback in two
        places with a dead ``if self.embedding_model`` re-check inside the
        else-branch, so the dimension was always 384 there anyway.)
        """
        if self.embedding_model:
            return self.embedding_model.encode(text).tolist()
        return [0.0] * self._DEFAULT_DIMS

    def load_knowledge_base(self):
        """Read every .txt/.md file under the knowledge path and index it.

        Documents are keyed by the MD5 of their content so re-loading an
        unchanged file overwrites the same ES document instead of duplicating.
        Creates the knowledge directory if it is missing.
        """
        logger.info("Loading knowledge base into Elasticsearch")

        if not self.enabled:
            logger.info("Knowledge base is disabled")
            return

        if not os.path.exists(self.knowledge_path):
            logger.warning(f"Knowledge base path does not exist: {self.knowledge_path}")
            # Best-effort: create the directory so a later run can use it.
            try:
                os.makedirs(self.knowledge_path, exist_ok=True)
                logger.info(f"Created knowledge base directory: {self.knowledge_path}")
            except Exception as e:
                logger.error(f"Failed to create knowledge base directory: {e}")
            return

        try:
            for filename in os.listdir(self.knowledge_path):
                if filename.endswith(('.txt', '.md')):
                    file_path = os.path.join(self.knowledge_path, filename)
                    # BUGFIX: original logged the literal text "(unknown)"
                    # instead of the file being processed.
                    logger.info(f"Processing document: {filename}")

                    with open(file_path, 'r', encoding='utf-8') as f:
                        content = f.read()

                    doc = {
                        "document_id": filename,
                        "content": content,
                        "embedding": self._embed(content)
                    }

                    self.es_client.index(
                        index=self.index_name,
                        id=self._get_document_hash(content),
                        body=doc
                    )

            logger.info(f"Indexed documents into Elasticsearch index: {self.index_name}")
        except Exception as e:
            logger.error(f"Error loading knowledge base: {e}")

    def query(self, question: str, top_k: int = 5) -> str:
        """Vector-search the index for *question* and return a text summary.

        Args:
            question: Natural-language query to embed and search for.
            top_k: Maximum number of hits to retrieve.

        Returns:
            A human-readable summary of up to three 200-char snippets, or an
            error/disabled message (this method never raises).
        """
        logger.info(f"Querying knowledge base with question: {question}")

        if not self.enabled:
            logger.warning("RAG is disabled")
            return "RAG is disabled"

        try:
            question_embedding = self._embed(question)

            # script_score over match_all: brute-force cosine similarity
            # against every stored embedding (+1.0 keeps scores non-negative,
            # as ES requires).
            search_body = {
                "size": top_k,
                "query": {
                    "script_score": {
                        "query": {"match_all": {}},
                        "script": {
                            "source": "cosineSimilarity(params.query_vector, 'embedding') + 1.0",
                            "params": {"query_vector": question_embedding}
                        }
                    }
                }
            }

            response = self.es_client.search(
                index=self.index_name,
                body=search_body
            )

            retrieved_docs = []
            for hit in response['hits']['hits']:
                retrieved_docs.append({
                    'content': hit['_source']['content'],
                    'score': hit['_score']
                })

            # Summarize the top three hits, truncated for readability.
            docs_info = "\n".join([f"- {doc['content'][:200]}..." for doc in retrieved_docs[:3]])
            response_text = f"Retrieved {len(retrieved_docs)} relevant documents:\n{docs_info}"

            logger.info(f"Retrieved {len(retrieved_docs)} documents from Elasticsearch")
            return response_text

        except Exception as e:
            logger.error(f"Error querying knowledge base: {e}")
            return f"Error querying knowledge base: {str(e)}"

# Example usage:
if __name__ == "__main__":
    kb = RAGKnowledgeBase()
    if kb.enabled:
        print(kb.query("What is the project status?"))