import configparser
from pathlib import Path

# OpenAIEmbeddings works with any OpenAI-compatible API that serves embeddings
from langchain_openai import OpenAIEmbeddings
from langchain_postgres import PGVector
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_core.documents import Document

from logger_setup import log

# Resolve paths relative to this file so the script works from any CWD;
# config.ini is expected to live alongside the script.
SCRIPT_DIR = Path(__file__).resolve().parent
CONFIG_PATH = SCRIPT_DIR / 'config.ini'

class KnowledgeBaseManager:
    """
    Manages the vector store using a configurable embedding provider.

    Reads settings from config.ini ([Paths], [Database], [Embedding] plus one
    section per embedding profile), connects to a PGVector collection, and can
    (re)populate it from `notes.md` files found under the transcripts folder.
    """
    def __init__(self):
        self.config = configparser.ConfigParser()
        self.config.read(CONFIG_PATH)
        
        # .strip() guards against trailing whitespace in the ini value.
        self.transcripts_path = Path(self.config.get('Paths', 'transcripts_output').strip())
        self.collection_name = self.config.get('Database', 'collection_name')
        
        log.info("Initializing embeddings...")
        self._init_embeddings()
        log.info("Embeddings initialized successfully.")

        # Initialize the PGVector store instance for retrieval
        log.info("Connecting to vector store...")
        self.vector_store = PGVector(
            connection=self._build_connection_string(),
            collection_name=self.collection_name,
            embeddings=self.embeddings,
        )
        log.info("Vector store connected successfully.")

        log.info("KnowledgeBaseManager initialized.")

    def _build_connection_string(self) -> str:
        """Build the SQLAlchemy/psycopg2 connection URL from the [Database] section.

        Centralized here because both retrieval (__init__) and population
        (populate_from_notes) need the identical URL.
        """
        db_config = self.config['Database']
        return (
            f"postgresql+psycopg2://{db_config.get('user')}:{db_config.get('password')}"
            f"@{db_config.get('host')}:{db_config.get('port')}/{db_config.get('dbname')}"
        )

    def _init_embeddings(self):
        """Loads the embedding model from the active embedding profile.

        Raises:
            configparser.NoSectionError / NoOptionError: config entries missing.
            ValueError: profile is incomplete or base_url is malformed.
        """
        try:
            active_profile_name = self.config.get('Embedding', 'active_profile')
            log.info(f"Loading embedding model using profile: '{active_profile_name}'")
            
            profile = self.config[active_profile_name]
            api_key = profile.get('api_key')
            base_url = profile.get('base_url')
            model_name = profile.get('model_name')

            # Presence check must come first: if base_url is None (missing key),
            # calling .startswith() on it would raise AttributeError instead of
            # the intended, clearly-logged ValueError.
            if not api_key or not base_url or not model_name:
                raise ValueError(f"Profile '{active_profile_name}' is missing api_key, base_url, or model_name.")

            if not base_url.startswith(('http://', 'https://')):
                raise ValueError(f"Invalid base_url format: {base_url}")

            # OpenAIEmbeddings can be used for any OpenAI-compatible embedding API
            self.embeddings = OpenAIEmbeddings(
                model=model_name,
                openai_api_key=api_key,
                openai_api_base=base_url,
            )
            log.info(f"Successfully configured embeddings for model '{model_name}' at '{base_url}'")

        except (configparser.NoSectionError, configparser.NoOptionError, ValueError) as e:
            log.critical(f"Error loading embedding configuration: {e}", exc_info=True)
            raise

    def populate_from_notes(self):
        """
        Finds all notes.md files, chunks them, and creates/populates the vector store.
        """
        log.info("--- Starting Knowledge Base Population ---")
        notes_files = list(self.transcripts_path.rglob('notes.md'))

        if not notes_files:
            log.warning(f"No 'notes.md' files found in {self.transcripts_path}. Nothing to populate.")
            return

        log.info(f"Found {len(notes_files)} note file(s) to process.")
        
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=400,
            chunk_overlap=80,
            length_function=len,
        )

        # Split every note into overlapping chunks, tagging each chunk with
        # its source path so retrieval results can be traced back to a file.
        all_docs = []
        for note_file in notes_files:
            log.info(f"Processing note file: {note_file}")
            with open(note_file, 'r', encoding='utf-8') as f:
                content = f.read()
            
            for chunk in text_splitter.split_text(content):
                all_docs.append(
                    Document(page_content=chunk, metadata={"source": str(note_file)})
                )

        if not all_docs:
            log.warning("No content to add to the vector store.")
            return

        log.info(f"Adding {len(all_docs)} document chunks to the vector store. This may take a while...")

        # pre_delete_collection=True drops any existing collection first, so
        # the store always reflects the current set of notes (full rebuild).
        PGVector.from_documents(
            documents=all_docs,
            embedding=self.embeddings,
            collection_name=self.collection_name,
            connection=self._build_connection_string(),  # pass the assembled connection string
            pre_delete_collection=True,
        )
        
        log.info("Successfully populated the vector store.")
        log.info("--- Knowledge Base Population Complete ---")

if __name__ == '__main__':
    # Entry point: connect to the vector store, then rebuild it from notes.
    KnowledgeBaseManager().populate_from_notes()
