import logging
from typing import List

from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import OllamaEmbeddings
from langchain_postgres import PGVector
from langchain_text_splitters import CharacterTextSplitter

from config.settings import Setting

logger = logging.getLogger(__name__)


class Retriever:
    """Vector-database retriever with embedding-based contextual compression.

    Wraps a PGVector store behind a similarity-threshold retriever and an
    ``EmbeddingsFilter`` compression stage. Postgres connection parameters
    come from ``Setting``; embedding/search knobs are configurable via
    keyword-only arguments whose defaults preserve the original behavior.
    """

    def __init__(
        self,
        collection_name: str = "chatbot",
        *,
        embedding_base_url: str = "http://localhost:11434",
        embedding_model: str = "dengcao/Qwen3-Embedding-0.6B:Q8_0",
        similarity_threshold: float = 0.5,
        score_threshold: float = 0.1,
        top_k: int = 5,
        chunk_size: int = 2000,
        chunk_overlap: int = 200,
    ):
        """Build the embedding client, vector store and retriever chain.

        Args:
            collection_name: PGVector collection to search.
            embedding_base_url: Base URL of the Ollama server.
            embedding_model: Name of the embedding model served by Ollama.
            similarity_threshold: Minimum embedding similarity for the
                compression filter to keep a document.
            score_threshold: Minimum similarity score for the base retriever.
            top_k: Number of documents returned by the base retriever.
            chunk_size: Maximum characters per text-splitter chunk.
            chunk_overlap: Overlapping characters between adjacent chunks.
        """
        self.embeddings = OllamaEmbeddings(base_url=embedding_base_url, model=embedding_model)

        # Drops retrieved documents whose embedding similarity to the query
        # falls below the threshold.
        self.embedding_filter = EmbeddingsFilter(
            embeddings=self.embeddings, similarity_threshold=similarity_threshold
        )

        self.pg_vector = PGVector(
            connection=PGVector.connection_string_from_db_params(
                driver="psycopg",
                host=Setting.pg_host,
                port=int(Setting.pg_port),
                user=Setting.pg_user,
                password=Setting.pg_password,
                database=Setting.pg_database,
            ),
            embeddings=self.embeddings,
            collection_name=collection_name,
        )
        self.retriever = self.pg_vector.as_retriever(
            search_type="similarity_score_threshold",
            search_kwargs={
                'score_threshold': score_threshold,
                'k': top_k,
            },
        )
        # Chain: base similarity retrieval, then the embedding-similarity filter.
        self.compression_retriever = ContextualCompressionRetriever(
            base_compressor=self.embedding_filter, base_retriever=self.retriever
        )
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            separators=["\n\n", "\n", "。"],
        )
        # Default document post-processor; callers may assign a custom one.
        self.docs_resolver = docs_resolve


def docs_resolve(documents: List[Document]) -> List[Document]:
    """Default document resolver: pass documents through unchanged.

    Returns the input list as-is, or an empty list when the input is
    falsy (``None`` or empty). Call sites may substitute a resolver with
    more specific parsing logic in place of this default.
    """
    return documents or []


class CustomTextSplitter(CharacterTextSplitter):
    """Line-based text splitter built on ``CharacterTextSplitter``.

    Overrides ``split_text`` so that text is split into its non-empty
    lines rather than into character-count-based chunks.
    """

    def __init__(self, chunk_size: int = 100, chunk_overlap: int = 0):
        """Initialize the base splitter.

        Args:
            chunk_size: Forwarded to ``CharacterTextSplitter`` (not used
                by the line-based ``split_text`` below).
            chunk_overlap: Forwarded to ``CharacterTextSplitter``.
        """
        super().__init__(chunk_size=chunk_size, chunk_overlap=chunk_overlap)

    def split_text(self, text: str) -> List[str]:
        """Split *text* into its non-empty lines.

        Surrounding whitespace of the whole text is stripped first, and
        whitespace-only lines are dropped.
        """
        # The filtered comprehension is already [] for empty input, so
        # no "lines if lines else []" guard is needed.
        return [line for line in text.strip().split("\n") if line.strip()]

def doc_resolve(documents: List[Document]) -> List[Document]:
    """
    解析文档列表，返回一个新的文档列表，将数据按|进行拆分
    """
