import sys

from elasticsearch import Elasticsearch
from langchain_community.embeddings import OllamaEmbeddings
from langchain_elasticsearch import ElasticsearchStore
from langchain_text_splitters import MarkdownTextSplitter

sys.modules["sqlite3"] = __import__("pysqlite3")  # shadow the stdlib sqlite3 with pysqlite3 (presumably chromadb needs a newer SQLite — confirm)

import os

from log import LOGGER

os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'  # HuggingFace mirror reachable from mainland China
os.environ['HF_HUB_DISABLE_SYMLINKS'] = '1'  # avoid symlink errors on Windows

from langchain.chains import RetrievalQA
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import DirectoryLoader, TextLoader
from langchain_community.llms import Ollama
from langchain_community.vectorstores import Chroma
from chromadb import Settings

DFT_OLLAMA_URL = "http://localhost:11434"  # local Ollama server endpoint
EMBEDDING_MODEL = "bge-m3"  # alternative: "paraphrase-multilingual-MiniLM-L12-v2"
CHROMA_PERSIST_DIR = "./chroma_db"  # on-disk Chroma location, reused across runs
VEC_INDEX = "knowledge"  # Elasticsearch index name

# Chunking parameters (characters per chunk / overlap between chunks)
CHUNK_SZ = 300
OVERLAP_SZ = 50

# Vector-store backend selectors used throughout this module
VEC_DB_ES = 0
VEC_DB_CHROMA = 1

# Expected ES index mapping. "dims" must match the embedding model's output
# size (presumably 1024 for bge-m3 — confirm against the model card):
# PUT knowledge
# {
#   "mappings": {
#     "properties": {
#       "content": { "type": "text" },
#       "content_vector": {
#         "type": "dense_vector",
#         "dims": 1024,
#         "index": true,
#         "similarity": "cosine"
#       }
#     }
#   }
# }
TXT_FLD = "content"  # text field name in the mapping above
VEC_FLD = "content_vector"  # dense-vector field name in the mapping above


def build_vector_store(vec_db_type, splitter_cls=None):
    """Load ./data/*.txt, split into chunks, embed via Ollama, and persist
    the vectors into the selected vector store.

    Args:
        vec_db_type: VEC_DB_CHROMA for a local Chroma DB; anything else
            (e.g. VEC_DB_ES) writes to Elasticsearch.
        splitter_cls: optional splitter class accepting chunk_size /
            chunk_overlap; defaults to RecursiveCharacterTextSplitter.
    """
    # --------- 1. Load documents ---------
    loader = DirectoryLoader(
        "data",
        glob="*.txt",
        loader_cls=TextLoader,
        loader_kwargs={'autodetect_encoding': True}
    )
    docs = loader.load()
    LOGGER.info('total %d docs', len(docs))  # lazy %-args, not eager formatting

    # --------- 2. Split into chunks ---------
    cls = RecursiveCharacterTextSplitter if splitter_cls is None else splitter_cls
    text_splitter = cls(chunk_size=CHUNK_SZ, chunk_overlap=OVERLAP_SZ)
    splits = text_splitter.split_documents(docs)
    LOGGER.info("splits:%s", splits)

    # --------- 3. Embeddings (local Ollama, CPU-friendly) ---------
    # Originally HuggingFaceEmbeddings; switched to Ollama:
    # embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL)
    embeddings = OllamaEmbeddings(model=EMBEDDING_MODEL, base_url=DFT_OLLAMA_URL)
    LOGGER.info("loading embedding model success")

    # --------- 4. Build / persist the store ---------
    if vec_db_type == VEC_DB_CHROMA:
        # persist_directory could be omitted; kept so later runs can reopen the DB
        Chroma.from_documents(
            documents=splits,
            embedding=embeddings,
            persist_directory=CHROMA_PERSIST_DIR)
    else:
        import hashlib  # local import: only needed on the ES path

        es = Elasticsearch("http://localhost:9200")
        vectorstore = ElasticsearchStore(
            es_connection=es,
            index_name=VEC_INDEX,
            embedding=embeddings,  # use the local embedding model
            # NOTE(review): langchain_elasticsearch expects a strategy *object*
            # (e.g. DenseVectorStrategy()); a plain string may be rejected or
            # ignored depending on the library version — confirm.
            strategy="DenseVectorStrategy",
            # Field names must match the index mapping; the library defaults
            # would be "text" / "vector".
            query_field=TXT_FLD,
            vector_query_field=VEC_FLD,
        )

        # Use a content hash as _id so re-running the ingest is idempotent
        # (the original code promised this in a comment but never passed ids).
        ids = [hashlib.sha256(d.page_content.encode("utf-8")).hexdigest()
               for d in splits]
        vectorstore.add_documents(splits, ids=ids)

    LOGGER.info("persist to vectordb success")


def rag_and_qa(vec_db_type):
    """Attach a retriever over the chosen vector store to a local Ollama LLM
    and run an interactive question-answering loop on stdin.

    Args:
        vec_db_type: VEC_DB_CHROMA to query the local Chroma DB; anything
            else (e.g. VEC_DB_ES) queries Elasticsearch.
    """
    # Must be the same embedding model that was used at ingest time
    embeddings = OllamaEmbeddings(model=EMBEDDING_MODEL, base_url=DFT_OLLAMA_URL)
    LOGGER.info("loading embedding model success")

    if vec_db_type == VEC_DB_CHROMA:
        vectorstore = Chroma(
            persist_directory=CHROMA_PERSIST_DIR,
            embedding_function=embeddings,
            client_settings=Settings(
                persist_directory=CHROMA_PERSIST_DIR,
                is_persistent=True
            )
        )
    else:
        vectorstore = ElasticsearchStore(
            es_connection=Elasticsearch("http://localhost:9200"),
            index_name=VEC_INDEX,
            embedding=embeddings,  # local model embeds the query text
            # Field names must match the index mapping (defaults: text/vector)
            query_field=TXT_FLD,
            vector_query_field=VEC_FLD,
            # strategy=ApproxRetrievalStrategy(hybrid=True, rrf=True)
        )

    # Top-2 chunks are injected as context for each question
    retriever = vectorstore.as_retriever(search_kwargs={"k": 2})

    # --------- 5. LLM (local Ollama, runs on CPU) ---------
    llm = Ollama(model="qwen2.5:1.5b")  # requires: ollama pull qwen2.5:1.5b-q4_K_M
    LOGGER.info("run llm success")

    # --------- 6. Custom prompt (kept for reference) ---------
    # Each chain_type ships its own default prompt, so a custom one is optional:
    # template = "Answer the question concisely based on the context:\n{context}\nQuestion: {question}"
    # QA_PROMPT = PromptTemplate(
    #     input_variables=["context", "question"],
    #     template=template)

    # --------- 7. Build the RAG chain ---------
    qa_chain = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",  # "stuff" concatenates context; refine/map_reduce also exist
        retriever=retriever,
        # chain_type_kwargs={"prompt": QA_PROMPT},
        return_source_documents=True)
    LOGGER.info("attach rag success")

    # --------- 8. Interactive Q&A loop ---------
    while True:
        q = input("\n问题（输入 exit 退出）：")
        if q.strip().lower() == "exit":
            break
        if not q.strip():
            continue
        res = qa_chain.invoke({"query": q})
        print("AI：", res["result"])
        print("\n来源：")
        for doc in res["source_documents"]:
            # .get with fallback: not every loader sets "source"/"page" metadata,
            # and a bare ["source"] would raise KeyError on such documents
            print("  -", doc.metadata.get("source", "?"),
                  "第", doc.metadata.get("page", 0), "页")


def vectorize_by_es():
    """Ingest the ./data corpus into Elasticsearch using a Markdown-aware splitter."""
    build_vector_store(vec_db_type=VEC_DB_ES, splitter_cls=MarkdownTextSplitter)


def mock_es_query():
    """Run a hybrid (BM25 + kNN) query with RRF fusion against the knowledge
    index and print the matched chunks.

    Requires a local Ollama server (for the query embedding) and a local
    Elasticsearch node. NOTE(review): the "retriever" search parameter needs
    a recent Elasticsearch version — confirm against the cluster.
    """
    # Embed the query with the same model/endpoint used at ingest time
    # (module constants instead of repeating the literals).
    model = OllamaEmbeddings(
        model=EMBEDDING_MODEL,
        base_url=DFT_OLLAMA_URL
    )

    q = 'python三方包javalang的accept函数说明'
    vec = model.embed_query(q)

    es = Elasticsearch("http://localhost:9200")

    # Hybrid retrieval: a lexical (BM25) leg and a vector (kNN) leg, fused
    # with Reciprocal Rank Fusion server-side.
    retriever_task = {
        "rrf": {
            "retrievers": [
                {   # lexical leg: full-text match on the content field
                    "standard": {
                        "query": {
                            "match": {
                                TXT_FLD: {
                                    "query": q
                                }
                            }
                        }
                    }
                },
                {   # vector leg: approximate kNN on the dense-vector field
                    "knn": {
                        "field": VEC_FLD,
                        "query_vector": vec,
                        "k": 3,
                        "num_candidates": 100
                    }
                }
            ],
            "rank_window_size": 100,
            "rank_constant": 60
        }
    }

    resp = es.search(
        index=VEC_INDEX,
        retriever=retriever_task,
        _source=[TXT_FLD],  # fetch only the field we print
        size=5
    )

    hits = resp["hits"]["hits"]
    print('total %d results' % len(hits))
    print('\n=========\n'.join(h["_source"][TXT_FLD] for h in hits))


if __name__ == '__main__':
    # Entry point: uncomment a step to (re)build the ES index or smoke-test
    # retrieval; by default runs the interactive QA loop against ES.
    # vectorize_by_es()
    # mock_es_query()
    rag_and_qa(VEC_DB_ES)