#!/usr/bin/env python3
import sys
import argparse
from llama_index.core import VectorStoreIndex, Settings
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
import chromadb

# --- Configuration ---
CHROMA_DB_PATH = "./oh_chroma_db"
EMBED_MODEL_NAME = "./bge-m3"

def main():
    """Retrieve relevant code snippets from the OpenHarmony RAG knowledge base.

    Parses the query and top-k from the command line, embeds the query with a
    local HuggingFace model, retrieves candidate nodes from a persistent
    ChromaDB collection, filters out markdown files client-side, and prints
    the surviving snippets to stdout. All progress logging goes to stderr so
    the stdout payload stays clean for copy/paste.
    """
    # --- Command-line argument parsing ---
    parser = argparse.ArgumentParser(
        description="Retrieve relevant code snippets from the OpenHarmony RAG knowledge base.",
        formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument("query", type=str, help="The question or query string to retrieve context for.")
    parser.add_argument(
        "-k", "--top_k",
        type=int,
        default=15,  # generous default so the markdown filter still leaves enough
        help="The number of top similar nodes to retrieve. Default: 15"
    )
    args = parser.parse_args()

    query: str = args.query
    top_k: int = args.top_k

    # --- 1. Load the embedding model (logs go to stderr) ---
    print("INFO: Loading embedding model...", file=sys.stderr)
    Settings.embed_model = HuggingFaceEmbedding(model_name=EMBED_MODEL_NAME)

    # --- 2. Connect to the vector database ---
    print(f"INFO: Connecting to vector database: {CHROMA_DB_PATH}", file=sys.stderr)
    db = chromadb.PersistentClient(path=CHROMA_DB_PATH)
    # NOTE(review): get_or_create_collection silently creates an EMPTY collection
    # when CHROMA_DB_PATH is wrong or the index was never built, yielding zero
    # results with no error. Consider db.get_collection(...) so a missing index
    # fails loudly — confirm no caller relies on the create-on-read behavior.
    chroma_collection = db.get_or_create_collection("openharmony_code")
    vector_store = ChromaVectorStore(chroma_collection=chroma_collection)

    # --- 3. Load the index from the vector store ---
    print("INFO: Loading index from vector store...", file=sys.stderr)
    index = VectorStoreIndex.from_vector_store(vector_store)

    # --- 4. Create a retriever ---
    # Over-fetch (5x) so that enough results survive the client-side markdown filter.
    increased_top_k = top_k * 5
    print(f"INFO: Creating retriever to find top {increased_top_k} nodes (before filtering)...", file=sys.stderr)
    retriever = index.as_retriever(similarity_top_k=increased_top_k)

    # --- 5. Retrieve and filter ---
    print(f"INFO: Retrieving snippets for query: '{query}'", file=sys.stderr)
    all_nodes = retriever.retrieve(query)

    # Client-side filter: ignore all .md files.
    non_markdown_nodes = [
        node for node in all_nodes
        if not node.metadata.get("file_path", "").endswith(".md")
    ]
    retrieved_nodes = non_markdown_nodes[:top_k]  # keep only the first k after filtering
    print(f"INFO: Retrieved {len(all_nodes)} nodes, filtered down to {len(non_markdown_nodes)} non-markdown nodes.", file=sys.stderr)

    # --- 6. Print the results cleanly to stdout ---
    print(f"# Top {len(retrieved_nodes)} most relevant snippets for question: '{query}'\n")
    if retrieved_nodes:
        for i, node in enumerate(retrieved_nodes):
            # Print rich metadata for each snippet.
            metadata = node.metadata
            # FIX: node.score may be None (retrievers are not required to set
            # it); formatting None with ':.4f' raises TypeError and aborted
            # the whole run. Fall back to 0.0 so output still renders.
            score = node.score if node.score is not None else 0.0
            path = metadata.get("file_path", "N/A")
            lang = metadata.get("language", "N/A")
            subsystem = metadata.get("subsystem", "N/A")

            print(f"# --- Snippet {i+1} (Score: {score:.4f} | Lang: {lang} | Subsystem: {subsystem}) ---")
            print(f"# Source: {path}")
            print(node.get_content())
            print("# ------------------------------------------------------------------------------------\n")
    else:
        print("# --- No relevant snippets found. ---")

    print("======================================================================================")
    print("\nINFO: Copy the context above and paste it to me with your original question.", file=sys.stderr)

if __name__ == "__main__":
    # Script entry point: run the retrieval pipeline and convert any failure
    # into a stderr message plus a non-zero exit code (stdout stays clean).
    try:
        main()
    except Exception as e:
        message = f"ERROR: An unexpected error occurred: {e}"
        print(message, file=sys.stderr)
        sys.exit(1)
