#!/usr/bin/env python3
import os
import logging
import sys
import json
from tqdm import tqdm
from typing import List, Dict, Set
from llama_index.core.schema import Document, BaseNode
from llama_index.core.embeddings import BaseEmbedding
from llama_index.core.node_parser.interface import NodeParser
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.node_parser import CodeSplitter
import chromadb

# --- 1. 日志和配置 ---
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))

PROJECT_ROOT = "."
BATCH_SIZE = 2048
CHROMA_DB_PATH = "./oh_chroma_db"
EMBED_MODEL_NAME = "./bge-m3"

# --- 2. Worker-process initialization and execution functions ---
# Per-worker-process globals: assigned once by init_worker() and then read by
# process_file(). Declared here (annotation-only, no value) so the module
# documents its process-level state up front.
worker_embed_model: BaseEmbedding          # embedding model loaded from EMBED_MODEL_NAME
worker_splitter: NodeParser                # C/C++ code splitter (tree-sitter 'cpp')
worker_path_to_subsystem_map: Dict[str, str]  # component destPath -> subsystem name

def init_worker(path_to_subsystem_map: Dict[str, str]):
    """Populate the per-process globals: embedding model, code splitter, subsystem map."""
    global worker_embed_model, worker_splitter, worker_path_to_subsystem_map

    print("INFO: Initializing worker with embedding model and C/C++ CodeSplitter...", file=sys.stderr)

    # The subsystem lookup table is passed in from the parent process.
    worker_path_to_subsystem_map = path_to_subsystem_map

    worker_embed_model = HuggingFaceEmbedding(model_name=EMBED_MODEL_NAME)

    # One splitter covers all C/C++ inputs: tree-sitter's 'cpp' grammar also
    # parses .h and .c files.
    worker_splitter = CodeSplitter(
        language="cpp",
        chunk_lines=50,
        chunk_lines_overlap=15,
        max_chars=2000,
    )

    print("INFO: Worker initialized.", file=sys.stderr)

def process_file(file_path: str) -> List[BaseNode]:
    """Read one C/C++ source file, split it into nodes, and embed each node.

    Uses the worker globals set up by init_worker(). Each node carries metadata
    (path, filename, extension, language, owning subsystem).

    Returns:
        The embedded nodes, or an empty list for empty files or on any error
        (errors are reported to stderr so the overall ingestion keeps going).
    """
    global worker_embed_model, worker_splitter, worker_path_to_subsystem_map
    try:
        with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
            text = f.read()

        if not text:
            return []

        dir_path, full_filename = os.path.split(file_path)
        _, ext = os.path.splitext(full_filename)

        # Longest-prefix match of the file's directory against known component
        # paths. BUG FIX: match whole path segments — the original used a bare
        # startswith(), so component "foo" would wrongly claim files under
        # "foobar/...". (Assumes destPath entries use os.sep separators, as
        # os.path.relpath output does — TODO confirm against bundle.json data.)
        subsystem = "unknown"
        relative_path = os.path.relpath(dir_path, PROJECT_ROOT)
        best_match = ""
        for component_path, ss_name in worker_path_to_subsystem_map.items():
            is_segment_prefix = (
                relative_path == component_path
                or relative_path.startswith(component_path + os.sep)
            )
            if is_segment_prefix and len(component_path) > len(best_match):
                best_match = component_path
                subsystem = ss_name

        metadata = {
            "file_path": file_path, "directory_path": dir_path, "file_name": full_filename,
            "extension": ext, "language": "c_cpp", "subsystem": subsystem
        }
        doc = Document(text=text, metadata=metadata)

        nodes = worker_splitter.get_nodes_from_documents([doc])

        if not nodes:
            return []

        # Embed all chunks of the file in one batch call.
        texts = [node.get_content() for node in nodes]
        embeddings = worker_embed_model.get_text_embedding_batch(texts, show_progress=False)
        for node, embedding in zip(nodes, embeddings):
            node.embedding = embedding

        return nodes
    except Exception as e:
        # Deliberately broad: one bad file must not abort the whole pipeline.
        print(f"ERROR processing {file_path}: {e}", file=sys.stderr)
        return []

# --- 3. 主进程逻辑 ---
if __name__ == "__main__":
    print("INFO: Building path-to-subsystem map from all bundle.json files...")
    path_to_subsystem = {}
    for dirpath, _, filenames in os.walk(PROJECT_ROOT, topdown=True):
        if "bundle.json" in filenames:
            try:
                bundle_path = os.path.join(dirpath, "bundle.json")
                with open(bundle_path, "r", encoding="utf-8") as f:
                    data = json.load(f)
                component_info = data.get("component", data)
                dest_path = component_info.get("segment", {}).get("destPath")
                subsystem_name = component_info.get("subsystem")
                if dest_path and subsystem_name:
                    path_to_subsystem[dest_path] = subsystem_name
            except Exception as e:
                print(f"WARNING: Could not parse {bundle_path}: {e}", file=sys.stderr)
    print(f"INFO: Path-to-subsystem map built. Found {len(path_to_subsystem)} components.")

    print(f"INFO: Initializing ChromaDB at {CHROMA_DB_PATH}")
    db = chromadb.PersistentClient(path=CHROMA_DB_PATH)
    chroma_collection = db.get_or_create_collection("openharmony_code")
    vector_store = ChromaVectorStore(chroma_collection=chroma_collection)

    print("INFO: Checking for already processed files in the database...")
    existing_files_res = chroma_collection.get(include=["metadatas"])
    processed_files_set: Set[str] = {metadata['file_path'] for metadata in existing_files_res['metadatas'] if metadata and 'file_path' in metadata}
    print(f"INFO: Found {len(processed_files_set)} already processed files.")

    dirs_to_process = [os.path.join(PROJECT_ROOT, "foundation"), os.path.join(PROJECT_ROOT, "base")]
    print(f"INFO: Target directories to scan: {dirs_to_process}")

    all_source_files = []
    # 核心修改：只索引C/C++相关文件
    required_exts_tuple = tuple([".c", ".cpp", ".h", ".hpp", ".cc"])
    exclude_dirs = set(['out', '.repo', '.git', 'prebuilts', 'third_party', 'node_modules'])

    for start_path in dirs_to_process:
        if not os.path.isdir(start_path):
            print(f"WARNING: Directory '{start_path}' does not exist. Skipping.")
            continue
        for dirpath, dirnames, filenames in os.walk(start_path, topdown=True):
            dirnames[:] = [d for d in dirnames if d not in exclude_dirs and 'test' not in d.lower() and 'lite' not in d.lower()]
            for filename in filenames:
                if filename.endswith(required_exts_tuple):
                    all_source_files.append(os.path.join(dirpath, filename))
    
    files_to_process = [f for f in all_source_files if f not in processed_files_set]
    print(f"INFO: Total C/C++ source files found: {len(all_source_files)}")
    print(f"INFO: Files to process after skipping existing ones: {len(files_to_process)}")

    if not files_to_process:
        print("INFO: No new files to process. All up-to-date. Exiting.")
        sys.exit(0)

    print(f"INFO: Starting ingestion for C/C++ files with CodeSplitter...")
    
    init_worker(path_to_subsystem)
    
    node_batch: List[BaseNode] = []
    
    for file_path in tqdm(files_to_process, desc="Parsing and Embedding C/C++"):
        try:
            nodes = process_file(file_path)
            if nodes:
                node_batch.extend(nodes)
            
            if len(node_batch) >= BATCH_SIZE:
                print(f"\nINFO: Reached batch size {len(node_batch)}. Adding to vector store...")
                vector_store.add(node_batch)
                print("INFO: Batch added successfully.")
                node_batch = []

        except Exception as e:
            print(f"\n--- !!! FAILED to process file: {file_path} !!! ---")
            print(f"Error: {e}")

    if node_batch:
        print(f"\nINFO: Adding the final batch of {len(node_batch)} nodes to the vector store...")
        vector_store.add(node_batch)
        print("INFO: Final batch added successfully.")

    print("\n==================================================")
    print("INFO: Ingestion pipeline finished!")
    print(f"Vector database is ready at: {CHROMA_DB_PATH}")
    print("You can now run query_rag.py to ask questions.")
    print("==================================================")