# python
import os
import json
from typing import List, Dict, Tuple
import numpy as np
import faiss
import torch
from transformers import AutoTokenizer, AutoModel
import argparse, sys

def load_embedding_model(model_dir: str, DEVICE: str = "cuda"):
    """Load a HuggingFace tokenizer/model pair and place the model on DEVICE.

    Args:
        model_dir: Local path or hub id of the embedding model.
        DEVICE: Device string the model is moved to (default "cuda").

    Returns:
        Tuple of (tokenizer, model), with the model in eval mode.
    """
    tok = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
    mdl = AutoModel.from_pretrained(model_dir, trust_remote_code=True)
    mdl.to(DEVICE)
    # eval() returns the module itself, so this is equivalent to a separate call.
    return tok, mdl.eval()

def _mean_pooling(model_output, attention_mask):
    token_embeddings = model_output.last_hidden_state
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, dim=1)
    sum_mask = torch.clamp(input_mask_expanded.sum(dim=1), min=1e-9)
    return sum_embeddings / sum_mask

def embed_texts(texts: List[str], tokenizer, model, BATCH_SIZE: int = 1, DEVICE: str = None) -> np.ndarray:
    """Embed texts with masked mean pooling followed by L2 normalization.

    Args:
        texts: Input strings to embed.
        tokenizer: HuggingFace tokenizer (callable on a batch of strings).
        model: HuggingFace model producing ``last_hidden_state``.
        BATCH_SIZE: Number of texts per forward pass.
        DEVICE: Target device. When None (new default), the model's own device
            is used. The old default of "cuda" crashed on CPU-only hosts
            because both in-file callers omit this argument while the model
            may have been loaded on CPU.

    Returns:
        Array of shape (len(texts), hidden_size); an empty
        (0, hidden_size) float32 array when `texts` is empty.
    """
    if DEVICE is None:
        # Derive the device from the model so inputs always match the weights.
        DEVICE = next(model.parameters()).device
    embeddings = []
    for i in range(0, len(texts), BATCH_SIZE):
        batch = texts[i:i + BATCH_SIZE]
        encoded = tokenizer(batch, padding=True, truncation=True,
                            return_tensors="pt", max_length=4096)
        input_ids = encoded["input_ids"].to(DEVICE)
        attention_mask = encoded["attention_mask"].to(DEVICE)
        with torch.no_grad():
            out = model(input_ids=input_ids, attention_mask=attention_mask)
            pooled = _mean_pooling(out, attention_mask)
            # Unit-normalize so downstream inner product equals cosine similarity.
            pooled = torch.nn.functional.normalize(pooled, p=2, dim=1)
            embeddings.append(pooled.cpu().numpy())
    if embeddings:
        return np.vstack(embeddings)
    return np.zeros((0, model.config.hidden_size), dtype=np.float32)


def build_combined_index(train_path: str, model_dir: str, out_dir: str,
                         index_name: str = "combined_index.faiss",
                         meta_name: str = "combined_meta.jsonl"):
    """Build a FAISS inner-product index over ``before_code`` snippets.

    Reads a JSONL training file, embeds each record's ``before_code`` field,
    and writes a FAISS index plus a JSONL metadata sidecar (one JSON object
    per vector, same order as the index ids) into `out_dir`.

    Args:
        train_path: Path to the JSONL training data.
        model_dir: Embedding model directory passed to transformers.
        out_dir: Output directory (created if missing).
        index_name: Output file name for the FAISS index. Previously this
            function read a module-level INDEX_NAME that was never defined
            (its definition was commented out), raising NameError; it is now
            a parameter with a default.
        meta_name: Output file name for the metadata JSONL (same fix).
    """
    os.makedirs(out_dir, exist_ok=True)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    tokenizer, model = load_embedding_model(model_dir, device)

    texts = []
    metas = []
    with open(train_path, 'r', encoding='utf-8') as f:
        for line in f:
            try:
                item = json.loads(line.strip())
            except Exception:
                # Skip malformed JSONL lines instead of aborting the build.
                continue
            # The pre-fix code snippet is what future queries will look like,
            # so it is the most sensible text to index for similarity search.
            patch_text = item.get("before_code")
            if not patch_text:
                continue
            texts.append(patch_text)
            meta = {
                "pr_number": item.get("pr_number"),
                "need_check": item.get("need_check"),
                "filename": (item.get("before_file") or item.get("after_file") or {}).get("filename"),
                "text_field": ("before_file.patch" if item.get("need_check") else "after_file.patch")
            }
            # Keeping the full record enlarges the meta file but lets
            # retrieval return complete examples.
            meta["original_item"] = item
            metas.append(meta)

    if not texts:
        print("没有可用的 patch 文本，跳过索引构建。")
        return

    # Pass the device explicitly so embedding also works on CPU-only hosts.
    vecs = embed_texts(texts, tokenizer, model, DEVICE=device).astype('float32')
    dim = vecs.shape[1]
    index = faiss.IndexFlatIP(dim)  # vectors are L2-normalized, so inner product == cosine
    index.add(vecs)

    idx_path = os.path.join(out_dir, index_name)
    meta_path = os.path.join(out_dir, meta_name)
    faiss.write_index(index, idx_path)

    with open(meta_path, 'w', encoding='utf-8') as mf:
        for m in metas:
            mf.write(json.dumps(m, ensure_ascii=False) + '\n')

    print(f"保存索引到 `{idx_path}`，元数据到 `{meta_path}`，向量数: {len(metas)}")

def load_index_and_meta(index_path: str, meta_path: str) -> Tuple[faiss.Index, List[Dict]]:
    """Read a FAISS index and its JSONL metadata sidecar from disk.

    Metadata lines that fail to parse are kept as empty dicts so that list
    positions stay aligned with the index's vector ids.

    Args:
        index_path: Path to a FAISS index file.
        meta_path: Path to the metadata JSONL file.

    Returns:
        Tuple of (faiss index, list of metadata dicts).
    """
    index = faiss.read_index(index_path)
    records: List[Dict] = []
    with open(meta_path, 'r', encoding='utf-8') as fh:
        for raw in fh:
            try:
                parsed = json.loads(raw.strip())
            except Exception:
                parsed = {}
            records.append(parsed)
    return index, records

def search_rag(index_path: str, meta_path: str, query: str, tokenizer, model, top_k: int = 5):
    """Retrieve the top_k indexed entries most similar to `query`.

    Args:
        index_path: Path to the FAISS index file.
        meta_path: Path to the metadata JSONL aligned with the index.
        query: Query text to embed and search with.
        tokenizer: Tokenizer matching `model`.
        model: Embedding model (device is detected from its parameters).
        top_k: Maximum number of results to return.

    Returns:
        List of {"score": float, "meta": dict}, best match first.
    """
    idx, metas = load_index_and_meta(index_path, meta_path)
    # Embed on the model's own device; relying on embed_texts' implicit
    # "cuda" default crashed on CPU-only machines.
    device = next(model.parameters()).device
    q_vec = embed_texts([query], tokenizer, model, DEVICE=device).astype('float32')
    D, I = idx.search(q_vec, top_k)
    results = []
    for score, pos in zip(D[0], I[0]):
        if pos < 0:
            # FAISS pads with -1 when fewer than top_k vectors exist.
            continue
        results.append({"score": float(score), "meta": metas[pos]})
    return results

if __name__ == "__main__":
    # CLI entry point: embed a query and search a prebuilt FAISS index.
    parser = argparse.ArgumentParser(description="Query a prebuilt patch RAG index.")
    parser.add_argument("--index", required=True, help="Path to the FAISS index file")
    parser.add_argument("--meta", required=True, help="Path to the metadata JSONL file")
    # --model was previously optional, but load_embedding_model(None) fails
    # with an opaque transformers error; require it for a clear CLI message.
    parser.add_argument("--model", required=True, help="Embedding model directory")
    parser.add_argument("--query", required=True, help="Query text to search for")
    parser.add_argument("--topk", type=int, default=5, help="Number of results to return")
    args = parser.parse_args()
    try:
        tok, mod = load_embedding_model(args.model, "cuda" if torch.cuda.is_available() else "cpu")
        res = search_rag(args.index, args.meta, args.query, tok, mod, top_k=args.topk)
        print(json.dumps(res, ensure_ascii=False))
    except Exception as e:
        # Top-level boundary: report the failure as JSON on stderr, exit non-zero.
        print(json.dumps({"error": str(e)}), file=sys.stderr)
        sys.exit(1)