# python
import argparse
import json
import os
import sys
from typing import List, Dict, Tuple

import faiss
import numpy as np
import torch
from transformers import AutoTokenizer, AutoModel


def load_embedding_model(model_dir: str):
    """Load a HuggingFace tokenizer/encoder pair and prepare it for inference.

    The model is moved to the module-level DEVICE and switched to eval mode.

    NOTE(review): trust_remote_code=True executes code shipped with the model
    repository — only point this at trusted local checkpoints.
    """
    tok = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
    encoder = AutoModel.from_pretrained(model_dir, trust_remote_code=True)
    encoder = encoder.to(DEVICE)
    encoder.eval()
    return tok, encoder


def _mean_pooling(model_output, attention_mask):
    token_embeddings = model_output.last_hidden_state
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, dim=1)
    sum_mask = torch.clamp(input_mask_expanded.sum(dim=1), min=1e-9)
    return sum_embeddings / sum_mask


def embed_texts(texts: List[str], tokenizer, model) -> np.ndarray:
    """Embed `texts` and return a single (N, hidden_size) float array.

    Texts are processed in chunks of BATCH_SIZE, tokenized with truncation at
    4096 tokens, mean-pooled over the attention mask, then L2-normalized so a
    downstream inner-product search behaves like cosine similarity.

    Returns an empty (0, hidden_size) float32 array when `texts` is empty.
    (Fixed: the annotation previously claimed List[np.ndarray], but the
    function has always returned one stacked ndarray.)
    """
    chunks = []
    for i in range(0, len(texts), BATCH_SIZE):
        batch = texts[i:i + BATCH_SIZE]
        encoded = tokenizer(batch, padding=True, truncation=True, return_tensors="pt", max_length=4096)
        input_ids = encoded["input_ids"].to(DEVICE)
        attention_mask = encoded["attention_mask"].to(DEVICE)
        with torch.no_grad():
            out = model(input_ids=input_ids, attention_mask=attention_mask)
            pooled = _mean_pooling(out, attention_mask)
            # Normalize so inner product == cosine similarity in the index.
            pooled = torch.nn.functional.normalize(pooled, p=2, dim=1)
            chunks.append(pooled.cpu().numpy())
    if chunks:
        return np.vstack(chunks)
    return np.zeros((0, model.config.hidden_size), dtype=np.float32)


def build_combined_index(train_path: str, model_dir: str, out_dir: str, type: str, index_name: str, meta_name: str):
    """Embed patch texts from a JSONL training file and persist a FAISS index.

    One vector is added per usable record (RQ1 uses `diff_snippet`, otherwise
    `before_code`), and a line-aligned JSONL metadata file is written next to
    the index so search hits can be mapped back to their records.
    """
    os.makedirs(out_dir, exist_ok=True)
    tokenizer, model = load_embedding_model(model_dir)

    texts = []
    metas = []
    with open(train_path, 'r', encoding='utf-8') as f:
        for line in f:
            try:
                item = json.loads(line.strip())
            except Exception:
                # Skip malformed JSONL lines rather than aborting the build.
                continue
            # The text indexed here should be the pre-fix snippet, so that
            # retrieving the most similar patch later is most meaningful.
            if type == "RQ1":
                patch_text = item.get("diff_snippet")
            else:
                patch_text = item.get("before_code")
            if not patch_text:
                continue
            texts.append(patch_text)
            record = {
                "pr_number": item.get("pr_number"),
                "repo_name": item.get("repo_name"),
                "need_check": item.get("need_check"),
                "diff_snippet": item.get("diff_snippet"),
                "filename": (item.get("before_file") or item.get("after_file") or {}).get("filename"),
                "text_field": ("before_file.patch" if item.get("need_check") else "after_file.patch"),
                "before_code": item.get("before_code"),
            }
            # Keep the full original record too (this enlarges the meta file).
            record["original_item"] = item
            metas.append(record)

    if not texts:
        print("没有可用的 patch 文本，跳过索引构建。")
        return

    vecs = embed_texts(texts, tokenizer, model).astype('float32')
    # Vectors are L2-normalized, so inner product == cosine similarity.
    index = faiss.IndexFlatIP(vecs.shape[1])
    index.add(vecs)

    idx_path = os.path.join(out_dir, index_name)
    meta_path = os.path.join(out_dir, meta_name)
    faiss.write_index(index, idx_path)

    with open(meta_path, 'w', encoding='utf-8') as mf:
        mf.writelines(json.dumps(m, ensure_ascii=False) + '\n' for m in metas)

    print(f"保存索引到 `{idx_path}`，元数据到 `{meta_path}`，向量数: {len(metas)}")


def load_index_and_meta(index_path: str, meta_path: str) -> Tuple[faiss.Index, List[Dict]]:
    """Read a FAISS index plus its line-aligned JSONL metadata file."""
    index = faiss.read_index(index_path)
    metas: List[Dict] = []
    with open(meta_path, 'r', encoding='utf-8') as fh:
        for raw in fh:
            try:
                record = json.loads(raw.strip())
            except Exception:
                # Keep an empty placeholder so positions stay aligned with
                # the index's vector ids.
                record = {}
            metas.append(record)
    return index, metas


def search_experience(index_path: str, meta_path: str, query: str, tokenizer, model, top_k: int = 5):
    """Return up to top_k stored entries most similar to `query`.

    Each hit is a dict {"score": float, "meta": dict}; scores are inner
    products over normalized vectors (cosine similarity).
    """
    index, metas = load_index_and_meta(index_path, meta_path)
    query_vec = embed_texts([query], tokenizer, model).astype('float32')
    scores, positions = index.search(query_vec, top_k)
    hits = []
    for score, pos in zip(scores[0], positions[0]):
        # FAISS pads with -1 when fewer than top_k vectors exist.
        if pos < 0:
            continue
        hits.append({"score": float(score), "meta": metas[pos]})
    return hits


def update_experience(index_path, meta_path, experiences_str, RQ):
    """Merge experience strings into matching records of the metadata file.

    `experiences_str` is a JSON list of objects; each is matched against the
    JSONL records in `meta_path` by (pr_number, repo_name, snippet), where the
    snippet field is `diff_snippet` for RQ == "RQ1" and `before_code`
    otherwise. Matching records gain/overwrite an "experience" field and an
    "update_count" (0 on first write, incremented on later writes). Records
    are never added or removed, and the FAISS index file is untouched — the
    vectors are derived from code snippets, not from the experience text.

    Returns a str()-formatted summary dict with the number of updated rows.
    (Refactored: the former RQ1/else branches were ~25 duplicated lines
    differing only in the key field; behavior is unchanged.)
    """
    experiences = json.loads(experiences_str)

    # Load existing metadata; unparsable lines become {} so positions stay
    # aligned with the FAISS index rows.
    meta_list = []
    with open(meta_path, 'r', encoding='utf-8') as f:
        for line in f:
            try:
                meta_list.append(json.loads(line.strip()))
            except Exception:
                meta_list.append({})

    # RQ1 entries are keyed by diff snippet; everything else by pre-fix code.
    snippet_field = "diff_snippet" if RQ == "RQ1" else "before_code"

    # Index the incoming experiences for O(1) lookup per metadata record.
    exp_dict = {}
    for exp in experiences:
        key = (exp.get("pr_number"), exp.get("repo_name"), exp.get(snippet_field))
        exp_dict[key] = exp.get("experience")

    updated_count = 0
    for meta in meta_list:
        key = (meta.get("pr_number"), meta.get("repo_name"), meta.get(snippet_field))
        if key in exp_dict and exp_dict[key]:
            meta["experience"] = exp_dict[key]
            # 0 on the first update, +1 on each subsequent one.
            meta["update_count"] = meta.get("update_count", -1) + 1
            updated_count += 1

    # Write back all records (updated or not), one JSON object per line.
    with open(meta_path, 'w', encoding='utf-8') as f:
        for meta in meta_list:
            f.write(json.dumps(meta, ensure_ascii=False) + '\n')

    return str({"updated_count": updated_count, "message": f"成功更新了{updated_count}条经验数据"})


# Historical configuration examples, kept commented out for reference.
# MODEL_DIR = "/root/autodl-tmp/model/Qwen3-Embedding-0.6B"
# OUT_DIR = "./history_review_experience"

# TRAIN_DATA = "../../data/research_data/RQ2/train_data.jsonl"
# INDEX_NAME = "code_refinement_hre_index.faiss"
# META_NAME = "code_refinement_hre_meta.jsonl"

# TRAIN_DATA = "/root/autodl-tmp/refinement/data/research_data/RQ2/train_data.jsonl"
# INDEX_NAME = "need_check_hre_index.faiss"
# META_NAME = "need_check_refinement_hre_meta.jsonl"

# Device used by load_embedding_model / embed_texts; falls back to CPU.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# Embedding batch size; kept small since inputs may be up to 4096 tokens.
BATCH_SIZE = 4

if __name__ == "__main__":
    # CLI entry point. Results are printed as JSON on stdout; errors as a
    # JSON object on stderr with a non-zero exit code.
    parser = argparse.ArgumentParser(description="Build/search/update a FAISS code-review experience index.")
    parser.add_argument("--index", required=True, help="path to the FAISS index file")
    parser.add_argument("--meta", required=True, help="path to the JSONL metadata file")
    parser.add_argument("--model", required=True, help="embedding model directory")
    # Fixed: these three were required=True even though each operation only
    # needs a subset, forcing callers to pass dummy values. They are now
    # optional and validated per operation (backward compatible).
    parser.add_argument("--query", default=None, help="query text (search operation)")
    parser.add_argument("--topk", type=int, default=5)
    parser.add_argument("--operation", type=str, required=True, help="build_index | search | update")
    parser.add_argument("--experiences", default=None, help="JSON list of experiences (update operation)")
    parser.add_argument("--RQ", type=str, default=None, help="research-question id, e.g. RQ1 (update operation)")
    args = parser.parse_args()

    if args.operation == "build_index":
        # Index building is currently driven elsewhere; this branch is kept
        # as a successful no-op for CLI compatibility.
        try:
            sys.exit(0)
        except Exception as e:
            print(json.dumps({"error": str(e)}), file=sys.stderr)
            sys.exit(1)
    elif args.operation == "search":
        if args.query is None:
            print(json.dumps({"error": "--query is required for the search operation"}), file=sys.stderr)
            sys.exit(1)
        try:
            tok, mod = load_embedding_model(args.model)
            res = search_experience(args.index, args.meta, args.query, tok, mod, top_k=args.topk)
            print(json.dumps(res, ensure_ascii=False))
        except Exception as e:
            print(json.dumps({"error": str(e)}), file=sys.stderr)
            sys.exit(1)
    elif args.operation == "update":
        if args.experiences is None or args.RQ is None:
            print(json.dumps({"error": "--experiences and --RQ are required for the update operation"}), file=sys.stderr)
            sys.exit(1)
        try:
            # No embedding model needed: update only rewrites metadata.
            res = update_experience(args.index, args.meta, args.experiences, args.RQ)
            print(json.dumps(res, ensure_ascii=False))
        except Exception as e:
            print(json.dumps({"error": str(e)}), file=sys.stderr)
            sys.exit(1)
    else:
        # Fixed: an unknown operation previously fell through and exited 0.
        print(json.dumps({"error": f"unknown operation: {args.operation}"}), file=sys.stderr)
        sys.exit(1)
