|
|
|
|
|
"""Evaluate a SentenceTransformer model on NanoCodeSearchNet (NDCG@10). |
|
|
|
|
|
This mirrors the NanoBEIR evaluation style from sentence-transformers, adapted to |
|
|
hotchpotch/NanoCodeSearchNet's layout (configs: corpus/queries/qrels, splits: NanoCodeSearchNet{Lang}). |
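
Example (script filename and model id are illustrative; the flags are this script's own):

    python nano_codesearchnet_eval.py \
        --model-path sentence-transformers/all-MiniLM-L6-v2 \
        --langs python go \
        --output results.json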
|
|
""" |
|
|
|
|
|
from __future__ import annotations |
|
|
|
|
|
import argparse |
|
|
import json |
|
|
import logging |
|
|
import time |
|
|
from collections.abc import Callable, Sequence |
|
|
from typing import Any, cast |
|
|
|
|
|
import numpy as np |
|
|
from sentence_transformers import SentenceTransformer |
|
|
from sentence_transformers.evaluation import InformationRetrievalEvaluator |
|
|
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator |
|
|
from sentence_transformers.similarity_functions import SimilarityFunction |
|
|
from sentence_transformers.util import is_datasets_available |
|
|
from torch import Tensor |
|
|
from tqdm import tqdm |
|
|
|
|
|
DATASET_ID = "hotchpotch/NanoCodeSearchNet" |
|
|
|
|
|
LANGS = ["Go", "Java", "JavaScript", "PHP", "Python", "Ruby"] |
|
|
_LANGS_BY_LOWER = {name.lower(): name for name in LANGS} |
|
|
ALIASES = { |
|
|
"js": "JavaScript", |
|
|
"py": "Python", |
|
|
} |
|
|
|
|
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
|
|
|
def _normalize_lang(name: str) -> str: |
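    # Accept aliases ("py", "js") and any letter-casing; unknown names pass through unchanged
    # and are rejected later by _validate_dataset_names.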
|
|
key = name.lower() |
|
|
key = ALIASES.get(key, key) |
|
|
return _LANGS_BY_LOWER.get(key, name) |
|
|
|
|
|
|
|
|
def _split_name(lang: str) -> str: |
|
|
return f"NanoCodeSearchNet{lang}" |
|
|
|
|
|
|
|
|
def _human_readable(lang: str) -> str: |
|
|
return f"NanoCodeSearchNet-{lang}" |
|
|
|
|
|
|
|
|
class NanoCodeSearchNetEvaluator(SentenceEvaluator): |
|
|
"""Evaluate a model on NanoCodeSearchNet across languages.""" |
|
|
|
|
|
information_retrieval_class = InformationRetrievalEvaluator |
|
|
|
|
|
def __init__( |
|
|
self, |
|
|
dataset_names: list[str] | None = None, |
|
|
dataset_id: str = DATASET_ID, |
|
|
mrr_at_k: list[int] | None = None, |
|
|
ndcg_at_k: list[int] | None = None, |
|
|
accuracy_at_k: list[int] | None = None, |
|
|
precision_recall_at_k: list[int] | None = None, |
|
|
map_at_k: list[int] | None = None, |
|
|
show_progress_bar: bool = False, |
|
|
batch_size: int = 32, |
|
|
write_csv: bool = True, |
|
|
truncate_dim: int | None = None, |
|
|
score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] | None = None, |
|
|
main_score_function: str | SimilarityFunction | None = None, |
|
|
aggregate_fn: Callable[[list[float]], float] = np.mean, |
|
|
aggregate_key: str = "mean", |
|
|
query_prompts: str | dict[str, str] | None = None, |
|
|
corpus_prompts: str | dict[str, str] | None = None, |
|
|
write_predictions: bool = False, |
|
|
ndcg_only: bool = True, |
|
|
) -> None: |
|
|
super().__init__() |
|
|
|
|
|
if dataset_names is None: |
|
|
dataset_names = LANGS |
|
|
self.dataset_names = [_normalize_lang(name) for name in dataset_names] |
|
|
self.dataset_id = dataset_id |
|
|
self.aggregate_fn = aggregate_fn |
|
|
self.aggregate_key = aggregate_key |
|
|
self.write_csv = write_csv |
|
|
|
|
|
self.query_prompts = self._normalize_prompts(query_prompts) |
|
|
self.corpus_prompts = self._normalize_prompts(corpus_prompts) |
|
|
|
|
|
self.show_progress_bar = show_progress_bar |
|
|
self.score_functions = score_functions or {} |
|
|
self.score_function_names = sorted(self.score_functions.keys()) |
|
|
self.main_score_function = main_score_function |
|
|
self.truncate_dim = truncate_dim |
|
|
self.name = f"NanoCodeSearchNet_{aggregate_key}" |
|
|
if self.truncate_dim: |
|
|
self.name += f"_{self.truncate_dim}" |
|
|
|
|
|
self.ndcg_only = ndcg_only |
|
|
self.mrr_at_k = mrr_at_k or [10] |
|
|
self.ndcg_at_k = ndcg_at_k or [10] |
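        # With ndcg_only, the remaining metric families are still computed by the per-language
        # InformationRetrievalEvaluator, but only at a single cutoff; __call__ filters the
        # extra keys out of the returned results.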
|
|
if ndcg_only: |
|
|
self.accuracy_at_k = [10] |
|
|
self.precision_recall_at_k = [10] |
|
|
self.map_at_k = [10] |
|
|
else: |
|
|
self.accuracy_at_k = accuracy_at_k or [1, 3, 5, 10] |
|
|
self.precision_recall_at_k = precision_recall_at_k or [1, 3, 5, 10] |
|
|
self.map_at_k = map_at_k or [100] |
|
|
|
|
|
self._validate_dataset_names() |
|
|
self._validate_prompts() |
|
|
|
|
|
ir_kwargs = { |
|
|
"mrr_at_k": self.mrr_at_k, |
|
|
"ndcg_at_k": self.ndcg_at_k, |
|
|
"accuracy_at_k": self.accuracy_at_k, |
|
|
"precision_recall_at_k": self.precision_recall_at_k, |
|
|
"map_at_k": self.map_at_k, |
|
|
"show_progress_bar": show_progress_bar, |
|
|
"batch_size": batch_size, |
|
|
"write_csv": write_csv, |
|
|
"truncate_dim": truncate_dim, |
|
|
"score_functions": score_functions, |
|
|
"main_score_function": main_score_function, |
|
|
"write_predictions": write_predictions, |
|
|
} |
|
|
|
|
|
self.evaluators = [ |
|
|
self._load_dataset(name, **ir_kwargs) |
|
|
for name in tqdm(self.dataset_names, desc="Loading NanoCodeSearchNet", leave=False) |
|
|
] |
|
|
|
|
|
self.csv_file = f"NanoCodeSearchNet_evaluation_{aggregate_key}_results.csv" |
|
|
self.csv_headers = ["epoch", "steps"] |
|
|
self._append_csv_headers(self.score_function_names) |
|
|
|
|
|
def _normalize_prompts(self, prompts: str | dict[str, str] | None) -> dict[str, str] | None: |
|
|
if prompts is None: |
|
|
return None |
|
|
if isinstance(prompts, str): |
|
|
return {name: prompts for name in self.dataset_names} |
|
|
normalized: dict[str, str] = {} |
|
|
for key, value in prompts.items(): |
|
|
normalized[_normalize_lang(key)] = value |
|
|
return normalized |
|
|
|
|
|
    def _append_csv_headers(self, score_function_names: list[str]) -> None:
|
|
for score_name in score_function_names: |
|
|
for k in self.accuracy_at_k: |
|
|
self.csv_headers.append(f"{score_name}-Accuracy@{k}") |
|
|
for k in self.precision_recall_at_k: |
|
|
self.csv_headers.append(f"{score_name}-Precision@{k}") |
|
|
self.csv_headers.append(f"{score_name}-Recall@{k}") |
|
|
for k in self.mrr_at_k: |
|
|
self.csv_headers.append(f"{score_name}-MRR@{k}") |
|
|
for k in self.ndcg_at_k: |
|
|
self.csv_headers.append(f"{score_name}-NDCG@{k}") |
|
|
for k in self.map_at_k: |
|
|
self.csv_headers.append(f"{score_name}-MAP@{k}") |
|
|
|
|
|
def _load_dataset(self, lang: str, **ir_kwargs) -> InformationRetrievalEvaluator: |
|
|
if not is_datasets_available(): |
|
|
raise ValueError("datasets is required; install via `pip install datasets`.") |
|
|
|
|
|
from datasets import load_dataset |
|
|
|
|
|
split_name = _split_name(lang) |
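        # All three configs ("corpus", "queries", "qrels") share the same per-language split,
        # e.g. "NanoCodeSearchNetGo".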
|
|
t0 = time.perf_counter() |
|
|
corpus_ds = load_dataset(self.dataset_id, "corpus", split=split_name) |
|
|
queries_ds = load_dataset(self.dataset_id, "queries", split=split_name) |
|
|
qrels_ds = load_dataset(self.dataset_id, "qrels", split=split_name) |
|
|
logger.info("[NanoCodeSearchNet] loaded datasets for %s in %.2fs", lang, time.perf_counter() - t0) |
|
|
|
|
|
corpus_dict = {} |
|
|
t1 = time.perf_counter() |
|
|
for sample in corpus_ds: |
|
|
row = cast(dict[str, Any], sample) |
|
|
text = row.get("text") |
|
|
if text: |
|
|
corpus_dict[row["_id"]] = text |
|
|
|
|
|
queries_dict = {} |
|
|
for sample in queries_ds: |
|
|
row = cast(dict[str, Any], sample) |
|
|
text = row.get("text") |
|
|
if text: |
|
|
queries_dict[row["_id"]] = text |
|
|
|
|
|
qrels_dict: dict[str, set[str]] = {} |
|
|
for sample in qrels_ds: |
|
|
row = cast(dict[str, Any], sample) |
|
|
qid = row["query-id"] |
|
|
cids = row["corpus-id"] |
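            # A qrels row may reference one corpus id or a list of ids; collect into a set so
            # duplicates collapse.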
|
|
if isinstance(cids, list): |
|
|
qrels_dict.setdefault(qid, set()).update(cids) |
|
|
else: |
|
|
qrels_dict.setdefault(qid, set()).add(cids) |
|
|
|
|
|
logger.info( |
|
|
"[NanoCodeSearchNet] materialized dicts for %s in %.2fs (corpus=%d, queries=%d, qrels=%d)", |
|
|
lang, |
|
|
time.perf_counter() - t1, |
|
|
len(corpus_dict), |
|
|
len(queries_dict), |
|
|
len(qrels_dict), |
|
|
) |
|
|
|
|
|
if self.query_prompts is not None: |
|
|
ir_kwargs["query_prompt"] = self.query_prompts.get(lang, None) |
|
|
if self.corpus_prompts is not None: |
|
|
ir_kwargs["corpus_prompt"] = self.corpus_prompts.get(lang, None) |
|
|
|
|
|
evaluator = InformationRetrievalEvaluator( |
|
|
queries_dict, |
|
|
corpus_dict, |
|
|
qrels_dict, |
|
|
name=_split_name(lang), |
|
|
**ir_kwargs, |
|
|
) |
|
|
return evaluator |
|
|
|
|
|
def _validate_dataset_names(self) -> None: |
|
|
valid = set(LANGS) |
|
|
missing = [name for name in self.dataset_names if name not in valid] |
|
|
if missing: |
|
|
raise ValueError(f"Invalid language(s): {missing}. Valid: {sorted(valid)}") |
|
|
|
|
|
def _validate_prompts(self) -> None: |
|
|
error_msg = "" |
|
|
if self.query_prompts is not None: |
|
|
missing = [lang for lang in self.dataset_names if lang not in self.query_prompts] |
|
|
if missing: |
|
|
error_msg += f"Missing query prompts for: {missing}\n" |
|
|
if self.corpus_prompts is not None: |
|
|
missing = [lang for lang in self.dataset_names if lang not in self.corpus_prompts] |
|
|
if missing: |
|
|
error_msg += f"Missing corpus prompts for: {missing}\n" |
|
|
if error_msg: |
|
|
raise ValueError(error_msg.strip()) |
|
|
|
|
|
def __call__( |
|
|
self, |
|
|
model: SentenceTransformer, |
|
|
output_path: str | None = None, |
|
|
epoch: int = -1, |
|
|
steps: int = -1, |
|
|
*args, |
|
|
**kwargs, |
|
|
) -> dict[str, float]: |
|
|
per_metric_agg: dict[str, list[float]] = {} |
|
|
per_dataset: dict[str, float] = {} |
|
|
|
|
|
        if not self.score_functions:
|
|
self.score_functions = {model.similarity_fn_name: model.similarity} |
|
|
self.score_function_names = [model.similarity_fn_name] |
|
|
self._append_csv_headers(self.score_function_names) |
|
|
|
|
|
for evaluator in tqdm(self.evaluators, desc="Evaluating NanoCodeSearchNet", disable=not self.show_progress_bar): |
|
|
logger.info("Evaluating %s", evaluator.name) |
|
|
results = evaluator(model, output_path, epoch, steps) |
|
|
for key, value in results.items(): |
|
|
per_dataset[key] = value |
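                # Keys look like "NanoCodeSearchNetGo_cosine_ndcg@10"; drop the dataset prefix
                # so the same metric can be aggregated across languages.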
|
|
|
|
|
if "_" in key: |
|
|
_, metric_name = key.split("_", 1) |
|
|
else: |
|
|
metric_name = key |
|
|
per_metric_agg.setdefault(metric_name, []).append(value) |
|
|
|
|
|
agg_results = { |
|
|
f"{self.name}_{metric}": self.aggregate_fn(vals) |
|
|
for metric, vals in per_metric_agg.items() |
|
|
} |
|
|
|
|
|
if not self.primary_metric: |
|
|
main_score_fn = self.main_score_function |
|
|
            main = None
            if main_score_fn is not None:
                main = main_score_fn.value if isinstance(main_score_fn, SimilarityFunction) else str(main_score_fn)
|
|
ndcg_target = f"ndcg@{max(self.ndcg_at_k)}" |
|
|
candidates = [k for k in agg_results if k.endswith(ndcg_target)] |
|
|
if main: |
|
|
preferred = [k for k in candidates if main in k] |
|
|
if preferred: |
|
|
self.primary_metric = preferred[0] |
|
|
if not self.primary_metric and candidates: |
|
|
self.primary_metric = candidates[0] |
|
|
|
|
|
if self.primary_metric and self.primary_metric in agg_results: |
|
|
logger.info("Primary %s: %.4f", self.primary_metric, agg_results[self.primary_metric]) |
|
|
|
|
|
per_dataset.update(agg_results) |
|
|
if self.ndcg_only: |
|
|
            ndcg_suffix = f"ndcg@{max(self.ndcg_at_k)}"
            per_dataset = {k: v for k, v in per_dataset.items() if ndcg_suffix in k}
|
|
return per_dataset |
|
|
|
|
|
|
|
|
def parse_args(argv: Sequence[str] | None = None) -> argparse.Namespace:
|
|
parser = argparse.ArgumentParser(description="Evaluate a model on NanoCodeSearchNet") |
|
|
parser.add_argument("--model-path", required=True, help="Path or HF id for SentenceTransformer model") |
|
|
parser.add_argument("--langs", nargs="*", default=None, help="Languages (default: all)") |
|
|
parser.add_argument("--batch-size", type=int, default=128, help="Eval batch size") |
|
|
parser.add_argument("--output", default=None, help="Optional JSON output path for metrics") |
|
|
parser.add_argument("--show-progress", action="store_true", help="Show per-language tqdm during eval") |
|
|
parser.add_argument( |
|
|
"--no-autocast", |
|
|
action="store_true", |
|
|
help="Disable torch.autocast (default: enabled on CUDA with bf16 if available)", |
|
|
) |
|
|
parser.add_argument( |
|
|
"--autocast-dtype", |
|
|
choices=["bf16", "fp16"], |
|
|
default="bf16", |
|
|
help="autocast dtype (bf16 or fp16)", |
|
|
) |
|
|
parser.add_argument("--query-prompt", default=None, help="Prefix applied to queries") |
|
|
parser.add_argument("--corpus-prompt", default=None, help="Prefix applied to corpus/passages") |
|
|
parser.add_argument( |
|
|
"--all-metrics", |
|
|
action="store_true", |
|
|
help="Return all metrics (default: ndcg@10 only)", |
|
|
) |
|
|
parser.add_argument( |
|
|
"--trust-remote-code", |
|
|
action="store_true", |
|
|
help="Pass trust_remote_code=True to SentenceTransformer (needed for some HF models)", |
|
|
) |
|
|
    return parser.parse_args(argv)
|
|
|
|
|
|
|
|
def main(argv: Sequence[str] | None = None) -> None: |
|
|
    args = parse_args(argv)
|
|
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s") |
|
|
|
|
|
langs = args.langs or LANGS |
|
|
|
|
|
model = SentenceTransformer(args.model_path, prompts=None, trust_remote_code=args.trust_remote_code) |
|
|
model.eval() |
|
|
|
|
|
evaluator = NanoCodeSearchNetEvaluator( |
|
|
dataset_names=langs, |
|
|
batch_size=args.batch_size, |
|
|
show_progress_bar=args.show_progress, |
|
|
write_csv=False, |
|
|
query_prompts=args.query_prompt if args.query_prompt else None, |
|
|
corpus_prompts=args.corpus_prompt if args.corpus_prompt else None, |
|
|
ndcg_only=not args.all_metrics, |
|
|
) |
|
|
|
|
|
use_autocast = not args.no_autocast |
|
|
autocast_dtype = {"bf16": "bfloat16", "fp16": "float16"}[args.autocast_dtype] |
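    # bf16 is the safer mixed-precision default (wider exponent range); fp16 is offered for
    # hardware without bf16 support.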
|
|
autocast_ctx = None |
|
|
if use_autocast: |
|
|
import torch |
|
|
|
|
|
device_type = "cuda" if torch.cuda.is_available() else "cpu" |
|
|
autocast_ctx = torch.autocast(device_type=device_type, dtype=getattr(torch, autocast_dtype)) |
|
|
|
|
|
if autocast_ctx: |
|
|
with autocast_ctx: |
|
|
results = evaluator(model) |
|
|
else: |
|
|
results = evaluator(model) |
|
|
|
|
|
score_fn = model.similarity_fn_name |
|
|
ndcg_key_suffix = f"{score_fn}_ndcg@10" |
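    # Per-language NDCG@10 keys are reconstructed from the flat results dict, e.g.
    # "NanoCodeSearchNetGo_cosine_ndcg@10".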
|
|
|
|
|
per_lang = {} |
|
|
for lang in evaluator.dataset_names: |
|
|
key = f"{_split_name(lang)}_{ndcg_key_suffix}" |
|
|
if key in results: |
|
|
per_lang[lang] = results[key] |
|
|
|
|
|
avg = float(np.mean(list(per_lang.values()))) if per_lang else float("nan") |
|
|
|
|
|
print("NanoCodeSearchNet Evaluation (NDCG@10)") |
|
|
print(f"Model: {args.model_path}") |
|
|
for lang in evaluator.dataset_names: |
|
|
val = per_lang.get(lang) |
|
|
if val is None: |
|
|
continue |
|
|
print(f"{_split_name(lang)}_{ndcg_key_suffix}: {val:.4f}") |
|
|
print(f"NanoCodeSearchNet_mean_{ndcg_key_suffix}: {avg:.4f}") |
|
|
|
|
|
if args.output: |
|
|
        # Cast to plain floats so numpy scalar values serialize cleanly to JSON.
        payload = {
            "model": args.model_path,
            "avg": avg,
            "per_lang": {k: float(v) for k, v in per_lang.items()},
            "metrics": {k: float(v) for k, v in results.items()},
        }
|
|
with open(args.output, "w", encoding="utf-8") as f: |
|
|
json.dump(payload, f, ensure_ascii=False, indent=2) |
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
main() |
|
|
|