File size: 3,450 Bytes
da6e1bc
2cdada4
 
 
68a93b5
4d13673
68a93b5
8274634
2cdada4
da6e1bc
68a93b5
34b05c6
2cdada4
c2eeeac
2cdada4
34b05c6
 
c790fdb
2cdada4
 
088f96f
2cdada4
 
 
34b05c6
 
 
 
2cdada4
 
 
 
 
 
34b05c6
2cdada4
 
da6e1bc
2cdada4
 
 
 
 
a0d1624
2cdada4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
da6e1bc
088f96f
da6e1bc
2f9dee1
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
import asyncio
import time
from datetime import timedelta
from os import environ

import pandas as pd
from languages import languages
from models import models
from rich import print
from tasks import tasks
from tqdm.asyncio import tqdm_asyncio
from datasets_.util import load, save, get_valid_task_languages
from tqdm import tqdm

# Evaluation scope, overridable via environment variables:
#   N_SENTENCES — sentences evaluated per (task, model, language) triple
#   N_LANGUAGES — top-N languages (by order in `languages`) to include
#   N_MODELS    — top-N models (by order in `models`) to include
n_sentences = int(environ.get("N_SENTENCES", 10))
n_languages = int(environ.get("N_LANGUAGES", 1000))
n_models = int(environ.get("N_MODELS", 40))

async def evaluate():
    """Run all missing (task, model, language, sentence) evaluations.

    Builds the full combination grid for the configured scope
    (``n_sentences`` × ``n_languages`` × ``n_models``), drops combinations
    already present in the cached ``results-detailed`` log, runs the
    remaining ones in batches of async task coroutines, appends the new
    scores to the immutable log, and saves per-(model, language, task,
    metric) aggregates.

    Returns:
        pd.DataFrame: aggregated scores with columns
        ["model", "bcp_47", "task", "metric", "score", "origin"].
    """
    start_time = time.time()

    # Pre-compute model -> supported tasks once, to avoid repeated
    # DataFrame lookups inside the grid comprehension below.
    model_tasks = models.set_index("id")["tasks"].to_dict()

    # Pre-compute the set of valid languages for each task.
    valid_task_langs = {task_name: get_valid_task_languages(task_name) for task_name in tasks}

    # All combinations that need evaluation (invalid lang×task combos filtered out).
    combis = [
        (task_name, model, lang.bcp_47, i)
        for i in range(n_sentences)
        for lang in languages.head(n_languages).itertuples()
        for task_name, task in tasks.items()
        for model in models.iloc[:n_models]["id"]
        if task_name in model_tasks[model] and lang.bcp_47 in valid_task_langs[task_name]
    ]
    combis = pd.DataFrame(combis, columns=["task", "model", "bcp_47", "sentence_nr"])

    # Load cached results and drop combinations that are already done.
    old_results = load("results-detailed")
    if not old_results.empty:
        # Vectorized anti-join via MultiIndex membership instead of a
        # per-row Python-level apply (which dominates runtime on large grids).
        done = pd.MultiIndex.from_frame(
            old_results[["task", "model", "bcp_47", "sentence_nr"]]
        )
        combis = combis[~pd.MultiIndex.from_frame(combis).isin(done)]

    print(f"Running {len(combis)} evaluation tasks...")

    # Batching: asyncio.gather + rate-limiting can in principle run everything
    # at once, but in practice batching is more efficient / necessary.
    batch_size = 2000
    batch_results = []
    for start in tqdm(range(0, len(combis), batch_size), colour='blue', desc='Batches'):
        batch = combis.iloc[start:start + batch_size]
        # itertuples(index=False) is faster than iterrows and keeps
        # sentence_nr as an int instead of per-row object coercion.
        batch_results.append(
            await tqdm_asyncio.gather(
                *(tasks[task_name](model, bcp_47, sentence_nr)
                  for task_name, model, bcp_47, sentence_nr in batch.itertuples(index=False))
            )
        )
    # Each task coroutine returns a list of result records; flatten both levels.
    results = [r for batch in batch_results for result in batch for r in result]
    results = pd.DataFrame(results) if results else pd.DataFrame(columns=["task", "model", "bcp_47", "metric", "sentence_nr", "score", "origin"])

    # Merge with cached results (immutable log: the first occurrence wins,
    # so cached rows are never overwritten by re-runs).
    all_results = pd.concat([old_results, results]).drop_duplicates(
        subset=["task", "model", "bcp_47", "metric", "sentence_nr"]
    ) if not old_results.empty else results

    # Filter to the currently configured models × languages and aggregate
    # sentence-level scores to one mean per (model, language, task, metric).
    current_models = set(models.iloc[:n_models]["id"])
    current_languages = set(languages.head(n_languages)["bcp_47"])
    results_agg = (
        all_results[all_results["model"].isin(current_models) & all_results["bcp_47"].isin(current_languages)]
        .groupby(["model", "bcp_47", "task", "metric"])
        .agg({"score": "mean", "origin": "first"})
        .reset_index()
    )

    save(all_results, "results-detailed")
    save(results_agg, "results")
    save(models, "models")
    save(languages, "languages")
    elapsed = time.time() - start_time
    print(f"Evaluation completed in {str(timedelta(seconds=int(elapsed)))}")
    return results_agg


if __name__ == "__main__":
    # Run the full evaluation pipeline; results are persisted via save()
    # inside evaluate(), so the return value does not need to be kept.
    asyncio.run(evaluate())