wanzin committed
Commit c401996 · 1 Parent(s): b055b70

adding lm_eval class

Files changed (2)
  1. app.py +52 -0
  2. dmx_lm_eval.py +96 -0
app.py ADDED
@@ -0,0 +1,52 @@
+ import evaluate
+ from evaluate.utils import infer_gradio_input_types, parse_gradio_data, json_to_string_type, parse_readme
+ from pathlib import Path
+ import sys
+
+ def launch_gradio_widget(metric):
+     """Launches metric widget with Gradio."""
+     try:
+         import gradio as gr
+     except ImportError as error:
+         raise ImportError("To create a metric widget with Gradio, make sure gradio is installed.") from error
+
+     local_path = Path(sys.path[0])
+
+     if isinstance(metric.features, list):
+         (feature_names, feature_types) = zip(*metric.features[0].items())
+     else:
+         (feature_names, feature_types) = zip(*metric.features.items())
+
+     gradio_input_types = infer_gradio_input_types(feature_types)
+
+     def compute(data):
+         return metric._compute(
+             model='gpt2',
+             tasks='wikitext',
+             **parse_gradio_data(data, gradio_input_types)
+         )
+
+     iface = gr.Interface(
+         fn=compute,
+         inputs=gr.Dataframe(
+             headers=feature_names,
+             col_count=len(feature_names),
+             row_count=1,
+             datatype=json_to_string_type(gradio_input_types),
+         ),
+         outputs=gr.Textbox(label=metric.name),
+         description=(
+             metric.info.description + "\nThis metric is computed using the 'gpt2' model on the 'wikitext' task.\n"
+             "Ensure your input is appropriate for the selected task. "
+             "If this is a text-based metric, wrap your input in double quotes."
+             " Alternatively, you can use a JSON-formatted list as input."
+         ),
+         title=f"Metric: {metric.name}",
+         article=parse_readme(local_path / "README.md"),
+     )
+
+     iface.launch()
+
+ module = evaluate.load("d-matrix/dmx_lm_eval")
+
+ launch_gradio_widget(module)
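
For a quick check outside the widget, the loaded module can be called the same way compute does above, since app.py hardcodes model='gpt2' and tasks='wikitext'. A minimal sketch, assuming the Space is published as d-matrix/dmx_lm_eval and that evaluate, lm-eval, and PyTorch are installed; the limit value is only an illustrative cap to keep a smoke test short:

import evaluate

# Load the metric module from the Hub and call _compute directly,
# mirroring the hardcoded 'gpt2' / 'wikitext' defaults used by the widget above.
module = evaluate.load("d-matrix/dmx_lm_eval")
results = module._compute(
    model="gpt2",
    tasks="wikitext",
    limit=10,  # illustrative: evaluate only a handful of examples
)
print(results)
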
dmx_lm_eval.py ADDED
@@ -0,0 +1,96 @@
+ import evaluate
+ import lm_eval
+ from typing import Union, List, Optional
+ from dmx.compressor.dmx import config_rules, DmxModel
+ import datasets
+
+ _DESCRIPTION = """
+ Evaluation function using lm-eval with d-Matrix integration.
+ This function allows for the evaluation of language models across various tasks,
+ with the option to use d-Matrix compressed models.
+ """
+
+ _KWARGS_DESCRIPTION = """
+ Args:
+     model (str): The name or path of the model to evaluate.
+     tasks (Union[str, List[str]]): The task or list of tasks to evaluate on.
+     dmx_config (Optional[str]): Configuration string for d-Matrix transformations, defaults to None.
+     num_fewshot (Optional[int]): Number of examples in few-shot context, defaults to None.
+     batch_size (Optional[Union[int, str]]): Batch size for the model, defaults to None.
+     max_batch_size (Optional[int]): Maximum batch size to try with automatic batch size detection, defaults to None.
+     limit (Optional[Union[int, float]]): Limit the number of examples per task, defaults to None.
+     revision (str): Model revision to use, defaults to 'main'.
+     trust_remote_code (bool): Whether to trust remote code, defaults to False.
+     log_samples (bool): If True, logs all model outputs and documents, defaults to True.
+     verbosity (str): Logging verbosity level, defaults to 'INFO'.
+     **kwargs: Additional keyword arguments to pass to `lm_eval.evaluate`.
+
+ Returns:
+     dict: A dictionary containing the evaluation results.
+ """
+
+ @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+ class DmxMetric(evaluate.Metric):
+     def _info(self):
+         return evaluate.MetricInfo(
+             module_type="metric",
+             description=_DESCRIPTION,
+             citation="",
+             inputs_description=_KWARGS_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "references": datasets.Value("string"),
+                 }
+             ),
+             reference_urls=["https://github.com/EleutherAI/lm-evaluation-harness"],
+         )
+
+     def _compute(
+         self,
+         model: str,
+         tasks: Union[str, List[str]],
+         dmx_config: Optional[str] = None,
+         num_fewshot: Optional[int] = None,
+         batch_size: Optional[Union[int, str]] = None,
+         max_batch_size: Optional[int] = None,
+         limit: Optional[Union[int, float]] = None,
+         revision: str = "main",
+         trust_remote_code: bool = False,
+         log_samples: bool = True,
+         verbosity: str = "INFO",
+         **kwargs
+     ):
+         """
+         Evaluate a model on multiple tasks and metrics using lm-eval with optional d-Matrix integration.
+         """
+         model_args = f"pretrained={model},revision={revision},trust_remote_code={str(trust_remote_code)}"
+
+         lm = lm_eval.api.registry.get_model("hf").create_from_arg_string(
+             model_args,
+             {
+                 "batch_size": batch_size,
+                 "max_batch_size": max_batch_size,
+             }
+         )
+
+         if dmx_config:
+             lm._model = DmxModel.from_torch(lm._model)
+             lm._model.transform(lm._model.dmx_config, *eval(f"config_rules.{dmx_config}"))
+
+         task_dict = lm_eval.tasks.get_task_dict(tasks if isinstance(tasks, list) else [tasks])
+
+         for task in task_dict.values():
+             if num_fewshot is not None:
+                 task.set_config(key="num_fewshot", value=num_fewshot)
+
+         eval_params = {
+             'lm': lm,
+             'task_dict': task_dict,
+             'limit': limit,
+             'log_samples': log_samples,
+             'verbosity': verbosity,
+             **kwargs
+         }
+
+         results = lm_eval.evaluate(**eval_params)
+         return results.get('results', {})
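
The dmx_config branch above is the d-Matrix-specific path: when a configuration name is given, the Hugging Face model wrapped by lm-eval is converted to a DmxModel and transformed with the matching entry from config_rules before evaluation runs. A minimal sketch of that call path; the configuration name "BASELINE" is an assumption for illustration and must match an attribute that actually exists on dmx.compressor.dmx.config_rules:

import evaluate

# Hypothetical invocation with a d-Matrix transformation applied.
# "BASELINE" is an assumed config_rules entry; inside _compute the string is
# resolved via eval(f"config_rules.{dmx_config}") and unpacked into transform().
metric = evaluate.load("d-matrix/dmx_lm_eval")
results = metric._compute(
    model="gpt2",
    tasks=["wikitext"],      # a list of tasks is also accepted
    dmx_config="BASELINE",   # assumed name; substitute a real config_rules entry
    limit=10,
)
print(results)
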