Shaltiel committed on
Commit
7e34f5e
1 Parent(s): 79410f6

Redo metric

Browse files
Files changed (2) hide show
  1. heq_task.py +3 -1
  2. main_backend_lighteval.py +9 -10
heq_task.py CHANGED
@@ -3,6 +3,7 @@ import string
3
  from lighteval.tasks.lighteval_task import LightevalTaskConfig
4
  from lighteval.metrics import Metrics, MetricCategory
5
  from lighteval.metrics.utils import CorpusLevelMetric, MetricUseCase
 
6
  import numpy as np
7
  from lighteval.tasks.requests import Doc
8
  from Levenshtein import distance
@@ -81,6 +82,7 @@ heq_tlnls_metric = CorpusLevelMetric(
81
  corpus_level_fn=np.mean,
82
  sample_level_fn=heq_eval_fn
83
  )
 
84
 
85
  def heq_prompt_fn(line, task_name: str = None):
86
  """Defines how to go from a dataset line to a doc object.
@@ -106,6 +108,6 @@ heq_task = LightevalTaskConfig(
106
  hf_subset="default",
107
  hf_avail_splits=["heq"],
108
  evaluation_splits=["heq"],
109
- metric=[heq_tlnls_metric],
110
  stop_sequence=['\n']
111
  )
 
3
  from lighteval.tasks.lighteval_task import LightevalTaskConfig
4
  from lighteval.metrics import Metrics, MetricCategory
5
  from lighteval.metrics.utils import CorpusLevelMetric, MetricUseCase
6
+ from aenum import extend_enum
7
  import numpy as np
8
  from lighteval.tasks.requests import Doc
9
  from Levenshtein import distance
 
82
  corpus_level_fn=np.mean,
83
  sample_level_fn=heq_eval_fn
84
  )
85
+ extend_enum(Metrics, 'heq_tlnls_metric', heq_tlnls_metric)
86
 
87
  def heq_prompt_fn(line, task_name: str = None):
88
  """Defines how to go from a dataset line to a doc object.
 
108
  hf_subset="default",
109
  hf_avail_splits=["heq"],
110
  evaluation_splits=["heq"],
111
+ metric=['heq_tlnls_metric'],
112
  stop_sequence=['\n']
113
  )
main_backend_lighteval.py CHANGED
@@ -13,16 +13,15 @@ def patched_init(self, *args, **kwargs):
13
  self.name = self.name.replace('.', '-')
14
  lighteval.models.endpoint_model.InferenceEndpointModelConfig.__init__ = patched_init
15
 
16
- import huggingface_hub
17
- orig_create_endpoint = huggingface_hub.create_inference_endpoint
18
- def new_create_endpoint(*args, **kwargs):
19
- print('$$$$$$$$$$$$$$$$$ here 1')
20
- if 'custom_image' in kwargs and kwargs['custom_image']['url'] == "ghcr.io/huggingface/text-generation-inference:1.1.0":
21
- print('$$$$$$$$$$$$$ here 2')
22
- kwargs['custom_image']['url'] = "registry.internal.huggingface.tech/api-inference/community/text-generation-inference:gemma-ie"
23
- return orig_create_endpoint(*args, **kwargs)
24
- huggingface_hub.create_inference_endpoint = new_create_endpoint
25
-
26
 
27
  from huggingface_hub import snapshot_download
28
 
 
13
  self.name = self.name.replace('.', '-')
14
  lighteval.models.endpoint_model.InferenceEndpointModelConfig.__init__ = patched_init
15
 
16
+ # import huggingface_hub
17
+ # orig_create_endpoint = huggingface_hub.create_inference_endpoint
18
+ # def new_create_endpoint(*args, **kwargs):
19
+ # print('$$$$$$$$$$$$$$$$$ here 1')
20
+ # if 'custom_image' in kwargs and kwargs['custom_image']['url'] == "ghcr.io/huggingface/text-generation-inference:1.1.0":
21
+ # print('$$$$$$$$$$$$$ here 2')
22
+ # kwargs['custom_image']['url'] = "registry.internal.huggingface.tech/api-inference/community/text-generation-inference:gemma-ie"
23
+ # return orig_create_endpoint(*args, **kwargs)
24
+ # huggingface_hub.create_inference_endpoint = new_create_endpoint
 
25
 
26
  from huggingface_hub import snapshot_download
27