Shaltiel committed
Commit 9ff3dc7 · 1 Parent(s): 01b7fc0

Fixed patch

Files changed (1):
  1. main_backend_lighteval.py +3 -3
main_backend_lighteval.py CHANGED
@@ -3,7 +3,6 @@ import pprint
 
 import lighteval.models.endpoint_model
 class GoodInferenceEndpointModel(lighteval.models.endpoint_model.InferenceEndpointModel):
-
     @property
     def add_special_tokens(self):
         return True
@@ -11,10 +10,11 @@ class GoodInferenceEndpointModel(lighteval.models.endpoint_model.InferenceEndpoi
     def greedy_until(self, requests: list, *args, **kwargs):
         for request in requests:
             request.tokenized_context = self.tok_encode(request.context)
-        super().greedy_until(requests, *args, **kwargs)
+        # using this and not super() because we don't want self to change
+        return lighteval.models.endpoint_model.InferenceEndpointModel.greedy_until(self, requests, *args, **kwargs)
 
     def __process_batch_generate(self, requests: list, returns_logits: bool):
-        return super().__process_batch_generate(requests)
+        return lighteval.models.endpoint_model.InferenceEndpointModel.__process_batch_generate(self, requests)
 
     @property
     def disable_tqdm(self) -> bool:
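
Note on the added comment ("we don't want self to change"): zero-argument super() binds to the class whose body the method was written in, so it only works while self is an instance of that class. If an override like this is later grafted onto the original class (a common monkey-patching pattern; whether main_backend_lighteval.py actually does that is not visible in this hunk), super() raises a TypeError while an explicit BaseClass.method(self, ...) call keeps working. A minimal sketch of the difference, using hypothetical Base/Patched names rather than anything from lighteval:

class Base:
    def greedy(self):
        return "base"


class Patched(Base):
    def greedy_with_super(self):
        # Zero-arg super() compiles to super(Patched, self), which requires
        # self to be an instance of Patched (or a subclass of it).
        return "patched " + super().greedy()

    def greedy_explicit(self):
        # An explicit base-class call works for any Base instance.
        return "patched " + Base.greedy(self)


# Graft the overrides onto Base itself, so code that constructs a plain Base
# picks up the new behaviour without knowing about Patched.
Base.greedy_with_super = Patched.greedy_with_super
Base.greedy_explicit = Patched.greedy_explicit

obj = Base()                    # a plain Base instance, not a Patched one
print(obj.greedy_explicit())    # prints: patched base
try:
    obj.greedy_with_super()
except TypeError as err:
    # super(type, obj): obj must be an instance or subtype of type
    print(err)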