fixed recursion
main_backend_lighteval.py
CHANGED
@@ -2,7 +2,10 @@ import logging
 import pprint
 
 import lighteval.models.endpoint_model
+orig_endpoint_model_greedy_until = lighteval.models.endpoint_model.InferenceEndpointModel.greedy_until
+orig_endpoint_model_process_batch_generate = lighteval.models.endpoint_model.InferenceEndpointModel.__process_batch_generate
 class GoodInferenceEndpointModel(lighteval.models.endpoint_model.InferenceEndpointModel):
+
     @property
     def add_special_tokens(self):
         return True
@@ -11,10 +14,10 @@ class GoodInferenceEndpointModel(lighteval.models.endpoint_model.InferenceEndpointModel):
         for request in requests:
             request.tokenized_context = self.tok_encode(request.context)
         # using this and not super() because we don't want self to change
-        return
+        return orig_endpoint_model_greedy_until(self, requests, *args, **kwargs)
 
     def __process_batch_generate(self, requests: list, returns_logits: bool):
-        return
+        return orig_endpoint_model_process_batch_generate(self, requests)
 
     @property
     def disable_tqdm(self) -> bool:
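The commit captures a common pattern for breaking monkey-patching recursion: save module-level references to the original methods before the subclass redefines them, then have the overrides delegate to those saved references instead of looking the method up again through lighteval.models.endpoint_model.InferenceEndpointModel, which (per the commit message) is presumably where the recursion came from once the patched class was installed. A minimal sketch of the same pattern, using hypothetical stand-in names (Base, Patched, orig_greet) rather than the lighteval classes:

class Base:
    def greet(self):
        return "hello from Base"

# Capture the original method before anything shadows or rebinds it.
orig_greet = Base.greet

class Patched(Base):
    def greet(self):
        # Looking the method up via the (soon to be rebound) module-level
        # name would dispatch back into this override and recurse forever;
        # the saved reference calls the original implementation directly.
        return orig_greet(self) + " (patched)"

# Rebind the name, as monkey-patching a module attribute would.
Base = Patched

print(Base().greet())  # hello from Base (patched)

One caveat worth noting about the diff itself: a double-underscore name like __process_batch_generate is name-mangled inside class bodies, so at module level the attribute is only reachable as InferenceEndpointModel._InferenceEndpointModel__process_batch_generate, and a __process_batch_generate defined in the subclass body is stored under the subclass's mangled name rather than overriding the base class's method.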