Clémentine committed on
Commit b899767 (1 parent: 1ffc326)

removed quantization to simplify

src/about.py CHANGED
@@ -8,15 +8,16 @@ class Task:
     col_name: str
 
 
-# Init: to update with your specific keys
+# Select your tasks here
+# ---------------------------------------------------
 class Tasks(Enum):
     # task_key in the json file, metric_key in the json file, name to display in the leaderboard
     task0 = Task("anli_r1", "acc", "ANLI")
     task1 = Task("logiqa", "acc_norm", "LogiQA")
 
-TASKS_HARNESS = [task.value.benchmark for task in Tasks]
-
 NUM_FEWSHOT = 0 # Change with your few shot
+# ---------------------------------------------------
+
 
 
 # Your leaderboard name
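
For context, these Tasks entries build on a small container defined just above this hunk. A minimal sketch of the surrounding code, reconstructed from what the diff itself shows (the field names benchmark and metric are inferred from the json-key comment and from the removed TASKS_HARNESS list, so treat them as assumptions):

from dataclasses import dataclass
from enum import Enum

@dataclass
class Task:
    benchmark: str  # task_key in the json file, e.g. "anli_r1"
    metric: str     # metric_key in the json file, e.g. "acc"
    col_name: str   # name to display in the leaderboard, e.g. "ANLI"

class Tasks(Enum):
    task0 = Task("anli_r1", "acc", "ANLI")
    task1 = Task("logiqa", "acc_norm", "LogiQA")

# The list this commit removed can be rebuilt on demand if ever needed:
# [task.value.benchmark for task in Tasks]  ->  ["anli_r1", "logiqa"]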
src/backend/manage_requests.py CHANGED
@@ -14,7 +14,7 @@ class EvalRequest:
     json_filepath: str
     weight_type: str = "Original"
     model_type: str = "" # pretrained, finetuned, with RL
-    precision: str = "" # float16, bfloat16, 8bit, 4bit, GPTQ
+    precision: str = "" # float16, bfloat16
     base_model: Optional[str] = None # for adapter models
     revision: str = "main" # commit
     submitted_time: Optional[str] = "2022-05-18T11:40:22.519222" # random date just so that we can still order requests by date
@@ -28,11 +28,12 @@ class EvalRequest:
 
         if self.precision in ["float16", "bfloat16"]:
             model_args += f",dtype={self.precision}"
-        elif self.precision == "8bit":
-            model_args += ",load_in_8bit=True"
-        elif self.precision == "4bit":
-            model_args += ",load_in_4bit=True"
-        elif self.precision == "GPTQ":
+        # Quantized models need some added config, the install of bitsandbytes, etc.
+        #elif self.precision == "8bit":
+        #    model_args += ",load_in_8bit=True"
+        #elif self.precision == "4bit":
+        #    model_args += ",load_in_4bit=True"
+        #elif self.precision == "GPTQ":
             # A GPTQ model does not need dtype to be specified,
             # it will be inferred from the config
             pass
@@ -42,9 +43,7 @@ class EvalRequest:
         return model_args
 
 
-def set_eval_request(
-    api: HfApi, eval_request: EvalRequest, set_to_status: str, hf_repo: str, local_dir: str
-):
+def set_eval_request(api: HfApi, eval_request: EvalRequest, set_to_status: str, hf_repo: str, local_dir: str):
     """Updates a given eval request with its new status on the hub (running, completed, failed, ...)"""
     json_filepath = eval_request.json_filepath
 
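The commented-out branches keep the old behavior visible. If quantized precisions ever need to come back, a sketch of the restored block would look like the following (assuming the lm-eval harness HF backend, which passes load_in_8bit / load_in_4bit through to transformers' from_pretrained, plus a working bitsandbytes install; exact kwargs may differ by harness version):

# extra runtime deps the added comment alludes to: pip install bitsandbytes accelerate
if self.precision in ["float16", "bfloat16"]:
    model_args += f",dtype={self.precision}"
elif self.precision == "8bit":
    model_args += ",load_in_8bit=True"   # bitsandbytes int8 loading
elif self.precision == "4bit":
    model_args += ",load_in_4bit=True"   # bitsandbytes 4-bit loading
elif self.precision == "GPTQ":
    pass  # dtype is inferred from the GPTQ config, nothing to add

With the current code, a float16 request still yields a model_args string ending in ",dtype=float16", while "8bit", "4bit", and "GPTQ" requests now fall through with no extra arguments.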
src/display/utils.py CHANGED
@@ -94,9 +94,9 @@ class WeightType(Enum):
 class Precision(Enum):
     float16 = ModelDetails("float16")
     bfloat16 = ModelDetails("bfloat16")
-    qt_8bit = ModelDetails("8bit")
-    qt_4bit = ModelDetails("4bit")
-    qt_GPTQ = ModelDetails("GPTQ")
+    #qt_8bit = ModelDetails("8bit")
+    #qt_4bit = ModelDetails("4bit")
+    #qt_GPTQ = ModelDetails("GPTQ")
     Unknown = ModelDetails("?")
 
     def from_str(precision):
@@ -104,12 +104,12 @@ class Precision(Enum):
             return Precision.float16
         if precision in ["torch.bfloat16", "bfloat16"]:
             return Precision.bfloat16
-        if precision in ["8bit"]:
-            return Precision.qt_8bit
-        if precision in ["4bit"]:
-            return Precision.qt_4bit
-        if precision in ["GPTQ", "None"]:
-            return Precision.qt_GPTQ
+        #if precision in ["8bit"]:
+        #    return Precision.qt_8bit
+        #if precision in ["4bit"]:
+        #    return Precision.qt_4bit
+        #if precision in ["GPTQ", "None"]:
+        #    return Precision.qt_GPTQ
         return Precision.Unknown
 
 # Column selection
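
The net effect on from_str (inputs and outputs taken straight from the code above; the quantized branches now fall through to the final return):

Precision.from_str("float16")         # -> Precision.float16
Precision.from_str("torch.bfloat16")  # -> Precision.bfloat16
Precision.from_str("8bit")            # -> Precision.Unknown (was Precision.qt_8bit)
Precision.from_str("GPTQ")            # -> Precision.Unknown (was Precision.qt_GPTQ)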