Zhuoyang Song committed
Commit 272edd2
Parents: a14bfe6 845a45a

FIX: fix conflicts

Files changed (3)
  1. README.md +4 -10
  2. tasks.py +34 -5
  3. tlem.py +15 -9
README.md CHANGED
@@ -1,11 +1,5 @@
- ---
- title: Tlem
- emoji: 🏆
- colorFrom: yellow
- colorTo: yellow
- sdk: static
- pinned: false
- license: apache-2.0
- ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+ # Transparent LLMs Evaluation Metrics
+
+ > LLMs belong to *tout le monde*
tasks.py CHANGED
@@ -14,16 +14,42 @@ from evaluate import load
from collections import defaultdict
import sys

+
# if sys.version_info >= (3, 9):
#     from functools import cache
# else:
#     from functools import lru_cache as cache

+
disable_progress_bar()


- def fake_pipeline(prompts: Iterable[str]) -> list[str]:
-     return [prompt for prompt in tqdm(prompts)]
+ def mt_bench_prompt(example):
+     judge_prompt = "You are ChatGPT, a large language model trained by OpenAI. Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of the response."
+     judge_prompt = "You are ChatGPT, a large language model trained by OpenAI. Your task is to act as an impartial judge and evaluate the quality of the responses provided by an 'assistant' role in the displayed conversation. Your evaluation should focus on the helpfulness, relevance, accuracy, depth, creativity, language fluency, clarity, and level of detail in the assistant's responses. Please note that the evaluation should not consider the user's questions or the overall conversation, but solely the quality of the assistant's replies."
+     multi_prompt = "Your evaluation should focus on the assistant's answer to the second user question."
+     ref_prompt = "In the conversation, you will encounter system messages labeled 'Reference Answer' followed by the assistant's response. Your task is to evaluate the quality of the assistant's response by comparing it with the reference answer."
+     json_prompt = 'You must rate the response on a scale of 1 to 10 in JSON format, for example: {"rating": 5}.'
+     prompt_list = [judge_prompt]
+     conversations = example["conversation"]
+     if example["turn"] == 2:
+         prompt_list.append(multi_prompt)
+
+     if example["reference"] is not None:
+         conversations = []
+         questions = filter(lambda e: e["role"] == "user", example["conversation"])
+         answers = filter(lambda e: e["role"] == "assistant", example["conversation"])
+         for q, a, r in zip(questions, answers, example["reference"]):
+             conversations.append(q)
+             conversations.append(
+                 {"role": "system", "content": "Reference Answer: " + r}
+             )
+             conversations.append(a)
+         prompt_list.append(ref_prompt)
+     prompt_list.append(json_prompt)
+
+     messages = [{"role": "system", "content": " ".join(prompt_list)}] + conversations
+     return messages


@dataclass
@@ -33,7 +59,7 @@ class Task:
    # metrics: list[str] = field(default_factory=list)
    metric_name: str | tuple[str, str] = ("sustech/tlem", "gsm8k")
    input_column: str = "question"
-     label_column: str = "answer"
+     label_column: str = ""
    prompt: Optional[Callable | str] = None
    few_shot: int = 0
    few_shot_from: Optional[str] = None
@@ -54,6 +80,7 @@ class Task:
                    input_column=example[self.input_column]
                )
            }
+         self.label_column = self.label_column or self.input_column

    @cached_property
    def samples(self):
@@ -77,7 +104,8 @@ class Task:
                if name in ds:
                    self.few_shot_from = name
                    break
-         # if self.few_shot_from:
+
+         assert self.few_shot_from != self.split
        shots = ds[self.few_shot_from].select(range(self.few_shot))
        # else:
        #     shots = ds.select(range(self.few_shot))
@@ -128,6 +156,8 @@ class Task:
            result = self.metric.compute(
                responses=outputs, references=self.dataset[self.label_column]
            )
+         finally:
+             result = outputs
        # if log:
        #     name = name or pipeline.__name__
        #     self.results[name] = result
@@ -502,7 +532,6 @@ class MMLU:
            "psychology",
        ],
        "other": ["other", "business", "health"],
-         "Test": ["culture"],
    }

    @classmethod
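For readers skimming the diff, here is a minimal sketch (not part of the commit) of what the new `mt_bench_prompt` helper produces for a hand-built single-turn example. The dict keys (`conversation`, `turn`, `reference`) mirror the fields the function reads; the `from tasks import ...` path assumes this repo's `tasks.py` is importable.

```python
# Sketch only: exercise mt_bench_prompt on a toy example (not a real dataset row).
from tasks import mt_bench_prompt  # assumes this repo's tasks.py is on sys.path

example = {
    "turn": 1,          # single-turn, so multi_prompt is not appended
    "reference": None,  # no reference answer, so ref_prompt is skipped
    "conversation": [
        {"role": "user", "content": "What is 2 + 2?"},
        {"role": "assistant", "content": "2 + 2 equals 4."},
    ],
}

messages = mt_bench_prompt(example)
print(messages[0]["role"])  # "system" -- the combined judge + JSON-format prompt
print(len(messages))        # 3: system prompt followed by the original two turns
```

The assembled system message ends with `json_prompt`, so the judge model is expected to reply with a JSON rating such as `{"rating": 5}`.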
tlem.py CHANGED
@@ -70,15 +70,9 @@ class ReasoningMetric(evaluate.Metric):
        return results


- gsm8k = Task(
-     dataset_name=("gsm8k", "main"),
-     metric_name=("sustech/tlem", "gsm8k"),
-     input_column="question",
-     label_column="answer",
- )
-
-
class Suite(EvaluationSuite):
+     task_class = Task
+
    def run(
        self,
        model_or_pipeline: Any,
@@ -116,7 +110,12 @@ class Suite(EvaluationSuite):
            case _ if name.startswith("cmmlu"):
                suite = CMMLU.suite(chat=chat)
            case "gsm8k":
-                 suite = [gsm8k]
+                 suite = Task(
+                     dataset_name=("gsm8k", "main"),
+                     metric_name=("sustech/tlem", "gsm8k"),
+                     input_column="question",
+                     label_column="answer",
+                 )
            case "bbh":
                suite = BBH.suite()
            case "arc":
@@ -129,6 +128,13 @@ class Suite(EvaluationSuite):
                suite = Winogrande.suite()
            case _ if name.startswith("ceval"):
                suite = CEVAL.suite(chat=chat)
+             case "mt_bench":
+                 suite = Task(
+                     dataset_name="SUSTech/mt_bench_judge",
+                     split="train",
+                     prompt=mt_bench_prompt
+                     # metric_name=("sustech/tlem", "gsm8k"),
+                 )
        match name:
            case _ if "test" in name:
                suite = suite["Test"]
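As a closing note, a sketch under stated assumptions of how the now-inlined gsm8k branch could be exercised on its own: the `Task(...)` arguments are copied from the new `case "gsm8k"` arm, while `echo_pipeline` and the commented-out `gsm8k.run(...)` call are illustrative stand-ins, since the evaluation entry point is only partially visible in this diff.

```python
# Sketch only: build the gsm8k Task exactly as the new `case "gsm8k"` branch does,
# and pair it with a trivial stand-in pipeline in the spirit of the removed
# fake_pipeline helper.
from tasks import Task  # assumes this repo's tasks.py is on sys.path


def echo_pipeline(prompts):
    # Placeholder "model": returns each prompt unchanged.
    return [prompt for prompt in prompts]


gsm8k = Task(
    dataset_name=("gsm8k", "main"),
    metric_name=("sustech/tlem", "gsm8k"),
    input_column="question",
    label_column="answer",
)

# Hypothetical call -- the exact evaluation entry point (e.g. Task.run) is not
# shown in full in this diff, so treat this line as illustrative only.
# result = gsm8k.run(echo_pipeline)
```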