BhanuPrakashSamoju
committed on
Commit
•
4906e39
1
Parent(s):
d0af1ba
Update main.py
Browse files
main.py
CHANGED
@@ -28,9 +28,9 @@ class BasePromptContext:
|
|
28 |
self.variables_list = ["question","answer","context"]
|
29 |
self.base_template = """Please act as an impartial judge and evaluate the quality of the provided answer which attempts to answer the provided question based on a provided context.
|
30 |
|
31 |
-
And you'll need to submit your grading for the correctness, comprehensiveness and readability of the answer, using the
|
32 |
-
Reasoning
|
33 |
-
Score
|
34 |
|
35 |
|
36 |
|
@@ -76,6 +76,7 @@ class Evaluater:
|
|
76 |
self.answer = item.answer
|
77 |
self.domain = item.domain
|
78 |
self.context = item.context
|
|
|
79 |
|
80 |
def get_prompt_template(self):
|
81 |
prompt = BasePromptContext()
|
@@ -85,9 +86,8 @@ class Evaluater:
|
|
85 |
return eval_template
|
86 |
|
87 |
def evaluate(self):
|
88 |
-
llm=HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":1, "max_length":1000000})
|
89 |
prompt = self.get_prompt_template().format(question = self.question, answer = self.answer, context = self.context)
|
90 |
-
score = llm(prompt)
|
91 |
return score
|
92 |
|
93 |
# Create extractor instance
|
|
|
28 |
self.variables_list = ["question","answer","context"]
|
29 |
self.base_template = """Please act as an impartial judge and evaluate the quality of the provided answer which attempts to answer the provided question based on a provided context.
|
30 |
|
31 |
+
And you'll need to submit your grading for the correctness, comprehensiveness and readability of the answer, using JSON format with the 2 items in parenthesis:
|
32 |
+
( Reasoning: [your one line step by step reasoning about the correctness of the answer],
|
33 |
+
Score: [your score number for the correctness of the answer] )
|
34 |
|
35 |
|
36 |
|
|
|
76 |
self.answer = item.answer
|
77 |
self.domain = item.domain
|
78 |
self.context = item.context
|
79 |
+
self.llm=HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":1, "max_length":1000000})
|
80 |
|
81 |
def get_prompt_template(self):
|
82 |
prompt = BasePromptContext()
|
|
|
86 |
return eval_template
|
87 |
|
88 |
def evaluate(self):
|
|
|
89 |
prompt = self.get_prompt_template().format(question = self.question, answer = self.answer, context = self.context)
|
90 |
+
score = self.llm(prompt)
|
91 |
return score
|
92 |
|
93 |
# Create extractor instance
|