kaikaidai committed on
Commit
52750d6
1 Parent(s): b921d7d

Organise prompts

Files changed (1)
  1. prompts.py +94 -0
prompts.py ADDED
@@ -0,0 +1,94 @@
+ # Default values for compatible mode
+ DEFAULT_EVAL_CRITERIA = """Does the model provide relevant and useful responses to the user's needs or questions?"""
+
+ DEFAULT_SCORE_1 = "The model's responses are irrelevant or unhelpful to the user's needs or queries."
+ DEFAULT_SCORE_2 = "The model sometimes provides helpful information, but often fails to address the user's actual needs or questions."
+ DEFAULT_SCORE_3 = "The model generally provides helpful responses that address the user's needs, though it may occasionally miss the mark."
+ DEFAULT_SCORE_4 = "The model regularly provides helpful responses that are well-aligned with the user's inquiries, with only rare inaccuracies."
+ DEFAULT_SCORE_5 = "The model consistently offers highly relevant and useful responses that perfectly cater to the user's needs and inquiries."
+
+ # Default Eval Prompt
+ DEFAULT_EVAL_PROMPT = """Does the model provide relevant and useful responses to the user's needs or questions?
+
+ Scoring Rubric:
+ Score 1: The model's responses are irrelevant or unhelpful to the user's needs or queries.
+ Score 2: The model sometimes provides helpful information, but often fails to address the user's actual needs or questions.
+ Score 3: The model generally provides helpful responses that address the user's needs, though it may occasionally miss the mark.
+ Score 4: The model regularly provides helpful responses that are well-aligned with the user's inquiries, with only rare inaccuracies.
+ Score 5: The model consistently offers highly relevant and useful responses that perfectly cater to the user's needs and inquiries.
+
+ [User Query]: {{input}}
+
+ [AI Response]: {{response}}"""
+
+ # Split the eval prompt into editable and fixed parts
+ DEFAULT_EVAL_PROMPT_EDITABLE = """Does the model provide relevant and useful responses to the user's needs or questions?
+
+ Scoring Rubric:
+ Score 1: The model's responses are irrelevant or unhelpful to the user's needs or queries.
+ Score 2: The model sometimes provides helpful information, but often fails to address the user's actual needs or questions.
+ Score 3: The model generally provides helpful responses that address the user's needs, though it may occasionally miss the mark.
+ Score 4: The model regularly provides helpful responses that are well-aligned with the user's inquiries, with only rare inaccuracies.
+ Score 5: The model consistently offers highly relevant and useful responses that perfectly cater to the user's needs and inquiries."""
+
+ # Fixed suffix that will always be appended
+ FIXED_EVAL_SUFFIX = """
+ [User Query]: {{input}}
+
+ [AI Response]: {{response}}"""
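# Illustrative sketch (not part of the commit): how the editable/fixed split
# above can be recombined, assuming the caller joins the two halves with a
# single newline; the hosting app may do this differently.
full_prompt = DEFAULT_EVAL_PROMPT_EDITABLE + "\n" + FIXED_EVAL_SUFFIX
# With the defaults above, full_prompt reproduces DEFAULT_EVAL_PROMPT.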
+
+ # Define the Prometheus prompt used by default (without reference)
+ PROMETHEUS_PROMPT = """###Task Description:
+ An instruction (might include an Input inside it) and a response to evaluate are given.
+ 1. Write a detailed feedback that assesses the quality of the response strictly based on the given score rubric, not evaluating in general.
+ 2. After writing the feedback, write a score that is an integer between 1 and 5.
+ 3. The output format should look as follows: "Feedback: (write a feedback for criteria) [RESULT] (an integer number between 1 and 5)"
+ 4. Please do not generate any other openings, closings, or explanations.
+
+ ###The instruction to evaluate:
+ {human_input}
+
+ ###Response to evaluate:
+ {ai_response}
+
+ ###Score Rubrics:
+ [{eval_criteria}]
+ Score 1: {score1_desc}
+ Score 2: {score2_desc}
+ Score 3: {score3_desc}
+ Score 4: {score4_desc}
+ Score 5: {score5_desc}
+
+ ###Feedback:
+ """
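# Illustrative sketch (not part of the commit): the single-brace placeholders
# above can be filled with str.format; the example values are made up.
filled_prompt = PROMETHEUS_PROMPT.format(
    human_input="Summarise the benefits of unit testing in two sentences.",
    ai_response="Unit tests catch regressions early and document intended behaviour.",
    eval_criteria=DEFAULT_EVAL_CRITERIA,
    score1_desc=DEFAULT_SCORE_1,
    score2_desc=DEFAULT_SCORE_2,
    score3_desc=DEFAULT_SCORE_3,
    score4_desc=DEFAULT_SCORE_4,
    score5_desc=DEFAULT_SCORE_5,
)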
+
+ # Define the Prometheus prompt with reference response
+ PROMETHEUS_PROMPT_WITH_REFERENCE = """###Task Description:
+ An instruction (might include an Input inside it), a response to evaluate, a reference answer that gets a score of 5, and a score rubric representing an evaluation criteria are given.
+ 1. Write a detailed feedback that assesses the quality of the response strictly based on the given score rubric, not evaluating in general.
+ 2. After writing the feedback, write a score that is an integer between 1 and 5.
+ 3. The output format should look as follows: "Feedback: (write a feedback for criteria) [RESULT] (an integer number between 1 and 5)"
+ 4. Please do not generate any other openings, closings, or explanations.
+
+ ###The instruction to evaluate:
+ {human_input}
+
+ ###Response to evaluate:
+ {ai_response}
+
+ ###Reference Answer (Score 5):
+ {ground_truth_input}
+
+ ###Score Rubrics:
+ [{eval_criteria}]
+ Score 1: {score1_desc}
+ Score 2: {score2_desc}
+ Score 3: {score3_desc}
+ Score 4: {score4_desc}
+ Score 5: {score5_desc}
+
+ ###Feedback:
+ """
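# Illustrative sketch (not part of the commit): extracting the integer score
# from a reply that follows the "Feedback: ... [RESULT] <score>" format the
# prompts above request; extract_prometheus_score is a hypothetical helper,
# not an identifier from this repository.
import re

def extract_prometheus_score(reply):
    match = re.search(r"\[RESULT\]\s*\(?(\d)\)?", reply)
    return int(match.group(1)) if match else None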
+
+ # Judge system prompt for non-Prometheus models
+ JUDGE_SYSTEM_PROMPT = """Please act as an impartial judge and evaluate based on the user's instruction. Your output format should strictly adhere to JSON as follows: {"feedback": "<write feedback>", "result": <numerical score>}. Ensure the output is valid JSON, without additional formatting or explanations."""
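# Illustrative sketch (not part of the commit): parsing the JSON reply that
# the judge system prompt above asks for; raw_judge_output is a made-up
# example response, not data from this repository.
import json

raw_judge_output = '{"feedback": "Clear, relevant answer with minor omissions.", "result": 4}'
parsed = json.loads(raw_judge_output)
feedback, score = parsed["feedback"], parsed["result"]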