DSXiangLi commited on
Commit
e907f96
β€’
1 Parent(s): 698a3ea
Files changed (5) hide show
  1. ape/ape.py +8 -7
  2. ape/instance.py +1 -1
  3. ape/llm.py +2 -2
  4. ape/prompt.py +5 -3
  5. app.py +5 -5
ape/ape.py CHANGED
@@ -35,27 +35,28 @@ def esttimate_cost(instance):
35
  return f'Train={train_cost} Eval={eval_cost}'
36
 
37
 
38
- def generate(instance, openai_key, n_instruct):
39
  global LLM
40
  if LLM is None:
41
- LLM = LLMGPT(openai_key, n_instruct)
42
 
43
  instructions = []
44
  train_iter = instance.get_train_iter()
45
  for few_shot in train_iter:
46
  instruction = LLM.generate_instruction(few_shot)
47
  print(instruction)
48
- instructions.append(instruction)
49
  return '\n'.join(instructions)
50
 
51
 
52
- def single_test(instruction, input, openai_key, n_instruct):
53
  global LLM
54
  if LLM is None:
55
- LLM = LLMGPT(openai_key, n_instruct)
56
 
57
  output = LLM.generate_output(instruction, input)
58
- return output
59
 
60
- def score_single(instruction, openai_key, n_instruct):
 
61
  pass
 
35
  return f'Train={train_cost} Eval={eval_cost}'
36
 
37
 
38
def generate(instance, openai_key):
    """Run instruction induction over the instance's training few-shot sets.

    Lazily constructs the module-global LLM wrapper on first call, then asks
    it to propose one instruction per few-shot group and returns all proposals
    joined with newlines.

    Args:
        instance: data holder exposing get_train_iter() yielding few-shot strings.
        openai_key: OpenAI API key used to build the LLMGPT wrapper once.

    Returns:
        str: newline-joined instruction texts, one per training group.
    """
    global LLM
    if LLM is None:
        # Build the shared wrapper exactly once; later calls reuse it.
        LLM = LLMGPT(openai_key)

    proposals = []
    for few_shot in instance.get_train_iter():
        result = LLM.generate_instruction(few_shot)
        print(result)  # surface raw generation for debugging in server logs
        proposals.append(result['text'])
    return '\n'.join(proposals)
50
 
51
 
52
def single_test(instruction, input, openai_key):
    """Apply one instruction to one input and return the model's output text.

    Lazily constructs the module-global LLM wrapper on first call.

    Args:
        instruction: the prompt/instruction to test.
        input: the sample input to transform.
        openai_key: OpenAI API key used to build the LLMGPT wrapper once.

    Returns:
        str: the 'text' field of the generation result.
    """
    global LLM
    if LLM is None:
        LLM = LLMGPT(openai_key)

    result = LLM.generate_output(instruction, input)
    return result['text']
59
 
60
def score_single(instruction, openai_key):
    """Score a single instruction against the eval set.

    NOTE(review): not implemented yet in this commit — wired to the
    score_button in app.py but currently a no-op returning None.
    """
    pass
ape/instance.py CHANGED
@@ -33,7 +33,7 @@ class Instance(object):
33
  index = random.sample(list(range(len(self.samples))), n_train + n_eval)
34
  train_index, eval_index = index[:n_train], index[n_train:]
35
  self.train_samples = [self.samples[i] for i in train_index]
36
- self.eval_samples = [self.samples[i] for i in train_index]
37
 
38
  def get_train_iter(self):
39
  for i in range(self.n_train):
 
33
  index = random.sample(list(range(len(self.samples))), n_train + n_eval)
34
  train_index, eval_index = index[:n_train], index[n_train:]
35
  self.train_samples = [self.samples[i] for i in train_index]
36
+ self.eval_samples = [self.samples[i] for i in eval_index]
37
 
38
  def get_train_iter(self):
39
  for i in range(self.n_train):
ape/llm.py CHANGED
@@ -19,8 +19,8 @@ Cost = {
19
 
20
 
21
  class LLMGPT(object):
22
- def __init__(self, openai_key, n_instruct):
23
- self.gen_llm = ChatOpenAI(openai_api_key=openai_key, max_tokens=2000, temperature=0.7, n=n_instruct, verbose=True)
24
  self.eval_llm = OpenAI(openai_api_key=openai_key, max_tokens=2000, temperature=0.7, echo=True, verbose=True)
25
  self.test_llm = ChatOpenAI(openai_api_key=openai_key, max_tokens=2000, temperature=0.7, verbose=True)
26
  self.gen_chain = None
 
19
 
20
 
21
  class LLMGPT(object):
22
+ def __init__(self, openai_key):
23
+ self.gen_llm = ChatOpenAI(openai_api_key=openai_key, max_tokens=2000, temperature=0.7, verbose=True)
24
  self.eval_llm = OpenAI(openai_api_key=openai_key, max_tokens=2000, temperature=0.7, echo=True, verbose=True)
25
  self.test_llm = ChatOpenAI(openai_api_key=openai_key, max_tokens=2000, temperature=0.7, verbose=True)
26
  self.gen_chain = None
ape/prompt.py CHANGED
@@ -2,7 +2,9 @@
2
  few_shot_prompt = "Input: {input}\nOutput: {output}\n"
3
 
4
  gen_user_prompt = """I gave a friend who is medical school teacher an instruction.
5
- Based on the instruction they produced the following input-output pairs:\n\n{few_shot}\n\nThe instruction was to """
 
 
6
 
7
  gen_sys_prompt = ""
8
 
@@ -10,8 +12,8 @@ eval_prompt = "Instruction: {instruction}\nInput: {input}\nOutput: {output}"
10
 
11
  test_sys_prompt = ""
12
 
13
- test_user_prompt = """I gave a friend who is medical school teacher an instruction. Base on this instruction, they will
14
- transform the input to output.\n Instruction: {instruction}Input: {input}\nOutput: """
15
 
16
  MyTemplate = {
17
  'gen_user_prompt': gen_user_prompt,
 
2
  few_shot_prompt = "Input: {input}\nOutput: {output}\n"
3
 
4
  gen_user_prompt = """I gave a friend who is medical school teacher an instruction.
5
+ Based on the instruction they produced the following input-output pairs:\n\n{few_shot}\n\n. What instruction can best
6
+ explain the relationship between above input-output pairs? Given me a concise instruction in chinese:
7
+ """
8
 
9
  gen_sys_prompt = ""
10
 
 
12
 
13
  test_sys_prompt = ""
14
 
15
+ test_user_prompt = """I gave a friend who is medical school teacher an instruction. They followed the instruction and
16
+ transform the input to output in chinese.\n Instruction: {instruction}\nInput: {input}\nOutput: """
17
 
18
  MyTemplate = {
19
  'gen_user_prompt': gen_user_prompt,
app.py CHANGED
@@ -19,7 +19,7 @@ with gr.Blocks(title="Automatic Prompt Engineer", theme=gr.themes.Glass()) as de
19
 
20
  with gr.Row():
21
  n_eval = gr.Slider(label="Number of Eval", minimum=5, maximum=30, step=5, value=20)
22
- n_instruct = gr.Slider(label="Number of Prompt", minimum=1, maximum=5, step=1, value=2)
23
 
24
  with gr.Column(scale=3):
25
  gr.Markdown("## Load Data")
@@ -44,7 +44,7 @@ with gr.Blocks(title="Automatic Prompt Engineer", theme=gr.themes.Glass()) as de
44
  with gr.Column(scale=2):
45
  gr.Markdown("## Run APE")
46
  gen_prompt = gr.Textbox(max_lines=100, lines=10,
47
- value=MyTemplate['gen_sys_prompt'], label="Prompt for generation")
48
  eval_prompt = gr.Textbox(max_lines=100, lines=10,
49
  value=MyTemplate['eval_prompt'], label="Prompt for Evaluation")
50
 
@@ -94,14 +94,14 @@ with gr.Blocks(title="Automatic Prompt Engineer", theme=gr.themes.Glass()) as de
94
  cost_button.click(esttimate_cost, [instance], [cost])
95
 
96
  # 4. Run APE -> ζ‰€ζœ‰ζŒ‡δ»€
97
- ape_button.click(generate, [instance, openai_key, n_instruct], [all_prompt])
98
 
99
  # 5. Evaluate -> εΎ—εˆ°ζ‰€ζœ‰ζŒ‡δ»€ηš„Log Prob
100
 
101
  # 6. θΎ“ε…₯ζŒ‡δ»€ε•ζ΅‹
102
- test_button.click(single_test, [test_prompt, test_input, openai_key, n_instruct], [test_output])
103
 
104
  # 7. θΎ“ε…₯ζŒ‡δ»€ζ‰“εˆ†
105
- score_button.click(score_single, [score_prompt, openai_key, n_instruct], [test_score])
106
 
107
  demo.launch(show_error=True)
 
19
 
20
  with gr.Row():
21
  n_eval = gr.Slider(label="Number of Eval", minimum=5, maximum=30, step=5, value=20)
22
+
23
 
24
  with gr.Column(scale=3):
25
  gr.Markdown("## Load Data")
 
44
  with gr.Column(scale=2):
45
  gr.Markdown("## Run APE")
46
  gen_prompt = gr.Textbox(max_lines=100, lines=10,
47
+ value=MyTemplate['gen_user_prompt'], label="Prompt for generation")
48
  eval_prompt = gr.Textbox(max_lines=100, lines=10,
49
  value=MyTemplate['eval_prompt'], label="Prompt for Evaluation")
50
 
 
94
  cost_button.click(esttimate_cost, [instance], [cost])
95
 
96
  # 4. Run APE -> ζ‰€ζœ‰ζŒ‡δ»€
97
+ ape_button.click(generate, [instance, openai_key], [all_prompt])
98
 
99
  # 5. Evaluate -> εΎ—εˆ°ζ‰€ζœ‰ζŒ‡δ»€ηš„Log Prob
100
 
101
  # 6. θΎ“ε…₯ζŒ‡δ»€ε•ζ΅‹
102
+ test_button.click(single_test, [test_prompt, test_input, openai_key], [test_output])
103
 
104
  # 7. θΎ“ε…₯ζŒ‡δ»€ζ‰“εˆ†
105
+ score_button.click(score_single, [score_prompt, openai_key], [test_score])
106
 
107
  demo.launch(show_error=True)