yuxiang630 committed
Commit
29752d3
1 Parent(s): 6bbad4a

feat: longer max tokens

Files changed (1): app.py (+13 -13)
app.py CHANGED
```diff
@@ -12,17 +12,14 @@ def main(
     device="cpu",
     port=8070,
 ):
-    tokenizer = AutoTokenizer.from_pretrained(base_model)
     pipeline = transformers.pipeline(
-        "text-generation",
-        model=base_model,
-        torch_dtype=torch.float16,
-        device=device
+        "text-generation", model=base_model, torch_dtype=torch.bfloat16, device=device
     )
+
     def evaluate_magicoder(
         instruction,
         temperature=1,
-        max_new_tokens=2048,
+        max_length=8192,
     ):
         MAGICODER_PROMPT = """You are an exceptionally intelligent coding assistant that consistently delivers accurate and reliable responses to user instructions.
 
@@ -30,7 +27,7 @@ def main(
 {instruction}
 
 @@ Response
-"""
+"""
         prompt = MAGICODER_PROMPT.format(instruction=instruction)
 
         if temperature > 0:
@@ -38,26 +35,28 @@ def main(
                 prompt,
                 do_sample=True,
                 temperature=temperature,
-                max_new_tokens=max_new_tokens,
+                max_length=max_length,
             )
         else:
             sequences = pipeline(
                 prompt,
-                max_new_tokens=max_new_tokens,
+                max_length=max_length,
             )
         for seq in sequences:
-            generated_text = seq['generated_text'].replace(prompt, "")
+            generated_text = seq["generated_text"].replace(prompt, "")
             return generated_text
 
     gr.Interface(
         fn=evaluate_magicoder,
         inputs=[
             gr.components.Textbox(
-                lines=3, label="Instruction", placeholder="Anything you want to ask Magicoder ?"
+                lines=3,
+                label="Instruction",
+                placeholder="Anything you want to ask Magicoder ?",
             ),
             gr.components.Slider(minimum=0, maximum=1, value=0, label="Temperature"),
             gr.components.Slider(
-                minimum=1, maximum=2048, step=1, value=1024, label="Max tokens"
+                minimum=1, maximum=8192, step=1, value=1024, label="Max tokens"
             ),
         ],
         outputs=[
@@ -67,8 +66,9 @@ def main(
             )
         ],
         title="Magicoder",
-        description="This is a LLM playground for Magicoder! Follow us on Github: https://github.com/ise-uiuc/magicoder and Huggingface: https://huggingface.co/ise-uiuc."
+        description="This is a playground for Magicoder-S-DS-6.7B! Follow us on Github: https://github.com/ise-uiuc/magicoder and Huggingface: https://huggingface.co/ise-uiuc.",
     ).queue().launch(share=True, server_port=port)
 
+
 if __name__ == "__main__":
     fire.Fire(main)
```
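Beyond raising the cap from 2048 to 8192, the commit swaps `max_new_tokens` for `max_length`, and the two arguments are not interchangeable in the Hugging Face transformers generation API: `max_new_tokens` bounds only the generated continuation, while `max_length` bounds prompt plus continuation combined, so a long instruction now eats into the 8192-token budget. The `torch.float16` to `torch.bfloat16` change trades mantissa precision for float32's exponent range, which is generally more numerically robust for inference. A minimal sketch contrasting the two budgets, assuming a standard text-generation pipeline (the `gpt2` checkpoint and the 64-token cap are illustrative placeholders, not from the commit):

```python
# Minimal sketch contrasting the two generation budgets; assumes the
# standard transformers text-generation pipeline. The "gpt2" checkpoint
# and the 64-token cap are illustrative placeholders.
import transformers

pipe = transformers.pipeline("text-generation", model="gpt2")
prompt = "def fibonacci(n):"

# max_new_tokens bounds only the continuation; the prompt is not counted.
out_a = pipe(prompt, max_new_tokens=64)

# max_length bounds prompt + continuation together, so a longer prompt
# leaves less room for the generated response.
out_b = pipe(prompt, max_length=64)

print(out_a[0]["generated_text"])
print(out_b[0]["generated_text"])
```

With, say, a 40-token prompt, the `max_length=64` call can generate at most 24 new tokens, whereas the `max_new_tokens=64` call always has the full 64 available.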