Martin Vlach
committed on
Commit • c415fa7
1 Parent(s): 4bf07e3
change code of app to match the MC GHub
Browse files
- app.py +64 -46
- requirements.txt +1 -1
app.py
CHANGED
@@ -1,53 +1,71 @@
-[import lines unrecoverable in the extraction]
 import os
-[import line unrecoverable]
 
 
-def [old chat handler; signature unrecoverable in the extraction]
-[several body lines unrecoverable]
 
-    for human, assistant in history:
-        instruction += 'USER: '+ human + ' ASSISTANT: '+ assistant + '</s>'
-    instruction += 'USER: '+ message + ' ASSISTANT:'
-    problem = [instruction]
-    stop_tokens = ["USER:", "USER", "ASSISTANT:", "ASSISTANT"]
-    sampling_params = SamplingParams(temperature=temperature, top_p=1, max_tokens=max_tokens, stop=stop_tokens)
-    completions = llm.generate(problem, sampling_params)
-    for output in completions:
-        prompt = output.prompt
-        print('==========================question=============================')
-        print(prompt)
-        generated_text = output.outputs[0].text
-        print('===========================answer=============================')
-        print(generated_text)
-        for idx in range(len(generated_text)):
-            yield generated_text[:idx+1]
 
-[Gradio setup lines unrecoverable]
-gr.[rest of line unrecoverable]
         ],
-[remaining removed lines unrecoverable]
+from transformers import AutoTokenizer
+import transformers
 import os
+import sys
+import fire
+import torch
+import gradio as gr
 
 
+def main(
+    base_model="ise-uiuc/Magicoder-S-DS-6.7B",
+    device="cuda:0",
+    port=8080,
+):
+    tokenizer = AutoTokenizer.from_pretrained(base_model)
+    pipeline = transformers.pipeline(
+        "text-generation",
+        model=base_model,
+        torch_dtype=torch.float16,
+        device=device
+    )
+    def evaluate_magicoder(
+        instruction,
+        temperature=1,
+        max_new_tokens=2048,
+    ):
+        MAGICODER_PROMPT = """You are an exceptionally intelligent coding assistant that consistently delivers accurate and reliable responses to user instructions.
 
+@@ Instruction
+{instruction}
 
+@@ Response
+"""
+        prompt = MAGICODER_PROMPT.format(instruction=instruction)
 
+        sequences = pipeline(
+            prompt,
+            temperature=temperature,
+            max_new_tokens=max_new_tokens,
+        )
+        for seq in sequences:
+            print('==========================question=============================')
+            print(prompt)
+            generated_text = seq['generated_text'].replace(prompt, "")
+            print('===========================answer=============================')
+            print(generated_text)
+            return generated_text
+
+    gr.Interface(
+        fn=evaluate_magicoder,
+        inputs=[
+            gr.components.Textbox(
+                lines=3, label="Instruction", placeholder="Anything you want to ask Magicoder ?"
+            ),
+            gr.components.Slider(minimum=0, maximum=1, value=0, label="Temperature"),
+            gr.components.Slider(
+                minimum=1, maximum=2048, step=1, value=128, label="Max tokens"
+            ),
         ],
+        outputs=[
+            gr.components.Textbox(
+                lines=30,
+                label="Output",
+            )
+        ],
+        title="Magicoder",
+        description="This is a LLM playground for Magicoder! Follow us on Github: https://github.com/ise-uiuc/magicoder and Huggingface: https://huggingface.co/ise-uiuc."
+    ).queue().launch(share=True, server_port=port)
+
+if __name__ == "__main__":
+    fire.Fire(main)
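The rewritten app.py wraps a transformers text-generation pipeline in a gr.Interface and hands main to python-fire, so each keyword argument of main doubles as a command-line flag. A minimal usage sketch, assuming the new file is saved as app.py and a CUDA device is available; the import path and the CPU note in the comment are assumptions, not part of the commit:

# Hypothetical invocation; main, base_model, device, and port come from
# the diff above, everything else here is illustrative.
from app import main

main(
    base_model="ise-uiuc/Magicoder-S-DS-6.7B",  # model id used in the diff
    device="cuda:0",  # "cpu" may also work, but float16 inference would need adjusting
    port=8080,
)

Because of fire.Fire(main), the equivalent shell invocation would be along the lines of: python app.py --base_model ise-uiuc/Magicoder-S-DS-6.7B --device cuda:0 --port 8080.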
requirements.txt
CHANGED
@@ -1 +1 @@
-[removed line unrecoverable in the extraction]
+fire
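The dependency swap tracks the code change: the new fire requirement backs the fire.Fire(main) entry point, which inspects a function's signature and exposes each keyword argument as a CLI flag. A minimal sketch of that pattern with a hypothetical stand-in function (only the Fire wiring mirrors the commit):

import fire

def serve(base_model="ise-uiuc/Magicoder-S-DS-6.7B", device="cuda:0", port=8080):
    # app.py would build the pipeline and launch Gradio here;
    # this stub just echoes the parsed flags.
    print(f"model={base_model} device={device} port={port}")

if __name__ == "__main__":
    fire.Fire(serve)  # e.g. `python demo.py --device cpu --port 7860`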