Update app.py
app.py
CHANGED
@@ -9,24 +9,23 @@ subprocess.check_call(["pip", "install", "-q", "gradio", "transformers", "python
 #!pip install python-dotenv
 
 import gradio as gr
-from transformers import
-import openai
+from transformers import GPTNeoForCausalLM, AutoTokenizer
 
-
-
-
-
+tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B")
+model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B")
+
+def generate_text(prompt):
+    input_ids = tokenizer.encode(prompt, return_tensors="pt")
+    output = model.generate(input_ids, max_length=1024, temperature=0.5, do_sample=True)
+    return tokenizer.decode(output[0], skip_special_tokens=True)
 
-def openai_chat(prompt):
-    completions = openai.Completion.create( engine="text-davinci-003",prompt=prompt,max_tokens=1024,n=1,temperature=0.5,)
-    message = completions.choices[0].text
-    return message.strip()
-
 def chatbot(input, history=[]):
-
-
-
+    output = generate_text(input)
+    history.append((input, output))
+    return history, history
 
-gr.Interface(
-
-
+gr.Interface(
+    fn=chatbot,
+    inputs=["text", "state"],
+    outputs=["chatbot", "state"]
+).launch(debug=True)
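
For context, here is a minimal sketch of how app.py reads after this change, assembled from the added and unchanged lines above. The pip-install preamble that precedes the hunk is omitted, and the comments are added here for explanation rather than taken from the commit.

import gradio as gr
from transformers import GPTNeoForCausalLM, AutoTokenizer

# Load the 2.7B-parameter GPT-Neo checkpoint and its tokenizer once at startup,
# so every request reuses the same in-process model instead of the OpenAI API.
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B")
model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B")

def generate_text(prompt):
    # Tokenize the prompt, sample a continuation (do_sample=True is what makes
    # the temperature setting take effect), and decode it back to plain text.
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    output = model.generate(input_ids, max_length=1024, temperature=0.5, do_sample=True)
    return tokenizer.decode(output[0], skip_special_tokens=True)

def chatbot(input, history=[]):
    # Generate a reply, append the (user, bot) pair to the running history, and
    # return it twice: once for the chatbot display, once for the hidden state.
    output = generate_text(input)
    history.append((input, output))
    return history, history

gr.Interface(
    fn=chatbot,
    inputs=["text", "state"],      # free-text box plus the conversation state
    outputs=["chatbot", "state"]   # chat display plus the updated state
).launch(debug=True)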
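
Because chatbot both appends to and returns history, each turn carries the whole conversation forward as a list of (user_message, generated_reply) pairs, which the "chatbot" output component renders as a chat transcript. A rough check of that flow outside the UI, assuming the sketch above has been executed (the prompts are just placeholders):

history = []
display, history = chatbot("Hello there", history)
display, history = chatbot("And one follow-up question", history)
# display and history are the same list of (user, reply) pairs, now two turns long

One caveat worth noting: model.generate returns the prompt tokens followed by the sampled continuation, so the decoded reply starts by echoing the user's input, and max_length=1024 caps prompt plus continuation together (max_new_tokens is the parameter that limits only the newly generated text).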