Tricia Nieva committed on
Commit
bfeccc6
1 Parent(s): 6838f70

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -8
app.py CHANGED
@@ -7,12 +7,23 @@
7
  import os
8
  import openai
9
  import gradio as gr
 
 
10
 
11
  openai.organization = "org-orRhfBkKOfOuNACbjPyWKbUt"
12
  openai.api_key = "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"
13
 
14
- def chat(text):
15
 
 
 
 
 
 
 
 
 
 
16
  response = openai.Completion.create(
17
  model="davinci:ft-placeholder:ai-dhd-2022-12-07-10-09-37",
18
  prompt= input,
@@ -20,10 +31,15 @@ def chat(text):
20
  max_tokens=608,
21
  top_p=1,
22
  frequency_penalty=0,
23
- presence_penalty=0)
24
-
25
- xyz = response[Completion]
26
- return xyz
27
-
28
- demo = gr.Interface(chat, "text", "text")
29
- demo.launch(share=True, auth=("username", "password")).launch(debug = True)
 
 
 
 
 
 
7
  import os
8
  import openai
9
  import gradio as gr
10
+ from transformers import AutoModelForCausalLM, AutoTokenizer
11
+ import torch
12
 
13
  # WARNING(security): live OpenAI credentials are hard-coded and committed
  # to version control — this key is publicly exposed and must be revoked
  # and rotated immediately. Load secrets from the environment instead,
  # e.g. openai.api_key = os.environ["OPENAI_API_KEY"] (os is already imported).
  openai.organization = "org-orRhfBkKOfOuNACbjPyWKbUt"
14
  openai.api_key = "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"
15
 
16
def predict(input, history=None):
    """Generate one chat turn with the fine-tuned OpenAI completion model.

    Args:
        input: The user's latest message (plain text from the Gradio textbox).
            (Name kept for interface compatibility even though it shadows the
            builtin.)
        history: Accumulated conversation as a list of (user, bot) string
            pairs, threaded through Gradio's "state" component. Defaults to
        None rather than a mutable `[]` default, which would be shared
            across calls and leak conversation between sessions.

    Returns:
        A (chat_pairs, history) tuple — the same list twice, once for the
        "chatbot" display output and once for the "state" round-trip.
    """
    if history is None:
        history = []

    # NOTE(review): the diff hunk elides one original parameter line here
    # (possibly temperature=...); confirm against the full file before merging.
    response = openai.Completion.create(
        model="davinci:ft-placeholder:ai-dhd-2022-12-07-10-09-37",
        prompt=input,
        max_tokens=608,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )

    # Bug fixes vs. the committed version:
    # - `response[Completion]` raised NameError (`Completion` is undefined),
    #   and `.tolist()` does not exist on a Completion response; the text
    #   lives under choices[0].text.
    # - The tokenizer/torch lines were dead code: `tokenizer` and the model
    #   were never loaded, so every call crashed with NameError. Removed.
    reply = response["choices"][0]["text"].strip()

    history.append((input, reply))
    return history, history
42
# Wire the chat function into a Gradio UI: a text input plus hidden
# conversation state in, a chatbot transcript plus updated state out.
demo = gr.Interface(
    fn=predict,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
)
demo.launch()