AI-DHD committed on
Commit
c45ecef
1 Parent(s): 6a65a2d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -28
app.py CHANGED
@@ -4,12 +4,31 @@ import gradio as gr
4
import os

import openai
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
6
 
7
- tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
- openai.organization = "org-orRhfBkKOfOuNACbjPyWKbUt"
10
- openai.api_key = "[REDACTED -- SECURITY: a live secret API key was committed here in plain text; it must be revoked immediately and loaded from an environment variable instead]"
11
 
12
- def predict(input, history=[]):
13
 
14
  new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')
15
 
@@ -27,28 +46,6 @@ def predict(input, history=[]):
27
  frequency_penalty=0,
28
  presence_penalty=0).tolist()
29
 
30
- # write some HTML
31
- html = "<div class='chatbot'>"
32
- for m, msg in enumerate(response):
33
- cls = "user" if m%2 == 0 else "bot"
34
- html += "<div class='msg {}'> {}</div>".format(cls, msg)
35
- html += "</div>"
36
-
37
- history = response[Completion]
38
-
39
- # convert the tokens to text, and then split the responses into lines
40
- response = tokenizer.decode(history[0]).split
41
-
42
- css = """
43
- .chatbox {display:flex;flex-direction:row}
44
- .msg {padding:4px;margin-bottom:4px;border-radius:4px;width:80%}
45
- .msg.user {background-color:cornflowerblue;color:white}
46
- .msg.bot {background-color:lightgray;align-self:self-end}
47
- .footer {display:none !important}
48
- """
49
-
50
  gr.Interface(fn=predict,
51
- theme="default",
52
- inputs=[gr.inputs.Textbox(placeholder="I'm AI-DHD - ask me anything!"), "state"],
53
- outputs=["html", "state"],
54
- css=css).launch()
 
4
  from transformers import AutoModelForCausalLM, AutoTokenizer
5
  import torch
6
 
7
+ openai.api_key = os.getenv("OPENAI_API_KEY")
8
+
9
def predict(input, history=[]):
    """Generate a chatbot reply for ``input`` using the fine-tuned OpenAI model.

    Parameters
    ----------
    input : str
        The user's latest message from the Gradio text box.
    history : list
        Conversation state carried between calls by Gradio's ``"state"``
        component. The mutable default is kept only for signature
        compatibility; it is rebound, never mutated in place.

    Returns
    -------
    tuple
        ``(chat_pairs, history)`` — the same list twice: a list of
        ``(user, bot)`` tuples for the ``"chatbot"`` output, and the
        state passed forward to the next call.
    """
    # Guard against the shared-mutable-default pitfall without changing
    # the public signature: rebind instead of appending to the default.
    if history is None:
        history = []

    # BUG FIX: the original was missing the comma after `prompt=input`,
    # which made this call a SyntaxError.
    completion = openai.Completion.create(
        model="davinci:ft-placeholder-2022-12-10-04-13-26",
        prompt=input,
        temperature=0.13,
        max_tokens=310,
        top_p=1,
        frequency_penalty=0.36,
        presence_penalty=1.25,
    )

    # BUG FIX: the original then called model.generate(bot_input_ids, ...)
    # and tokenizer.decode(...) — names this commit removed from the file —
    # which would raise NameError at runtime. Use the OpenAI completion
    # text directly instead.
    reply = completion.choices[0].text

    # The "chatbot" component renders a list of (user, bot) tuples;
    # build a new list rather than mutating the incoming state.
    history = history + [(input, reply)]
    return history, history
29
+
30
 
 
 
31
 
 
32
 
33
  new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')
34
 
 
46
  frequency_penalty=0,
47
  presence_penalty=0).tolist()
48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
# Wire `predict` into a minimal chat UI: free-text input plus Gradio
# "state" (conversation history) in; a chatbot widget plus the updated
# state out. `launch()` starts the local web server.
demo = gr.Interface(
    fn=predict,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
)
demo.launch()