import os

import openai
import gradio as gr

# Read the API key from the environment rather than hard-coding a secret in the source
openai.api_key = os.getenv("OPENAI_API_KEY")

start_sequence = "\nAI:"
restart_sequence = "\nHuman:"
def predict(input, history=[]):
    # Rebuild the conversation as chat messages from the stored (user, bot) pairs
    messages = []
    for user_msg, bot_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": input})

    # gpt-3.5-turbo is a chat model, so it is served by the ChatCompletion endpoint
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        temperature=0.9,
        max_tokens=1050,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0.6,
    )
    reply = response["choices"][0]["message"]["content"]

    history.append((input, reply))
    return history, history
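
# Quick sanity check for predict() outside the Gradio UI. This is an illustrative,
# hypothetical usage sketch (it assumes OPENAI_API_KEY is set in the environment),
# not part of the app itself, so it is left commented out:
# history = []
# history, _ = predict("Hello, who are you?", history)
# print(history[-1][1])  # the assistant's reply to the latest message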
gr.Interface(
    fn=predict,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    description="heyoo",
).launch()
# def get_model_reply(user_input, context=[]):
#     context += [user_input]
#     completion = openai.Completion.create(
#         engine="gpt-3.5-turbo",  # one of the most capable models available
#         prompt='\n'.join([f"I am {role}.", *context])[:4096],
#         max_tokens=1048,
#         temperature=0.9,
#         top_p=1,
#         frequency_penalty=0,
#         presence_penalty=0.6,
#     )
#     # append response to context
#     response = completion.choices[0].text.strip('\n')
#     context += [response]
#     # list of (user, bot) responses. We will use this format later
#     responses = [(u, b) for u, b in zip(context[::2], context[1::2])]
#     return responses, context

# defines a basic dialog interface using Gradio
# with gr.Blocks() as dialog_app:
#     chatbot = gr.Chatbot()  # dedicated "chatbot" component
#     state = gr.State([])    # session state that persists across multiple submits
#     with gr.Row():
#         txt = gr.Textbox(
#             show_label=False,
#             placeholder="Enter text and press enter",
#         ).style(container=False)
#     txt.submit(get_model_reply, [txt, state], [chatbot, state])
#
# dialog_app.launch()
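
# A minimal sketch of the Blocks-based dialog UI outlined in the commented code above,
# adapted to reuse predict() (an assumption, not the original author's wiring). The helper
# name build_dialog_app is hypothetical. To try it, swap the gr.Interface(...).launch()
# call above for build_dialog_app().launch().
def build_dialog_app():
    with gr.Blocks() as dialog_app:
        chatbot = gr.Chatbot()  # dedicated "chatbot" component
        state = gr.State([])    # session state that persists across multiple submits
        with gr.Row():
            txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter")
        # wire the textbox to the same prediction function used by the Interface app
        txt.submit(predict, [txt, state], [chatbot, state])
    return dialog_app

# Example: build_dialog_app().launch()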