Spaces:
Sleeping
Sleeping
ganagalachalapathi75
committed on
Commit
•
98f0af1
1
Parent(s):
4c6b8d9
Upload app.py with huggingface_hub
Browse files
app.py
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import gradio as gr
|
3 |
+
from langchain.chat_models import ChatOpenAI
|
4 |
+
from langchain import LLMChain, PromptTemplate
|
5 |
+
from langchain.memory import ConversationBufferMemory
|
6 |
+
|
7 |
+
# Read the OpenAI key from the environment. NOTE(review): this variable is
# never passed on explicitly — langchain's ChatOpenAI reads the same
# environment variable itself, so this line mainly documents the requirement.
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')

# System prompt for the assistant persona. The two placeholders are filled in
# by the chain: {chat_history} from the conversation memory, {user_message}
# from the latest user turn.
template = """Meet Riya, your youthful and witty personal assistant! At 21 years old, she's full of energy and always eager to help. Riya's goal is to assist you with any questions or problems you might have. Her enthusiasm shines through in every response, making interactions with her enjoyable and engaging.
{chat_history}
User: {user_message}
Chatbot:"""
|
13 |
+
|
14 |
+
# Prompt wiring: the template consumes the running chat history plus the
# newest user message; the declared input variables must match the
# placeholders in `template` and the chain's memory key below.
prompt = PromptTemplate(
    template=template,
    input_variables=["chat_history", "user_message"],
)

# Conversation buffer that accumulates every turn under the "chat_history"
# key, which the prompt template interpolates on each call.
memory = ConversationBufferMemory(memory_key="chat_history")
|
19 |
+
|
20 |
+
# Chain that binds the chat model, the persona prompt, and the shared
# conversation memory together.
# FIX: the original passed temperature='0.5' (a string); ChatOpenAI's
# `temperature` is a float and the string only worked via pydantic's
# implicit coercion. Pass the float directly.
llm_chain = LLMChain(
    llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo"),
    prompt=prompt,
    verbose=True,  # log the fully rendered prompt on every call
    memory=memory,
)
|
26 |
+
|
27 |
+
def get_text_response(user_message, history):
    """Gradio ChatInterface callback: return the bot's reply to *user_message*.

    *history* is supplied by gradio but deliberately ignored here — the
    chain's ConversationBufferMemory already tracks the dialogue.
    NOTE(review): that memory is module-level, so every browser session
    shares one conversation state.
    """
    return llm_chain.predict(user_message=user_message)
|
30 |
+
|
31 |
+
# Build the chat UI around the response callback.
demo = gr.ChatInterface(get_text_response)

if __name__ == "__main__":
    # To create a public link, set `share=True` in `launch()`.
    # To enable errors and logs, set `debug=True` in `launch()`.
    demo.launch()
|