Saurabhgk18 committed
Commit 36145c4
1 Parent(s): d8befc5

Upload 2 files

Files changed (2)
  1. app.py +75 -0
  2. assets/chatbot.jpg +0 -0
app.py ADDED
@@ -0,0 +1,75 @@
+ import asyncio
+ import os
+ import streamlit as st
+ from langchain.llms import HuggingFaceHub
+ from langchain.chains import ConversationChain
+ from langchain.chains.conversation.memory import ConversationSummaryBufferMemory
+ # Requires the HUGGING_FACE_HUB_API_KEY environment variable to be set
+
+ st.sidebar.title("Welcome Wanderers", help='This is just a beta model and is still in progress!')
+ # Add an image to the sidebar
+ st.sidebar.image("assets/chatbot.jpg")
+
+ st.sidebar.divider()
+
+ # Create a sidebar dropdown for choosing the backing model
+ selected_option = st.sidebar.selectbox("Select Model:", ["lmsys/fastchat-t5-3b-v1.0", "google/flan-t5-base"])
+
+ # Display the selected option below the dropdown
+ # st.sidebar.write("Model : ", selected_option)
+
+ st.sidebar.divider()
+
+ max_length = st.sidebar.slider("Max Length", value=132, min_value=32, max_value=250)
+ temperature = st.sidebar.slider("Temperature", value=0.60, min_value=0.0, max_value=1.0, step=0.05)
+
+ repo_id = selected_option
+ llm = HuggingFaceHub(
+     huggingfacehub_api_token=os.environ['HUGGING_FACE_HUB_API_KEY'],
+     repo_id=repo_id,
+     model_kwargs={
+         'temperature': temperature,
+         'max_length': max_length,
+     }
+ )
+
+ # Summarising buffer memory keeps the running conversation within the token limit
+ memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=80)
+ conversation_buf = ConversationChain(
+     llm=llm,
+     memory=memory
+ )
+
+ st.markdown("<h1 style='text-align: center;'>Chat Application 🚀🤖</h1>", unsafe_allow_html=True)
+
+ st.divider()
+ default_value = "See how a modern neural network auto-completes your text 🤗 This site, built by me \nusing HuggingFace models, is like having a smart machine that completes \nyour thoughts 😀 Get started by typing a custom snippet, check out the repository, \nor try one of the examples. Have fun!"
+ st.text(default_value)
+
+ st.divider()
+
+ # Create a placeholder for the conversation history
+ conversation_history_placeholder = st.empty()
+
+ # List for the transcript (rebuilt on every Streamlit rerun, so it only shows the latest exchange)
+ conversation_history = []
+
+ user_input = st.text_input("Your Query", max_chars=2024)
+
+ if st.button("Predict"):
+     # Prepend the user input to the conversation history
+     conversation_history.insert(0, f"User: {user_input}")
+
+     # Run the async chain call; the result dict holds 'input', 'history' and 'response'
+     prediction = asyncio.run(conversation_buf.acall(inputs=user_input))
+     response = prediction['response']
+     if response.startswith('<pad>'):  # fastchat-t5 emits a leading "<pad>" token
+         response = response[len('<pad>'):].strip()
+
+     # Add the model response below the user's message
+     conversation_history.insert(1, f"Mr.Zhongli: {response}")
+
+     # Update the conversation history placeholder
+     conversation_history_placeholder.text_area("Conversation...", "\n".join(conversation_history), height=200)
+
+ # st.text(memory.buffer)
assets/chatbot.jpg ADDED
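Since Streamlit reruns app.py from top to bottom on every interaction, the conversation_history list above is rebuilt each time and the on-screen transcript only ever shows the latest exchange (the chain's memory still tracks earlier turns). A minimal sketch of keeping the transcript in st.session_state instead is shown below; run_chain is a hypothetical stand-in for the app's asyncio.run(conversation_buf.acall(...)) call.

import streamlit as st


def run_chain(prompt: str) -> str:
    # Hypothetical stand-in for asyncio.run(conversation_buf.acall(inputs=prompt))
    return f"(model reply to: {prompt})"


# st.session_state survives reruns, unlike a plain module-level list
if "conversation_history" not in st.session_state:
    st.session_state.conversation_history = []

user_input = st.text_input("Your Query", max_chars=2024)

if st.button("Predict"):
    response = run_chain(user_input)
    # Newest exchange goes on top: user line first, model reply directly under it
    st.session_state.conversation_history[:0] = [f"User: {user_input}", f"Mr.Zhongli: {response}"]

st.text_area("Conversation...", "\n".join(st.session_state.conversation_history), height=200)

With this pattern the displayed transcript and the chain's memory stay in sync across turns.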