adarsh committed on
Commit
e3e6646
β€’
1 Parent(s): 97131a6

updated thinking anim

Browse files
Files changed (1) hide show
  1. app.py +15 -177
app.py CHANGED
@@ -1,164 +1,3 @@
1
- # import streamlit as st
2
- # from langchain.prompts import PromptTemplate
3
- # from langchain_community.llms import CTransformers
4
- # from src.helper import download_hf_embeddings, text_split, download_hf_model
5
- # from langchain_community.vectorstores import Pinecone as LangchainPinecone
6
- # import os
7
- # from dotenv import load_dotenv
8
- # from src.prompt import prompt_template
9
- # from langchain.chains import RetrievalQA
10
- # import time
11
- # from pinecone import Pinecone
12
- # from tqdm.auto import tqdm
13
-
14
- # # Load environment variables
15
- # load_dotenv()
16
-
17
- # PINECONE_API_KEY = os.getenv('PINECONE_API_KEY')
18
- # index_name = "medicure-chatbot"
19
-
20
- # # Set page configuration
21
- # st.set_page_config(page_title="Medical Chatbot", page_icon="πŸ₯", layout="wide")
22
-
23
- # # Custom CSS for styling
24
- # st.markdown("""
25
- # <style>
26
- # .stApp {
27
- # background-color: #f0f8ff;
28
- # }
29
- # .stButton>button {
30
- # background-color: #4CAF50;
31
- # color: white;
32
- # border-radius: 20px;
33
- # border: none;
34
- # padding: 10px 20px;
35
- # transition: all 0.3s ease;
36
- # }
37
- # .stButton>button:hover {
38
- # background-color: #333;
39
- # transform: scale(1.05);
40
- # color:#fff;
41
- # }
42
- # .footer {
43
- # position: fixed;
44
- # left: 0;
45
- # bottom: 0;
46
- # width: 100%;
47
- # background-color: #333;
48
- # color: white;
49
- # text-align: center;
50
- # padding: 10px 0;
51
- # }
52
- # .social-icons a {
53
- # color: white;
54
- # margin: 0 10px;
55
- # font-size: 24px;
56
- # }
57
- # </style>
58
- # """, unsafe_allow_html=True)
59
-
60
- # # Initialize session state for chat history
61
- # if 'chat_history' not in st.session_state:
62
- # st.session_state.chat_history = []
63
-
64
- # # Header
65
- # st.title("πŸ₯ Medicure RAG Chatbot")
66
-
67
- # # Display welcome message
68
- # st.write("Welcome to Medicure Chatbot! Ask any medical question and I'll do my best to help you.")
69
- # st.write("#### Built with πŸ€— Ctransformers, Langchain, and Pinecone. Powered by Metal-llama2-7b-chat quantized LLM")
70
-
71
- # # Initialize the chatbot components
72
- # @st.cache_resource
73
- # def initialize_chatbot():
74
-
75
- # embeddings = download_hf_embeddings()
76
- # # model_name_or_path = "TheBloke/Llama-2-7B-Chat-GGML"
77
- # # model_basename = "llama-2-7b-chat.ggmlv3.q4_0.bin"
78
- # # model_path = download_hf_model(model_name_or_path, model_basename)
79
- # model_path = "TheBloke/Llama-2-7B-Chat-GGML"
80
- # llm = CTransformers(model=model_path,
81
- # model_type="llama",
82
- # config={'max_new_tokens': 512,
83
- # 'temperature': 0.8})
84
-
85
-
86
-
87
-
88
- # # initiaize pinecone
89
-
90
- # pc = Pinecone(api_key=PINECONE_API_KEY)
91
- # index = pc.Index(index_name)
92
-
93
- # PROMPT = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
94
- # chain_type_kwargs = {"prompt": PROMPT}
95
- # docsearch = LangchainPinecone(index, embeddings.embed_query, "text")
96
- # qa = RetrievalQA.from_chain_type(
97
- # llm=llm,
98
- # chain_type="stuff",
99
- # retriever=docsearch.as_retriever(search_kwargs={'k': 2}),
100
- # return_source_documents=True,
101
- # chain_type_kwargs=chain_type_kwargs)
102
- # return qa
103
-
104
- # qa = initialize_chatbot()
105
-
106
- # # Chat interface
107
- # user_input = st.text_input("Ask your question:")
108
- # if st.button("Send", key="send"):
109
- # if user_input:
110
- # # Create a placeholder for the progress bar
111
- # progress_placeholder = st.empty()
112
-
113
- # # Simulate progress with tqdm
114
- # total_steps = 100
115
- # with tqdm(total=total_steps, file=progress_placeholder, desc="Thinking", bar_format='{l_bar}{bar}') as pbar:
116
- # for i in range(total_steps):
117
- # time.sleep(0.05) # Adjust this value to control the speed of the progress bar
118
- # pbar.update(1)
119
-
120
- # # Get the actual response
121
- # result = qa({"query": user_input})
122
- # response = result["result"]
123
-
124
- # # Clear the progress bar
125
- # progress_placeholder.empty()
126
-
127
- # st.session_state.chat_history.append(("You", user_input))
128
- # st.session_state.chat_history.append(("Bot", response))
129
-
130
- # # Display chat history
131
- # st.subheader("Chat History")
132
- # for role, message in st.session_state.chat_history:
133
- # if role == "You":
134
- # st.markdown(f"**You:** {message}")
135
- # else:
136
- # st.markdown(f"**Bot:** {message}")
137
-
138
- # # Animated loading for visual appeal
139
- # def load_animation():
140
- # with st.empty():
141
- # for i in range(3):
142
- # for j in ["β‹…", "β‹…β‹…", "β‹…β‹…β‹…", "β‹…β‹…β‹…β‹…"]:
143
- # st.write(f"Loading{j}")
144
- # time.sleep(0.2)
145
- # st.write("")
146
-
147
- # # Footer with social links
148
- # st.markdown("""
149
- # <div class="footer">
150
- # <div class="social-icons">
151
- # <a href="https://github.com/4darsh-Dev" target="_blank"><i class="fab fa-github"></i></a>
152
- # <a href="https://linkedin.com/in/adarsh-maurya-dev" target="_blank"><i class="fab fa-linkedin"></i></a>
153
- # <a href="https://adarshmaurya.onionreads.com" target="_blank"><i class="fas fa-globe"></i></a>
154
- # <a href="https://www.kaggle.com/adarshm09" target="_blank"><i class="fab fa-kaggle"></i></a>
155
- # </div>
156
- # <p> <p style="text-align:center;">Made with ❀️ by <a href="https://www.adarshmaurya.onionreads.com">Adarsh Maurya</a></p> </p>
157
- # </div>
158
- # """, unsafe_allow_html=True)
159
-
160
- # # Load Font Awesome for icons
161
- # st.markdown('<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.1/css/all.min.css">', unsafe_allow_html=True)
162
 
163
 
164
  import streamlit as st
@@ -271,23 +110,11 @@ qa = initialize_chatbot(k_value, max_new_tokens, temperature)
271
  # Chat interface
272
  user_input = st.text_input("Ask your question:")
273
  if st.button("Send", key="send"):
 
274
  if user_input:
275
- # Create a progress bar
276
- progress_bar = st.progress(0)
277
-
278
-
279
- total_steps = 100
280
- for i in range(total_steps):
281
- time.sleep(0.05)
282
- progress_bar.progress((i + 1) / total_steps)
283
-
284
- # Get the actual response
285
- result = qa({"query": user_input})
286
- response = result["result"]
287
-
288
- # Clear the progress bar
289
- progress_bar.empty()
290
-
291
  st.session_state.chat_history.append(("You", user_input))
292
  st.session_state.chat_history.append(("Bot", response))
293
 
@@ -299,6 +126,17 @@ for role, message in st.session_state.chat_history:
299
  else:
300
  st.markdown(f"**Bot:** {message}")
301
 
 
 
 
 
 
 
 
 
 
 
 
302
  # Footer with social links
303
  st.markdown("""
304
  <div class="footer">
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
 
2
 
3
  import streamlit as st
 
110
  # Chat interface
111
  user_input = st.text_input("Ask your question:")
112
  if st.button("Send", key="send"):
113
+
114
  if user_input:
115
+ with st.spinner("Thinking..."):
116
+ result = qa({"query": user_input})
117
+ response = result["result"]
 
 
 
 
 
 
 
 
 
 
 
 
 
118
  st.session_state.chat_history.append(("You", user_input))
119
  st.session_state.chat_history.append(("Bot", response))
120
 
 
126
  else:
127
  st.markdown(f"**Bot:** {message}")
128
 
129
+
130
+ # Animated loading for visual appeal
131
+ def load_animation():
132
+ with st.empty():
133
+ for i in range(3):
134
+ for j in ["β‹…", "β‹…β‹…", "β‹…β‹…β‹…", "β‹…β‹…β‹…β‹…"]:
135
+ st.write(f"Loading{j}")
136
+ time.sleep(0.2)
137
+ st.write("")
138
+
139
+
140
  # Footer with social links
141
  st.markdown("""
142
  <div class="footer">