vishwask committed on
Commit
f0b041f
1 Parent(s): bc8dadd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -17
app.py CHANGED
@@ -26,7 +26,7 @@ import transformers
26
  from pydub import AudioSegment
27
  from streamlit_extras.streaming_write import write
28
  import time
29
- from langchain.llms import HuggingFaceHub
30
  import transformers
31
  from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
32
  translation_model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
@@ -123,10 +123,8 @@ def load_model(_docs):
123
  top_p=0.95,
124
  repetition_penalty=1.15,
125
  streamer=streamer,)
126
- #llm = HuggingFacePipeline(pipeline=text_pipeline, model_kwargs={"temperature": 0.1})
127
-
128
- llm = HuggingFaceHub(repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1", model_kwargs={"temperature": 0.1, "max_new_tokens": 1024, "top_k": 3, "load_in_8bit": True})
129
-
130
  # SYSTEM_PROMPT = ("Use the following pieces of context to answer the question at the end. "
131
  # "If you don't know the answer, just say that you don't know, "
132
  # "don't try to make up an answer.")
@@ -139,7 +137,7 @@ def load_model(_docs):
139
  qa_chain = RetrievalQA.from_chain_type(
140
  llm=llm,
141
  chain_type="stuff",
142
- retriever=db.as_retriever(search_kwargs={"k": 10}),
143
  return_source_documents=True,
144
  chain_type_kwargs={"prompt": prompt,
145
  "verbose": False})
@@ -178,16 +176,14 @@ qa_chain = load_model(docs)
178
  if prompt := st.chat_input("How can I help you today?"):
179
  st.session_state.messages.append({"role": "user", "content": prompt})
180
  with st.chat_message("user"):
181
- #english_prompt = hindi_to_english(prompt)
182
- #st.markdown(english_prompt)
183
- st.markdown(prompt)
184
  with st.chat_message("assistant"):
185
  with st.spinner(text="Looking for relevant answer"):
186
  message_placeholder = st.empty()
187
  full_response = ""
188
  message_history = "\n".join(list(get_message_history())[-3:])
189
  result = qa_chain(prompt)
190
- #result = qa_chain(english_prompt)
191
  output = [result['result']]
192
 
193
  def generate_pdf():
@@ -240,10 +236,10 @@ if prompt := st.chat_input("How can I help you today?"):
240
  # #yield word + " "
241
  # time.sleep(0.1)
242
 
243
- #for item in output:
244
- # full_response += english_to_hindi(item)[0]
245
- # message_placeholder.markdown(full_response + "▌")
246
- # message_placeholder.markdown(full_response)
247
  # message_placeholder.markdown(result['source_documents'])
248
 
249
  #stream_example()
@@ -254,9 +250,7 @@ if prompt := st.chat_input("How can I help you today?"):
254
  # message_placeholder.markdown(write(stream_example))
255
 
256
  #write(stream_example)
257
- #message_placeholder.markdown(english_to_hindi(result['result'])[0])
258
- message_placeholder.markdown(result['result'])
259
-
260
 
261
  # sound_file = BytesIO()
262
  # tts = gTTS(result['result'], lang='en')
 
26
  from pydub import AudioSegment
27
  from streamlit_extras.streaming_write import write
28
  import time
29
+
30
  import transformers
31
  from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
32
  translation_model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
 
123
  top_p=0.95,
124
  repetition_penalty=1.15,
125
  streamer=streamer,)
126
+ llm = HuggingFacePipeline(pipeline=text_pipeline, model_kwargs={"temperature": 0.1})
127
+
 
 
128
  # SYSTEM_PROMPT = ("Use the following pieces of context to answer the question at the end. "
129
  # "If you don't know the answer, just say that you don't know, "
130
  # "don't try to make up an answer.")
 
137
  qa_chain = RetrievalQA.from_chain_type(
138
  llm=llm,
139
  chain_type="stuff",
140
+ retriever=db.as_retriever(search_kwargs={"k": 3}),
141
  return_source_documents=True,
142
  chain_type_kwargs={"prompt": prompt,
143
  "verbose": False})
 
176
  if prompt := st.chat_input("How can I help you today?"):
177
  st.session_state.messages.append({"role": "user", "content": prompt})
178
  with st.chat_message("user"):
179
+ english_prompt = hindi_to_english(prompt)
180
+ st.markdown(english_prompt)
 
181
  with st.chat_message("assistant"):
182
  with st.spinner(text="Looking for relevant answer"):
183
  message_placeholder = st.empty()
184
  full_response = ""
185
  message_history = "\n".join(list(get_message_history())[-3:])
186
  result = qa_chain(prompt)
 
187
  output = [result['result']]
188
 
189
  def generate_pdf():
 
236
  # #yield word + " "
237
  # time.sleep(0.1)
238
 
239
+ for item in output:
240
+ full_response += english_to_hindi(item)[0]
241
+ message_placeholder.markdown(full_response + "▌")
242
+ message_placeholder.markdown(full_response)
243
  # message_placeholder.markdown(result['source_documents'])
244
 
245
  #stream_example()
 
250
  # message_placeholder.markdown(write(stream_example))
251
 
252
  #write(stream_example)
253
+ message_placeholder.markdown(english_to_hindi(result['result'])[0])
 
 
254
 
255
  # sound_file = BytesIO()
256
  # tts = gTTS(result['result'], lang='en')