dineth554 committed on
Commit
d0a6d13
1 Parent(s): 70c3062

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -44
app.py CHANGED
@@ -1,10 +1,11 @@
1
  import os
2
  import logging
3
- import streamlit as st
 
4
 
5
  # Install necessary libraries using os.system
6
  os.system("pip install --upgrade pip")
7
- os.system("pip install streamlit llama-cpp-agent huggingface_hub trafilatura beautifulsoup4 requests duckduckgo-search googlesearch-python")
8
 
9
  # Attempt to import all required modules
10
  try:
@@ -22,22 +23,7 @@ try:
22
  from utils import CitingSources
23
  from settings import get_context_by_model, get_messages_formatter_type
24
  except ImportError as e:
25
- st.error(f"Error importing modules: {e}")
26
- if 'utils' in str(e):
27
- st.warning("Mocking utils.CitingSources")
28
- class CitingSources:
29
- sources = []
30
-
31
- if 'settings' in str(e):
32
- st.warning("Mocking settings functions")
33
- def get_context_by_model(model):
34
- return 4096
35
-
36
- def get_messages_formatter_type(model):
37
- return MessagesFormatterType.BASIC
38
-
39
- import logging
40
- from huggingface_hub import hf_hub_download
41
 
42
  # Download the models
43
  hf_hub_download(
@@ -57,7 +43,7 @@ hf_hub_download(
57
  )
58
 
59
  # Function to respond to user messages
60
- def respond(message, history, temperature, top_p, top_k, repeat_penalty):
61
  try:
62
  model = "mixtral-8x7b-instruct-v0.1.Q5_K_M.gguf"
63
  max_tokens = 3000
@@ -106,12 +92,6 @@ def respond(message, history, temperature, top_p, top_k, repeat_penalty):
106
 
107
  messages = BasicChatHistory()
108
 
109
- for msn in history:
110
- user = {"role": Roles.user, "content": msn[0]}
111
- assistant = {"role": Roles.assistant, "content": msn[1]}
112
- messages.add_message(user)
113
- messages.add_message(assistant)
114
-
115
  result = web_search_agent.get_chat_response(
116
  message,
117
  llm_sampling_settings=settings,
@@ -136,7 +116,6 @@ def respond(message, history, temperature, top_p, top_k, repeat_penalty):
136
 
137
  for text in response_text:
138
  outputs += text
139
- yield outputs
140
 
141
  output_settings = LlmStructuredOutputSettings.from_pydantic_models(
142
  [CitingSources], LlmStructuredOutputType.object_instance
@@ -153,23 +132,24 @@ def respond(message, history, temperature, top_p, top_k, repeat_penalty):
153
  )
154
  outputs += "\n\nSources:\n"
155
  outputs += "\n".join(citing_sources.sources)
156
- yield outputs
157
 
158
  except Exception as e:
159
- st.error(f"An error occurred: {e}")
160
-
161
- st.title("Novav2 Web Engine")
162
-
163
- message = st.text_input("Enter your message:")
164
- history = st.session_state.get("history", [])
165
- temperature = st.slider("Temperature", min_value=0.1, max_value=1.0, value=0.45, step=0.1)
166
- top_p = st.slider("Top-p", min_value=0.1, max_value=1.0, value=0.95, step=0.05)
167
- top_k = st.slider("Top-k", min_value=0, max_value=100, value=40, step=1)
168
- repeat_penalty = st.slider("Repetition penalty", min_value=0.0, max_value=2.0, value=1.1, step=0.1)
169
-
170
- if st.button("Send"):
171
- response_generator = respond(message, history, temperature, top_p, top_k, repeat_penalty)
172
- for response in response_generator:
173
- st.write(response)
174
- history.append((message, response))
175
- st.session_state["history"] = history
 
 
1
  import os
2
  import logging
3
+ import gradio as gr
4
+ from huggingface_hub import hf_hub_download
5
 
6
  # Install necessary libraries using os.system
7
  os.system("pip install --upgrade pip")
8
+ os.system("pip install llama-cpp-agent huggingface_hub trafilatura beautifulsoup4 requests duckduckgo-search googlesearch-python")
9
 
10
  # Attempt to import all required modules
11
  try:
 
23
  from utils import CitingSources
24
  from settings import get_context_by_model, get_messages_formatter_type
25
  except ImportError as e:
26
+ raise ImportError(f"Error importing modules: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
  # Download the models
29
  hf_hub_download(
 
43
  )
44
 
45
  # Function to respond to user messages
46
+ def respond(message, temperature, top_p, top_k, repeat_penalty):
47
  try:
48
  model = "mixtral-8x7b-instruct-v0.1.Q5_K_M.gguf"
49
  max_tokens = 3000
 
92
 
93
  messages = BasicChatHistory()
94
 
 
 
 
 
 
 
95
  result = web_search_agent.get_chat_response(
96
  message,
97
  llm_sampling_settings=settings,
 
116
 
117
  for text in response_text:
118
  outputs += text
 
119
 
120
  output_settings = LlmStructuredOutputSettings.from_pydantic_models(
121
  [CitingSources], LlmStructuredOutputType.object_instance
 
132
  )
133
  outputs += "\n\nSources:\n"
134
  outputs += "\n".join(citing_sources.sources)
135
+ return outputs
136
 
137
  except Exception as e:
138
+ return f"An error occurred: {e}"
139
+
140
+ # Gradio interface
141
+ demo = gr.Interface(
142
+ fn=respond,
143
+ inputs=[
144
+ gr.Textbox(label="Enter your message:"),
145
+ gr.Slider(minimum=0.1, maximum=1.0, value=0.45, step=0.1, label="Temperature"),
146
+ gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
147
+ gr.Slider(minimum=0, maximum=100, value=40, step=1, label="Top-k"),
148
+ gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Repetition penalty")
149
+ ],
150
+ outputs="text",
151
+ title="Novav2 Web Engine"
152
+ )
153
+
154
+ if __name__ == "__main__":
155
+ demo.launch()