TheoLvs committed
Commit abfa81d
1 Parent(s): 2e9b317

Updated CSS

Files changed (2)
  1. app.py +61 -45
  2. style.css +9 -9
app.py CHANGED
@@ -33,8 +33,9 @@ except:
     pass
 
 # Set up Gradio Theme
-theme = gr.themes.Soft(
-    primary_hue="sky",
+theme = gr.themes.Base(
+    primary_hue="blue",
+    secondary_hue="red",
     font=[gr.themes.GoogleFont("Poppins"), "ui-sans-serif", "system-ui", "sans-serif"],
 )
 
@@ -63,10 +64,31 @@ user_id = create_user_id(10)
 # ClimateQ&A core functions
 #---------------------------------------------------------------------------
 
+from langchain.callbacks.base import BaseCallbackHandler
+from queue import Queue, Empty
+from threading import Thread
+from collections.abc import Generator
+
+# Create a Queue
+Q = Queue()
+
+class QueueCallback(BaseCallbackHandler):
+    """Callback handler for streaming LLM responses to a queue."""
+
+    def __init__(self, q):
+        self.q = q
+
+    def on_llm_new_token(self, token: str, **kwargs: any) -> None:
+        self.q.put(token)
+
+    def on_llm_end(self, *args, **kwargs: any) -> None:
+        return self.q.empty()
+
+
 # Create embeddings function and LLM
 embeddings_function = HuggingFaceEmbeddings(model_name = "sentence-transformers/multi-qa-mpnet-base-dot-v1")
-llm = get_llm(max_tokens = 1024,temperature = 0.0,verbose = True,streaming = False,
-              callbacks=[StreamingStdOutCallbackHandler()],
+llm = get_llm(max_tokens = 1024,temperature = 0.0,verbose = True,streaming = True,
+              callbacks=[QueueCallback(Q)],
 )
 
 # Create vectorstore and retriever
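Note: the QueueCallback added here is the producer half of a common streaming pattern: the LLM worker puts each token on the shared queue Q as it is generated, and a consumer drains it. Below is a minimal self-contained sketch of the same idea, with a stub producer standing in for the real get_llm/LangChain objects (the stub names are illustrative only, not part of this commit):

    from queue import Queue
    from threading import Thread

    q = Queue()
    job_done = object()  # sentinel marking end-of-stream

    def fake_producer():
        # Stand-in for the streaming LLM: a real run would call q.put(token)
        # from QueueCallback.on_llm_new_token as each token arrives
        for token in ["Climate ", "change ", "is ", "happening."]:
            q.put(token)
        q.put(job_done)

    Thread(target=fake_producer).start()
    while True:
        token = q.get(timeout=1)
        if token is job_done:
            break
        print(token, end="", flush=True)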
@@ -80,56 +102,49 @@ chain = load_climateqa_chain(retriever,llm)
 # From https://github.com/gradio-app/gradio/issues/5345
 #---------------------------------------------------------------------------
 
-# from langchain.callbacks.base import BaseCallbackHandler
-# from queue import Queue, Empty
-# from threading import Thread
-# from collections.abc import Generator
 
-# class QueueCallback(BaseCallbackHandler):
-#     """Callback handler for streaming LLM responses to a queue."""
 
-#     def __init__(self, q):
-#         self.q = q
 
-#     def on_llm_new_token(self, token: str, **kwargs: any) -> None:
-#         self.q.put(token)
 
-#     def on_llm_end(self, *args, **kwargs: any) -> None:
-#         return self.q.empty()
-
 
-# def stream(input_text) -> Generator:
-#     # Create a Queue
-#     q = Queue()
-#     job_done = object()
+# Create a function that will return our generator
+def stream(chain, input_text) -> Generator:
+    with Q.mutex:
+        Q.queue.clear()
+    job_done = object()
 
-#     llm = get_llm(max_tokens = 1024,temperature = 0.0,verbose = True,streaming = True,
-#                   callbacks=[QueueCallback(q)],
-#     )
+    # Create a function to call - this will run in a thread
+    def task():
+        answer = chain({"query":input_text,"audience":"expert climate scientist"})
+        Q.put(job_done)
 
-#     chain = load_climateqa_chain(retriever,llm)
+    # Create a thread and start the function
+    t = Thread(target=task)
+    t.start()
 
-#     # Create a function to call - this will run in a thread
-#     def task():
-#         answer = chain({"query":input_text,"audience":"expert climate scientist"})
-#         q.put(job_done)
+    content = ""
 
-#     # Create a thread and start the function
-#     t = Thread(target=task)
-#     t.start()
+    # Get each new token from the queue and yield for our generator
+    while True:
+        try:
+            next_token = Q.get(True, timeout=1)
+            if next_token is job_done:
+                break
+            content += next_token
+            yield next_token, content
+        except Empty:
+            continue
 
-#     content = ""
-
-#     # Get each new token from the queue and yield for our generator
-#     while True:
-#         try:
-#             next_token = q.get(True, timeout=1)
-#             if next_token is job_done:
-#                 break
-#             content += next_token
-#             yield next_token, content
-#         except Empty:
-#             continue
 
+def stream_sentences(chain, input_text) -> Generator:
+    """Wrapper around stream() that yields complete sentences."""
+    sentence = ""
+    for next_token, content in stream(chain, input_text):
+        sentence += next_token
+        if "\n\n" in next_token:
+            yield sentence
+            sentence = ""
+    if sentence:
+        yield sentence
 
 def answer_user(message,history):
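The stream/stream_sentences generators above can feed a Gradio event handler that yields progressively longer chat history. A hypothetical wiring, not part of this commit (the handler name and the (message, reply) history format are assumptions for illustration):

    def answer_bot_streaming(message, history):
        # Hypothetical generator handler: grow the bot reply sentence by sentence
        partial = ""
        for sentence in stream_sentences(chain, message):
            partial += sentence
            yield history + [(message, partial)]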
@@ -154,6 +169,7 @@ def answer_bot(message,history,audience):
     # history_langchain_format.append(HumanMessage(content=message)
     # for next_token, content in stream(message):
     #     yield(content)
+
     output = chain({"query":message,"audience":audience_prompt})
     question = output["question"]
     sources = output["source_documents"]
@@ -347,7 +363,7 @@ with gr.Blocks(title="🌍 Climate Q&A", css="style.css", theme=theme) as demo:
     with gr.Row(elem_id="chatbot-row"):
        with gr.Column(scale=2):
            # state = gr.State([system_template])
-            bot = gr.Chatbot(height="100%",show_copy_button=True,show_label = False,elem_id="chatbot")
+            bot = gr.Chatbot(show_copy_button=True,show_label = False,elem_id="chatbot",layout = "panel",avatar_images = (None,"assets/logo4.png"))
            textbox=gr.Textbox(placeholder="Ask me a question about climate change or biodiversity in any language!",show_label=False)
            submit_button = gr.Button("Submit")
 
 
style.css CHANGED
@@ -108,17 +108,17 @@ a {
 
 
 .message.user{
-    background-color:#7494b0 !important;
+    /* background-color:#7494b0 !important; */
     border:none;
-    color:white!important;
+    /* color:white!important; */
 }
 
 .message.bot{
-    background-color:#f2f2f7 !important;
+    /* background-color:#f2f2f7 !important; */
     border:none;
 }
 
-.gallery-item > div:hover{
+/* .gallery-item > div:hover{
     background-color:#7494b0 !important;
     color:white!important;
 }
@@ -134,18 +134,18 @@ a {
 
 .label{
     color:#577b9b!important;
-}
+} */
 
-.paginate{
+/* .paginate{
     color:#577b9b!important;
-}
+} */
 
 
 
-span[data-testid="block-info"]{
+/* span[data-testid="block-info"]{
     background:none !important;
     color:#577b9b;
-}
+} */
 
 /* Pseudo-element for the circularly cropped picture */
 /* .message.bot::before {
 
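Net effect of the stylesheet edit: the hard-coded user/bot message colors and several selector overrides (.gallery-item hover, .label, .paginate, block-info) are commented out, so those elements now inherit their styling from the Gradio theme configured in app.py.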