derek-thomas (HF staff) committed on
Commit f1ecb2c
1 Parent(s): 944ee1c

Adding cached examples, need to find out how to change the order.

Files changed (1)
  1. app.py  +27 -4
app.py CHANGED
@@ -28,7 +28,7 @@ tokenizer = AutoTokenizer.from_pretrained('derek-thomas/jais-13b-chat-hf')
 # Examples
 examples = ['من كان طرفي معركة اكتيوم البحرية؟',
             'لم السماء زرقاء؟',
-            "من فاز بكأس العالم للرجال في عام 2014؟",]
+            "من فاز بكأس العالم للرجال في عام 2014؟", ]
 
 
 def add_text(history, text):
@@ -45,7 +45,8 @@ def bot(history, hyde=False):
     # Retrieve documents relevant to query
     document_start = perf_counter()
     if hyde:
-        hyde_document = generate(f"Write a wikipedia article intro paragraph to answer this query: {query}").split('### Response: [|AI|]')[-1]
+        hyde_document = generate(f"Write a wikipedia article intro paragraph to answer this query: {query}").split(
+            '### Response: [|AI|]')[-1]
 
         logger.warning(hyde_document)
         documents = retriever(hyde_document, top_k=top_k)
@@ -76,6 +77,7 @@ def bot(history, hyde=False):
     history[-1][1] = response.split('### Response: [|AI|]')[-1]
     return history, prompt_html
 
+
 intro_md = """
 # Arabic RAG
 This is a project to demonstrate Retreiver Augmented Generation (RAG) in Arabic and English. It uses
@@ -91,6 +93,14 @@ I'm using Inference Endpoint's Scale to Zero to save money on GPUs. If the staus
 chat to wake it up. You will get a `500 error` and it will take ~7 min to wake up.
 """
 
+
+def process_example(text, history=[]):
+    history = history + [[text, None]]
+    return bot(history)
+
+
+# hyde_prompt_html = gr.HTML()
+
 with gr.Blocks() as demo:
     gr.Markdown(intro_md)
     endpoint_status = gr.Textbox(check_endpoint_status, label="Inference Endpoint Status", every=1)
@@ -114,8 +124,15 @@ with gr.Blocks() as demo:
         )
         txt_btn = gr.Button(value="Submit text", scale=1)
 
-        gr.Examples(examples, txt)
+        # gr.Examples(examples, txt)
         prompt_html = gr.HTML()
+        gr.Examples(
+            examples=examples,
+            inputs=txt,
+            outputs=[chatbot, prompt_html],
+            fn=process_example,
+            cache_examples=True, )
+        # prompt_html.render()
         # Turn off interactivity while generating if you click
         txt_msg = txt_btn.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
             bot, chatbot, [chatbot, prompt_html])
@@ -150,8 +167,14 @@ with gr.Blocks() as demo:
         )
         hyde_txt_btn = gr.Button(value="Submit text", scale=1)
 
-        gr.Examples(examples, hyde_txt)
         hyde_prompt_html = gr.HTML()
+        gr.Examples(
+            examples=examples,
+            inputs=hyde_txt,
+            outputs=[hyde_chatbot, hyde_prompt_html],
+            fn=process_example,
+            cache_examples=True, )
+        # prompt_html.render()
         # Turn off interactivity while generating if you click
         hyde_txt_msg = hyde_txt_btn.click(add_text, [hyde_chatbot, hyde_txt], [hyde_chatbot, hyde_txt],
                                           queue=False).then(
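
For reference, the caching pattern this commit introduces can be reduced to a minimal, self-contained sketch (illustrative only: the `answer` stub below stands in for the Space's `process_example`/`bot` pair, and the component names are placeholders, not the app's own). With `cache_examples=True`, Gradio runs `fn` on every example once at startup and stores the outputs, so clicking an example later replays the cached chat history and context HTML instead of calling the inference endpoint. As for the ordering question in the commit message: inside `gr.Blocks`, components and `gr.Examples` are laid out in the order they are instantiated, so one way to move the examples table is simply to instantiate `gr.Examples` later; another is to create a component with `render=False` and place it afterwards with `.render()`, which appears to be what the commented-out `prompt_html.render()` lines were probing.

import gradio as gr

examples = ["لم السماء زرقاء؟", "من كان طرفي معركة اكتيوم البحرية؟"]


def answer(text, history=[]):
    # Stand-in for process_example/bot: append the query with a canned reply and
    # return (chat history, context HTML), matching the outputs declared below.
    history = history + [[text, "(model response would go here)"]]
    return history, "<p>(retrieved context would go here)</p>"


with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    txt = gr.Textbox(label="Query")
    prompt_html = gr.HTML()  # defined before gr.Examples, so it is laid out above the examples table
    gr.Examples(
        examples=examples,
        inputs=txt,
        outputs=[chatbot, prompt_html],
        fn=answer,
        cache_examples=True,  # runs `answer` on each example at startup and caches the results
    )

demo.launch()

The `[[user, bot], ...]` pair format returned by `answer` matches the chat-history format the app above passes to `gr.Chatbot`.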