Baweja committed on
Commit
39451f4
1 Parent(s): f1ec405

Rename app.py to app_RAG.py

Files changed (2)
  1. app.py +0 -422
  2. app_RAG.py +129 -0
app.py DELETED
@@ -1,422 +0,0 @@
- # import torch
- # import transformers
- # from transformers import RagRetriever, RagSequenceForGeneration, AutoTokenizer, AutoModelForCausalLM
- # import gradio as gr
-
- # device = 'cuda' if torch.cuda.is_available() else 'cpu'
-
-
- # dataset_path = "./5k_index_data/my_knowledge_dataset"
- # index_path = "./5k_index_data/my_knowledge_dataset_hnsw_index.faiss"
-
- # tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq")
- # retriever = RagRetriever.from_pretrained("facebook/rag-sequence-nq", index_name="custom",
- #                                          passages_path = dataset_path,
- #                                          index_path = index_path,
- #                                          n_docs = 5)
- # rag_model = RagSequenceForGeneration.from_pretrained('facebook/rag-sequence-nq', retriever=retriever)
- # rag_model.retriever.init_retrieval()
- # rag_model.to(device)
- # model = AutoModelForCausalLM.from_pretrained('HuggingFaceH4/zephyr-7b-beta',
- #                                              device_map = 'auto',
- #                                              torch_dtype = torch.bfloat16,
- #                                              )
-
-
- # def strip_title(title):
- #     if title.startswith('"'):
- #         title = title[1:]
- #     if title.endswith('"'):
- #         title = title[:-1]
-
- #     return title
-
- # # getting the correct prompt format for the generation model
- # def input_format(query, context):
- #     sys_instruction = f'Context:\n {context} \n Given the following information, generate an answer to the question. Provide links from the information in the answer to increase credibility.'
- #     message = f'Question: {query}'
-
- #     return f'<bos><start_of_turn>\n{sys_instruction}' + f' {message}<end_of_turn>\n'
-
- # # retrieving and generating answer in one call
- # def retrieved_info(query, rag_model = rag_model, generating_model = model):
- #     # Tokenize Query
- #     retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
- #         [query],
- #         return_tensors = 'pt',
- #         padding = True,
- #         truncation = True,
- #     )['input_ids'].to(device)
-
- #     # Retrieve Documents
- #     question_encoder_output = rag_model.rag.question_encoder(retriever_input_ids)
- #     question_encoder_pool_output = question_encoder_output[0]
-
- #     result = rag_model.retriever(
- #         retriever_input_ids,
- #         question_encoder_pool_output.cpu().detach().to(torch.float32).numpy(),
- #         prefix = rag_model.rag.generator.config.prefix,
- #         n_docs = rag_model.config.n_docs,
- #         return_tensors = 'pt',
- #     )
-
- #     # Preparing query and retrieved docs for model
- #     all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
- #     retrieved_context = []
- #     for docs in all_docs:
- #         titles = [strip_title(title) for title in docs['title']]
- #         texts = docs['text']
- #         for title, text in zip(titles, texts):
- #             retrieved_context.append(f'{title}: {text}')
-
- #     generation_model_input = input_format(query, retrieved_context)
-
- #     # Generating answer using the zephyr model
- #     tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
- #     input_ids = tokenizer(generation_model_input, return_tensors='pt')['input_ids'].to(device)
- #     output = generating_model.generate(input_ids, max_new_tokens = 256)
-
- #     return tokenizer.decode(output[0])
-
-
- # def respond(
- #     message,
- #     history: list[tuple[str, str]],
- #     system_message,
- #     max_tokens,
- #     temperature,
- #     top_p,
- # ):
- #     if message:  # If there's a user query
- #         response = retrieved_info(message)  # Get the answer from your local FAISS and Q&A model
- #         return response
-
- #     # In case no message, return an empty string
- #     return ""
-
-
- # """
- # For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- # """
- # # Custom title and description
- # title = "🧠 Welcome to Your AI Knowledge Assistant"
- # description = """
- # Hi! I am your loyal assistant. My functionality is based on the RAG model. I retrieve relevant information and provide answers based on that. Ask me any questions, and let me assist you.
- # My capabilities are limited because I am still in the development phase. I will do my best to assist you. So let's begin!
- # """
-
- # demo = gr.ChatInterface(
- #     respond,
- #     type = 'messages',
- #     additional_inputs=[
- #         gr.Textbox(value="You are a helpful and friendly assistant.", label="System message"),
- #         gr.Slider(minimum=1, maximum=2048, value=256, step=1, label="Max new tokens"),
- #         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
- #         gr.Slider(
- #             minimum=0.1,
- #             maximum=1.0,
- #             value=0.95,
- #             step=0.05,
- #             label="Top-p (nucleus sampling)",
- #         ),
- #     ],
- #     title=title,
- #     description=description,
- #     textbox=gr.Textbox(placeholder="'What is the future of AI?' or 'App Development'"),
- #     examples=[["✨Future of AI"], ["📱App Development"]],
- #     example_icons=["🤖", "📱"],
- #     theme="compact",
- #     submit_btn = True,
- # )
-
-
- # if __name__ == "__main__":
- #     demo.launch(share = True )
-
- # import torch
- # import transformers
- # from transformers import RagRetriever, RagSequenceForGeneration, AutoTokenizer, AutoModelForCausalLM
- # import gradio as gr
-
- # device = 'cuda' if torch.cuda.is_available() else 'cpu'
-
-
- # dataset_path = "./5k_index_data/my_knowledge_dataset"
- # index_path = "./5k_index_data/my_knowledge_dataset_hnsw_index.faiss"
-
- # tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq")
- # retriever = RagRetriever.from_pretrained("facebook/rag-sequence-nq", index_name="custom",
- #                                          passages_path = dataset_path,
- #                                          index_path = index_path,
- #                                          n_docs = 5)
- # rag_model = RagSequenceForGeneration.from_pretrained('facebook/rag-sequence-nq', retriever=retriever)
- # rag_model.retriever.init_retrieval()
- # rag_model.to(device)
- # model = AutoModelForCausalLM.from_pretrained('HuggingFaceH4/zephyr-7b-beta',
- #                                              device_map = 'auto',
- #                                              torch_dtype = torch.bfloat16,
- #                                              )
-
-
- # def strip_title(title):
- #     if title.startswith('"'):
- #         title = title[1:]
- #     if title.endswith('"'):
- #         title = title[:-1]
-
- #     return title
-
- # # getting the correct chat format for the generation model
- # def input_format(query, context):
- #     # sys_instruction = f'Context:\n {context} \n Given the following information, generate an answer to the question. Provide links from the information in the answer to increase credibility.'
- #     # message = f'Question: {query}'
-
- #     # return f'<bos><start_of_turn>\n{sys_instruction}' + f' {message}<end_of_turn>\n'
- #     return [
- #         {
- #             "role": "system", "content": f'Context:\n {context} \n Given the following information, generate an answer to the question. Provide links from the information in the answer to increase credibility.' },
-
- #         {
- #             "role": "user", "content": f"{query}"},
- #     ]
-
- # # retrieving and generating answer in one call
- # def retrieved_info(query, rag_model = rag_model, generating_model = model):
- #     # Tokenize Query
- #     retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
- #         [query],
- #         return_tensors = 'pt',
- #         padding = True,
- #         truncation = True,
- #     )['input_ids'].to(device)
-
- #     # Retrieve Documents
- #     question_encoder_output = rag_model.rag.question_encoder(retriever_input_ids)
- #     question_encoder_pool_output = question_encoder_output[0]
-
- #     result = rag_model.retriever(
- #         retriever_input_ids,
- #         question_encoder_pool_output.cpu().detach().to(torch.float32).numpy(),
- #         prefix = rag_model.rag.generator.config.prefix,
- #         n_docs = rag_model.config.n_docs,
- #         return_tensors = 'pt',
- #     )
-
- #     # Preparing query and retrieved docs for model
- #     all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
- #     retrieved_context = []
- #     for docs in all_docs:
- #         titles = [strip_title(title) for title in docs['title']]
- #         texts = docs['text']
- #         for title, text in zip(titles, texts):
- #             retrieved_context.append(f'{title}: {text}')
- #     print(retrieved_context)
-
- #     generation_model_input = input_format(query, retrieved_context[0])
-
- #     # Generating answer using the zephyr model
- #     tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
- #     input_ids = tokenizer.apply_chat_template(generation_model_input, add_generation_prompt=True, return_tensors='pt').to(device)
- #     output = generating_model.generate(input_ids, max_new_tokens = 256)
-
- #     return tokenizer.decode(output[0])
-
-
- # def respond(
- #     message,
- #     history: list[tuple[str, str]],
- #     system_message,
- #     max_tokens,
- #     temperature,
- #     top_p,
- # ):
- #     if message:  # If there's a user query
- #         response = retrieved_info(message)  # Get the answer from your local FAISS and Q&A model
- #         return response
-
- #     # In case no message, return an empty string
- #     return ""
-
-
- # """
- # For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- # """
- # # Custom title and description
- # title = "🧠 Welcome to Your AI Knowledge Assistant"
- # description = """
- # Hi! I am your loyal assistant. My functionality is based on the RAG model. I retrieve relevant information and provide answers based on that. Ask me any questions, and let me assist you.
- # My capabilities are limited because I am still in the development phase. I will do my best to assist you. So let's begin!
- # """
-
- # demo = gr.ChatInterface(
- #     respond,
- #     type = 'messages',
- #     additional_inputs=[
- #         gr.Textbox(value="You are a helpful and friendly assistant.", label="System message"),
- #         gr.Slider(minimum=1, maximum=2048, value=256, step=1, label="Max new tokens"),
- #         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
- #         gr.Slider(
- #             minimum=0.1,
- #             maximum=1.0,
- #             value=0.95,
- #             step=0.05,
- #             label="Top-p (nucleus sampling)",
- #         ),
- #     ],
- #     title=title,
- #     description=description,
- #     textbox=gr.Textbox(placeholder="'What is the future of AI?' or 'App Development'"),
- #     examples=[["✨Future of AI"], ["📱App Development"]],
- #     #example_icons=["🤖", "📱"],
- #     theme="compact",
- #     submit_btn = True,
- # )
-
-
- # if __name__ == "__main__":
- #     demo.launch(share = True,
- #                 show_error = True)
-
-
- import torch
- import transformers
- from transformers import RagRetriever, RagSequenceForGeneration, AutoTokenizer, AutoModelForCausalLM, pipeline
- import gradio as gr
-
- device = 'cuda' if torch.cuda.is_available() else 'cpu'
-
-
- dataset_path = "./5k_index_data/my_knowledge_dataset"
- index_path = "./5k_index_data/my_knowledge_dataset_hnsw_index.faiss"
-
- retriever = RagRetriever.from_pretrained("facebook/rag-sequence-nq", index_name="custom",
-                                          passages_path = dataset_path,
-                                          index_path = index_path,
-                                          n_docs = 5)
- rag_model = RagSequenceForGeneration.from_pretrained('facebook/rag-sequence-nq', retriever=retriever)
- rag_model.retriever.init_retrieval()
- rag_model.to(device)
-
- pipe = pipeline(
-     "text-generation",
-     model="google/gemma-2-2b-it",
-     model_kwargs={"torch_dtype": torch.bfloat16},
-     device=device,
- )
-
- def strip_title(title):
-     if title.startswith('"'):
-         title = title[1:]
-     if title.endswith('"'):
-         title = title[:-1]
-
-     return title
-
-
- def retrieved_info(query, rag_model = rag_model):
-     # Tokenize Query
-     retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
-         [query],
-         return_tensors = 'pt',
-         padding = True,
-         truncation = True,
-     )['input_ids'].to(device)
-
-     # Retrieve Documents
-     question_encoder_output = rag_model.rag.question_encoder(retriever_input_ids)
-     question_encoder_pool_output = question_encoder_output[0]
-
-     result = rag_model.retriever(
-         retriever_input_ids,
-         question_encoder_pool_output.cpu().detach().to(torch.float32).numpy(),
-         prefix = rag_model.rag.generator.config.prefix,
-         n_docs = rag_model.config.n_docs,
-         return_tensors = 'pt',
-     )
-
-     # Preparing query and retrieved docs for model
-     all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
-     retrieved_context = []
-     for docs in all_docs:
-         titles = [strip_title(title) for title in docs['title']]
-         texts = docs['text']
-         for title, text in zip(titles, texts):
-             retrieved_context.append(f'{title}: {text}')
-
-
-     # Generating answer using the gemma model. Gemma's chat template does not
-     # accept a "system" role, so the retrieved context is folded into the user turn.
-     messages = [
-         {"role": "user", "content": f"Context: {retrieved_context}. Use the links and information from the Context to answer the query in brief. Provide links in the answer.\n\nQuery: {query}"},
-     ]
-
-     outputs = pipe(messages, max_new_tokens=256)
-     assistant_response = outputs[0]["generated_text"][-1]["content"].strip()
-
-     return assistant_response
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     if message:  # If there's a user query
-         response = retrieved_info(message)  # Get the answer from your local FAISS and Q&A model
-         return response
-
-     # In case no message, return an empty string
-     return ""
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- # Custom title and description
- title = "🧠 Welcome to Your AI Knowledge Assistant"
- description = """
- Hi! I am your loyal assistant. My functionality is based on the RAG model. I retrieve relevant information and provide answers based on that. Ask me any question, and let me assist you.
- My capabilities are limited because I am still in the development phase. I will do my best to assist you. So let's begin!
- """
-
-
- demo = gr.ChatInterface(
-     respond,
-     type = 'messages',
-     additional_inputs=[
-         gr.Textbox(value="You are a helpful and friendly assistant.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
-     title=title,
-     description=description,
-     submit_btn = True,
-     textbox=gr.Textbox(placeholder="'What is the future of AI?' or 'App Development'"),
-     examples=[["Future of AI"], ["App Development"]],
-     theme="compact",
- )
-
-
- if __name__ == "__main__":
-     demo.launch(share = True )
app_RAG.py ADDED
@@ -0,0 +1,129 @@
+ import torch
+ import transformers
+ from transformers import RagRetriever, RagSequenceForGeneration, AutoModelForCausalLM, pipeline
+ import gradio as gr
+
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+ dataset_path = "./5k_index_data/my_knowledge_dataset"
+ index_path = "./5k_index_data/my_knowledge_dataset_hnsw_index.faiss"
+
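+ # Load the pretrained RAG retriever over the local, FAISS-indexed knowledge base.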
+ retriever = RagRetriever.from_pretrained("facebook/rag-sequence-nq", index_name="custom",
+                                          passages_path = dataset_path,
+                                          index_path = index_path,
+                                          n_docs = 5)
+ rag_model = RagSequenceForGeneration.from_pretrained('facebook/rag-sequence-nq', retriever=retriever)
+ rag_model.retriever.init_retrieval()
+ rag_model.to(device)
+
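+ # Generator: an instruction-tuned Gemma model behind the text-generation
+ # pipeline; bfloat16 weights keep the memory footprint small.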
+ pipe = pipeline(
+     "text-generation",
+     model="google/gemma-2-2b-it",
+     model_kwargs={"torch_dtype": torch.bfloat16},
+     device=device,
+ )
+
+ def strip_title(title):
+     if title.startswith('"'):
+         title = title[1:]
+     if title.endswith('"'):
+         title = title[:-1]
+
+     return title
+
+
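+ # Retrieve the top-n passages for a query with the RAG retriever, then pass
+ # the query plus the retrieved context to the Gemma pipeline for the answer.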
+ def retrieved_info(query, rag_model = rag_model):
+     # Tokenize Query
+     retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
+         [query],
+         return_tensors = 'pt',
+         padding = True,
+         truncation = True,
+     )['input_ids'].to(device)
+
+     # Retrieve Documents
+     question_encoder_output = rag_model.rag.question_encoder(retriever_input_ids)
+     question_encoder_pool_output = question_encoder_output[0]
+
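+     # Query the FAISS index with the question embedding; the result carries
+     # the ids of the n_docs nearest passages.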
+     result = rag_model.retriever(
+         retriever_input_ids,
+         question_encoder_pool_output.cpu().detach().to(torch.float32).numpy(),
+         prefix = rag_model.rag.generator.config.prefix,
+         n_docs = rag_model.config.n_docs,
+         return_tensors = 'pt',
+     )
+
+     # Preparing query and retrieved docs for model
+     all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
+     retrieved_context = []
+     for docs in all_docs:
+         titles = [strip_title(title) for title in docs['title']]
+         texts = docs['text']
+         for title, text in zip(titles, texts):
+             retrieved_context.append(f'{title}: {text}')
+
+
+     # Generating answer using the gemma model. Gemma's chat template does not
+     # accept a "system" role, so the retrieved context is folded into the user turn.
+     messages = [
+         {"role": "user", "content": f"Context: {retrieved_context}. Use the links and information from the Context to answer the query in brief. Provide links in the answer.\n\nQuery: {query}"},
+     ]
+
+     outputs = pipe(messages, max_new_tokens=256)
+     assistant_response = outputs[0]["generated_text"][-1]["content"].strip()
+
+     return assistant_response
+
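+
+ # Gradio callback. The system-message, max-tokens, temperature, and top-p
+ # controls below are accepted but not yet wired into retrieved_info().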
+ def respond(
+     message,
+     history: list[tuple[str, str]],
+     system_message,
+     max_tokens,
+     temperature,
+     top_p,
+ ):
+     if message:  # If there's a user query
+         response = retrieved_info(message)  # Get the answer from your local FAISS and Q&A model
+         return response
+
+     # In case no message, return an empty string
+     return ""
+
+
+ """
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+ """
+ # Custom title and description
+ title = "🧠 Welcome to Your AI Knowledge Assistant"
+ description = """
+ Hi! I am your loyal assistant. My functionality is based on the RAG model. I retrieve relevant information and provide answers based on that. Ask me any question, and let me assist you.
+ My capabilities are limited because I am still in the development phase. I will do my best to assist you. So let's begin!
+ """
+
+ demo = gr.ChatInterface(
+     respond,
+     type = 'messages',
+     additional_inputs=[
+         gr.Textbox(value="You are a helpful and friendly assistant.", label="System message"),
+         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+         gr.Slider(
+             minimum=0.1,
+             maximum=1.0,
+             value=0.95,
+             step=0.05,
+             label="Top-p (nucleus sampling)",
+         ),
+     ],
+     title=title,
+     description=description,
+     submit_btn = True,
+     textbox=gr.Textbox(placeholder="'What is the future of AI?' or 'App Development'"),
+     examples=[["Future of AI"], ["App Development"]],
+     theme="compact",
+ )
+
+ if __name__ == "__main__":
+     demo.launch(share = True )