barunsaha commited on
Commit
2605d55
1 Parent(s): e690364

Rename new chat app and legacy app files

Browse files
Files changed (3) hide show
  1. app.py +304 -191
  2. chat_app.py +0 -404
  3. legacy_app.py +291 -0
app.py CHANGED
@@ -1,282 +1,395 @@
1
- import pathlib
2
  import logging
 
 
3
  import tempfile
4
- from typing import List, Tuple
5
 
6
  import json5
7
- import metaphor_python as metaphor
8
  import streamlit as st
 
 
 
 
 
 
9
 
10
- from helpers import llm_helper, pptx_helper
11
  from global_config import GlobalConfig
 
12
 
13
 
14
- APP_TEXT = json5.loads(open(GlobalConfig.APP_STRINGS_FILE, 'r', encoding='utf-8').read())
15
- GB_CONVERTER = 2 ** 30
16
-
 
 
 
17
 
18
- logger = logging.getLogger(__name__)
 
19
 
20
 
21
  @st.cache_data
22
- def get_contents_wrapper(text: str) -> str:
23
  """
24
- Fetch and cache the slide deck contents on a topic by calling an external API.
25
 
26
- :param text: The presentation topic.
27
- :return: The slide deck contents or outline in JSON format.
28
  """
29
 
30
- logger.info('LLM call because of cache miss...')
31
- return llm_helper.generate_slides_content(text).strip()
 
 
 
 
 
 
32
 
33
 
34
  @st.cache_resource
35
- def get_metaphor_client_wrapper() -> metaphor.Metaphor:
36
  """
37
- Create a Metaphor client for semantic Web search.
38
 
39
- :return: Metaphor instance.
40
  """
41
 
42
- return metaphor.Metaphor(api_key=GlobalConfig.METAPHOR_API_KEY)
 
 
43
 
44
 
45
- @st.cache_data
46
- def get_web_search_results_wrapper(text: str) -> List[Tuple[str, str]]:
47
- """
48
- Fetch and cache the Web search results on a given topic.
49
 
50
- :param text: The topic.
51
- :return: A list of (title, link) tuples.
52
- """
53
-
54
- results = []
55
- search_results = get_metaphor_client_wrapper().search(
56
- text,
57
- use_autoprompt=True,
58
- num_results=5
59
- )
60
 
61
- for a_result in search_results.results:
62
- results.append((a_result.title, a_result.url))
63
 
64
- return results
 
 
 
 
 
 
 
65
 
66
 
67
- def build_ui():
68
  """
69
- Display the input elements for content generation. Only covers the first step.
70
  """
71
 
72
- # get_disk_used_percentage()
73
-
74
  st.title(APP_TEXT['app_name'])
75
  st.subheader(APP_TEXT['caption'])
76
- st.markdown(
77
- 'Powered by'
78
- ' [Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2).'
79
- )
80
- st.markdown(
81
- '*If the JSON is generated or parsed incorrectly, try again later by making minor changes'
82
- ' to the input text.*'
83
- )
84
 
85
- with st.form('my_form'):
86
- # Topic input
87
- try:
88
- with open(GlobalConfig.PRELOAD_DATA_FILE, 'r', encoding='utf-8') as in_file:
89
- preload_data = json5.loads(in_file.read())
90
- except (FileExistsError, FileNotFoundError):
91
- preload_data = {'topic': '', 'audience': ''}
92
-
93
- topic = st.text_area(
94
- APP_TEXT['input_labels'][0],
95
- value=preload_data['topic']
96
- )
97
 
98
- texts = list(GlobalConfig.PPTX_TEMPLATE_FILES.keys())
99
- captions = [GlobalConfig.PPTX_TEMPLATE_FILES[x]['caption'] for x in texts]
100
-
101
- pptx_template = st.radio(
102
- 'Select a presentation template:',
103
- texts,
104
- captions=captions,
105
- horizontal=True
106
- )
107
 
108
- st.divider()
109
- submit = st.form_submit_button('Generate slide deck')
110
 
111
- if submit:
112
- # st.write(f'Clicked {time.time()}')
113
- st.session_state.submitted = True
114
 
115
- # https://github.com/streamlit/streamlit/issues/3832#issuecomment-1138994421
116
- if 'submitted' in st.session_state:
117
- progress_text = 'Generating the slides...give it a moment'
118
- progress_bar = st.progress(0, text=progress_text)
119
 
120
- topic_txt = topic.strip()
121
- generate_presentation(topic_txt, pptx_template, progress_bar)
122
 
123
- st.divider()
124
- st.text(APP_TEXT['tos'])
125
- st.text(APP_TEXT['tos2'])
126
 
127
- st.markdown(
128
- '![Visitors]'
129
- '(https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fbarunsaha%2Fslide-deck-ai&countColor=%23263759)'
130
- )
131
 
132
 
133
- def generate_presentation(topic: str, pptx_template: str, progress_bar):
134
  """
135
- Process the inputs to generate the slides.
136
-
137
- :param topic: The presentation topic based on which contents are to be generated.
138
- :param pptx_template: The PowerPoint template name to be used.
139
- :param progress_bar: Progress bar from the page.
140
  """
141
 
142
- topic_length = len(topic)
143
- logger.debug('Input length:: topic: %s', topic_length)
144
-
145
- if topic_length >= 10:
146
- logger.debug('Topic: %s', topic)
147
- target_length = min(topic_length, GlobalConfig.LLM_MODEL_MAX_INPUT_LENGTH)
148
 
149
- try:
150
- # Step 1: Generate the contents in JSON format using an LLM
151
- json_str = process_slides_contents(topic[:target_length], progress_bar)
152
- logger.debug('Truncated topic: %s', topic[:target_length])
153
- logger.debug('Length of JSON: %d', len(json_str))
154
 
155
- # Step 2: Generate the slide deck based on the template specified
156
- if len(json_str) > 0:
157
- st.info(
158
- 'Tip: The generated content doesn\'t look so great?'
159
- ' Need alternatives? Just change your description text and try again.',
160
- icon="💡️"
161
- )
162
- else:
163
- st.error(
164
- 'Unfortunately, JSON generation failed, so the next steps would lead'
165
- ' to nowhere. Try again or come back later.'
166
- )
167
- return
168
 
169
- all_headers = generate_slide_deck(json_str, pptx_template, progress_bar)
170
 
171
- # Step 3: Bonus stuff: Web references and AI art
172
- show_bonus_stuff(all_headers)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
173
 
174
- except ValueError as ve:
175
- st.error(f'Unfortunately, an error occurred: {ve}! '
176
- f'Please change the text, try again later, or report it, sharing your inputs.')
177
 
 
 
 
178
  else:
179
- st.error('Not enough information provided! Please be little more descriptive :)')
 
 
 
180
 
 
181
 
182
- def process_slides_contents(text: str, progress_bar: st.progress) -> str:
183
- """
184
- Convert given text into structured data and display. Update the UI.
 
 
 
 
 
 
 
 
 
 
 
 
185
 
186
- :param text: The topic description for the presentation.
187
- :param progress_bar: Progress bar for this step.
188
- :return: The contents as a JSON-formatted string.
189
  """
 
190
 
191
- json_str = ''
 
 
192
 
193
- try:
194
- logger.info('Calling LLM for content generation on the topic: %s', text)
195
- json_str = get_contents_wrapper(text)
196
- except Exception as ex:
197
  st.error(
198
- f'An exception occurred while trying to convert to JSON. It could be because of heavy'
199
- f' traffic or something else. Try doing it again or try again later.'
200
- f'\nError message: {ex}'
201
  )
 
 
 
 
 
 
 
 
 
 
 
202
 
203
- progress_bar.progress(50, text='Contents generated')
 
204
 
205
- with st.expander('The generated contents (in JSON format)'):
206
- st.code(json_str, language='json')
 
 
207
 
208
- return json_str
209
 
210
 
211
- def generate_slide_deck(json_str: str, pptx_template: str, progress_bar) -> List:
212
  """
213
- Create a slide deck.
214
 
215
- :param json_str: The contents in JSON format.
216
- :param pptx_template: The PPTX template name.
217
- :param progress_bar: Progress bar.
218
- :return: A list of all slide headers and the title.
219
  """
220
 
221
- progress_text = 'Creating the slide deck...give it a moment'
222
- progress_bar.progress(75, text=progress_text)
 
223
 
224
- # # Get a unique name for the file to save -- use the session ID
225
- # ctx = st_sr.get_script_run_ctx()
226
- # session_id = ctx.session_id
227
- # timestamp = time.time()
228
- # output_file_name = f'{session_id}_{timestamp}.pptx'
229
 
230
- temp = tempfile.NamedTemporaryFile(delete=False, suffix='.pptx')
231
- path = pathlib.Path(temp.name)
 
232
 
233
- logger.info('Creating PPTX file...')
234
- all_headers = pptx_helper.generate_powerpoint_presentation(
235
- json_str,
236
- slides_template=pptx_template,
237
- output_file_path=path
238
- )
239
- progress_bar.progress(100, text='Done!')
240
 
241
- with open(path, 'rb') as f:
242
- st.download_button('Download PPTX file', f, file_name='Presentation.pptx')
243
 
244
- return all_headers
 
 
 
 
 
245
 
 
 
246
 
247
- def show_bonus_stuff(ppt_headers: List[str]):
248
  """
249
- Show bonus stuff for the presentation.
 
250
 
251
- :param ppt_headers: A list of the slide headings.
 
252
  """
253
 
254
- # Use the presentation title and the slide headers to find relevant info online
255
- logger.info('Calling Metaphor search...')
256
- ppt_text = ' '.join(ppt_headers)
257
- search_results = get_web_search_results_wrapper(ppt_text)
258
- md_text_items = []
259
 
260
- for (title, link) in search_results:
261
- md_text_items.append(f'[{title}]({link})')
 
 
 
 
262
 
263
- with st.expander('Related Web references'):
264
- st.markdown('\n\n'.join(md_text_items))
 
 
 
 
 
 
 
 
 
 
265
 
266
- logger.info('Done!')
267
 
268
- # # Avoid image generation. It costs time and an API call, so just limit to the text generation.
269
- # with st.expander('AI-generated image on the presentation topic'):
270
- # logger.info('Calling SDXL for image generation...')
271
- # # img_empty.write('')
272
- # # img_text.write(APP_TEXT['image_info'])
273
- # image = get_ai_image_wrapper(ppt_text)
274
- #
275
- # if len(image) > 0:
276
- # image = base64.b64decode(image)
277
- # st.image(image, caption=ppt_text)
278
- # st.info('Tip: Right-click on the image to save it.', icon="💡️")
279
- # logger.info('Image added')
 
 
 
280
 
281
 
282
  def main():
 
1
+ import datetime
2
  import logging
3
+ import pathlib
4
+ import random
5
  import tempfile
6
+ from typing import List
7
 
8
  import json5
 
9
  import streamlit as st
10
+ from langchain_community.chat_message_histories import (
11
+ StreamlitChatMessageHistory
12
+ )
13
+ from langchain_core.messages import HumanMessage
14
+ from langchain_core.prompts import ChatPromptTemplate
15
+ from transformers import AutoTokenizer
16
 
 
17
  from global_config import GlobalConfig
18
+ from helpers import llm_helper, pptx_helper
19
 
20
 
21
+ @st.cache_data
22
+ def _load_strings() -> dict:
23
+ """
24
+ Load various strings to be displayed in the app.
25
+ :return: The dictionary of strings.
26
+ """
27
 
28
+ with open(GlobalConfig.APP_STRINGS_FILE, 'r', encoding='utf-8') as in_file:
29
+ return json5.loads(in_file.read())
30
 
31
 
32
  @st.cache_data
33
+ def _get_prompt_template(is_refinement: bool) -> str:
34
  """
35
+ Return a prompt template.
36
 
37
+ :param is_refinement: Whether this is the initial or refinement prompt.
38
+ :return: The prompt template as f-string.
39
  """
40
 
41
+ if is_refinement:
42
+ with open(GlobalConfig.REFINEMENT_PROMPT_TEMPLATE, 'r', encoding='utf-8') as in_file:
43
+ template = in_file.read()
44
+ else:
45
+ with open(GlobalConfig.INITIAL_PROMPT_TEMPLATE, 'r', encoding='utf-8') as in_file:
46
+ template = in_file.read()
47
+
48
+ return template
49
 
50
 
51
  @st.cache_resource
52
+ def _get_tokenizer() -> AutoTokenizer:
53
  """
54
+ Get Mistral tokenizer for counting tokens.
55
 
56
+ :return: The tokenizer.
57
  """
58
 
59
+ return AutoTokenizer.from_pretrained(
60
+ pretrained_model_name_or_path=GlobalConfig.HF_LLM_MODEL_NAME
61
+ )
62
 
63
 
64
+ APP_TEXT = _load_strings()
 
 
 
65
 
66
+ # Session variables
67
+ CHAT_MESSAGES = 'chat_messages'
68
+ DOWNLOAD_FILE_KEY = 'download_file_name'
69
+ IS_IT_REFINEMENT = 'is_it_refinement'
 
 
 
 
 
 
70
 
71
+ logger = logging.getLogger(__name__)
72
+ progress_bar = st.progress(0, text='Setting up SlideDeck AI...')
73
 
74
+ texts = list(GlobalConfig.PPTX_TEMPLATE_FILES.keys())
75
+ captions = [GlobalConfig.PPTX_TEMPLATE_FILES[x]['caption'] for x in texts]
76
+ pptx_template = st.sidebar.radio(
77
+ 'Select a presentation template:',
78
+ texts,
79
+ captions=captions,
80
+ horizontal=True
81
+ )
82
 
83
 
84
+ def display_page_header_content():
85
  """
86
+ Display content in the page header.
87
  """
88
 
 
 
89
  st.title(APP_TEXT['app_name'])
90
  st.subheader(APP_TEXT['caption'])
91
+ # st.markdown(
92
+ # '![Visitors](https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fbarunsaha%2Fslide-deck-ai&countColor=%23263759)' # noqa: E501
93
+ # )
 
 
 
 
 
94
 
 
 
 
 
 
 
 
 
 
 
 
 
95
 
96
+ def display_page_footer_content():
97
+ """
98
+ Display content in the page footer.
99
+ """
 
 
 
 
 
100
 
101
+ st.text(APP_TEXT['tos'] + '\n\n' + APP_TEXT['tos2'])
 
102
 
 
 
 
103
 
104
+ def build_ui():
105
+ """
106
+ Display the input elements for content generation.
107
+ """
108
 
109
+ display_page_header_content()
 
110
 
111
+ with st.expander('Usage Policies and Limitations'):
112
+ display_page_footer_content()
 
113
 
114
+ progress_bar.progress(50, text='Setting up chat interface...')
115
+ set_up_chat_ui()
 
 
116
 
117
 
118
+ def set_up_chat_ui():
119
  """
120
+ Prepare the chat interface and related functionality.
 
 
 
 
121
  """
122
 
123
+ with st.expander('Usage Instructions'):
124
+ st.write(GlobalConfig.CHAT_USAGE_INSTRUCTIONS)
125
+ st.markdown(
126
+ 'SlideDeck AI is powered by'
127
+ ' [Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2)'
128
+ )
129
 
130
+ # view_messages = st.expander('View the messages in the session state')
 
 
 
 
131
 
132
+ st.chat_message('ai').write(
133
+ random.choice(APP_TEXT['ai_greetings'])
134
+ )
135
+ progress_bar.progress(100, text='Done!')
136
+ progress_bar.empty()
 
 
 
 
 
 
 
 
137
 
138
+ history = StreamlitChatMessageHistory(key=CHAT_MESSAGES)
139
 
140
+ if _is_it_refinement():
141
+ template = _get_prompt_template(is_refinement=True)
142
+ logger.debug('Getting refinement template')
143
+ else:
144
+ template = _get_prompt_template(is_refinement=False)
145
+ logger.debug('Getting initial template')
146
+
147
+ prompt_template = ChatPromptTemplate.from_template(template)
148
+
149
+ # Since Streamlit app reloads at every interaction, display the chat history
150
+ # from the save session state
151
+ for msg in history.messages:
152
+ msg_type = msg.type
153
+ if msg_type == 'user':
154
+ st.chat_message(msg_type).write(msg.content)
155
+ else:
156
+ st.chat_message(msg_type).code(msg.content, language='json')
157
+
158
+ if prompt := st.chat_input(
159
+ placeholder=APP_TEXT['chat_placeholder'],
160
+ max_chars=GlobalConfig.LLM_MODEL_MAX_INPUT_LENGTH
161
+ ):
162
+
163
+ progress_bar_pptx = st.progress(0, 'Preparing to run...')
164
+ if not _is_valid_prompt(prompt):
165
+ return
166
+
167
+ logger.info('User input: %s', prompt)
168
+ st.chat_message('user').write(prompt)
169
+
170
+ user_messages = _get_user_messages()
171
+ user_messages.append(prompt)
172
+ list_of_msgs = [
173
+ f'{idx + 1}. {msg}' for idx, msg in enumerate(user_messages)
174
+ ]
175
+ list_of_msgs = '\n'.join(list_of_msgs)
176
+
177
+ if _is_it_refinement():
178
+ formatted_template = prompt_template.format(
179
+ **{
180
+ 'instructions': list_of_msgs,
181
+ 'previous_content': _get_last_response()
182
+ }
183
+ )
184
+ else:
185
+ formatted_template = prompt_template.format(
186
+ **{
187
+ 'question': prompt,
188
+ }
189
+ )
190
+
191
+ progress_bar_pptx.progress(5, 'Calling LLM...will retry if connection times out...')
192
+ response: dict = llm_helper.hf_api_query({
193
+ 'inputs': formatted_template,
194
+ 'parameters': {
195
+ 'temperature': GlobalConfig.LLM_MODEL_TEMPERATURE,
196
+ 'min_length': GlobalConfig.LLM_MODEL_MIN_OUTPUT_LENGTH,
197
+ 'max_length': GlobalConfig.LLM_MODEL_MAX_OUTPUT_LENGTH,
198
+ 'max_new_tokens': GlobalConfig.LLM_MODEL_MAX_OUTPUT_LENGTH,
199
+ 'num_return_sequences': 1,
200
+ 'return_full_text': False,
201
+ # "repetition_penalty": 0.0001
202
+ },
203
+ 'options': {
204
+ 'wait_for_model': True,
205
+
206
+ 'use_cache': True
207
+ }
208
+ })
209
+
210
+ if len(response) > 0 and 'generated_text' in response[0]:
211
+ response: str = response[0]['generated_text'].strip()
212
+
213
+ st.chat_message('ai').code(response, language='json')
214
+
215
+ history.add_user_message(prompt)
216
+ history.add_ai_message(response)
217
+
218
+ if GlobalConfig.COUNT_TOKENS:
219
+ tokenizer = _get_tokenizer()
220
+ tokens_count_in = len(tokenizer.tokenize(formatted_template))
221
+ tokens_count_out = len(tokenizer.tokenize(response))
222
+ logger.debug(
223
+ 'Tokens count:: input: %d, output: %d',
224
+ tokens_count_in, tokens_count_out
225
+ )
226
+
227
+ # _display_messages_history(view_messages)
228
+
229
+ # The content has been generated as JSON
230
+ # There maybe trailing ``` at the end of the response -- remove them
231
+ # To be careful: ``` may be part of the content as well when code is generated
232
+ progress_bar_pptx.progress(50, 'Analyzing response...')
233
+ response_cleaned = _clean_json(response)
234
+
235
+ # Now create the PPT file
236
+ progress_bar_pptx.progress(75, 'Creating the slide deck...give it a moment')
237
+ generate_slide_deck(response_cleaned)
238
+ progress_bar_pptx.progress(100, text='Done!')
239
+
240
+
241
+ def generate_slide_deck(json_str: str):
242
+ """
243
+ Create a slide deck.
244
 
245
+ :param json_str: The content in *valid* JSON format.
246
+ """
 
247
 
248
+ if DOWNLOAD_FILE_KEY in st.session_state:
249
+ path = pathlib.Path(st.session_state[DOWNLOAD_FILE_KEY])
250
+ logger.debug('DOWNLOAD_FILE_KEY found in session')
251
  else:
252
+ temp = tempfile.NamedTemporaryFile(delete=False, suffix='.pptx')
253
+ path = pathlib.Path(temp.name)
254
+ st.session_state[DOWNLOAD_FILE_KEY] = str(path)
255
+ logger.debug('DOWNLOAD_FILE_KEY not found in session')
256
 
257
+ logger.debug('Creating PPTX file: %s...', st.session_state[DOWNLOAD_FILE_KEY])
258
 
259
+ try:
260
+ pptx_helper.generate_powerpoint_presentation(
261
+ json_str,
262
+ slides_template=pptx_template,
263
+ output_file_path=path
264
+ )
265
+
266
+ _display_download_button(path)
267
+ except ValueError as ve:
268
+ st.error(APP_TEXT['json_parsing_error'])
269
+ logger.error('%s', APP_TEXT['json_parsing_error'])
270
+ logger.error('Additional error info: %s', str(ve))
271
+ except Exception as ex:
272
+ st.error(APP_TEXT['content_generation_error'])
273
+ logger.error('Caught a generic exception: %s', str(ex))
274
 
275
+
276
+ def _is_valid_prompt(prompt: str) -> bool:
 
277
  """
278
+ Verify whether user input satisfies the concerned constraints.
279
 
280
+ :param prompt: The user input text.
281
+ :return: True if all criteria are satisfied; False otherwise.
282
+ """
283
 
284
+ if len(prompt) < 5 or ' ' not in prompt:
 
 
 
285
  st.error(
286
+ 'Not enough information provided!'
287
+ ' Please be a little more descriptive and type a few words with a few characters :)'
 
288
  )
289
+ return False
290
+
291
+ return True
292
+
293
+
294
+ def _is_it_refinement() -> bool:
295
+ """
296
+ Whether it is the initial prompt or a refinement.
297
+
298
+ :return: True if it is the initial prompt; False otherwise.
299
+ """
300
 
301
+ if IS_IT_REFINEMENT in st.session_state:
302
+ return True
303
 
304
+ if len(st.session_state[CHAT_MESSAGES]) >= 2:
305
+ # Prepare for the next call
306
+ st.session_state[IS_IT_REFINEMENT] = True
307
+ return True
308
 
309
+ return False
310
 
311
 
312
+ def _get_user_messages() -> List[str]:
313
  """
314
+ Get a list of user messages submitted until now from the session state.
315
 
316
+ :return: The list of user messages.
 
 
 
317
  """
318
 
319
+ return [
320
+ msg.content for msg in st.session_state[CHAT_MESSAGES] if isinstance(msg, HumanMessage)
321
+ ]
322
 
 
 
 
 
 
323
 
324
+ def _get_last_response() -> str:
325
+ """
326
+ Get the last response generated by AI.
327
 
328
+ :return: The response text.
329
+ """
330
+
331
+ return st.session_state[CHAT_MESSAGES][-1].content
 
 
 
332
 
 
 
333
 
334
+ def _display_messages_history(view_messages: st.expander):
335
+ """
336
+ Display the history of messages.
337
+
338
+ :param view_messages: The list of AI and Human messages.
339
+ """
340
 
341
+ with view_messages:
342
+ view_messages.json(st.session_state[CHAT_MESSAGES])
343
 
344
+ def _clean_json(json_str: str) -> str:
345
  """
346
+ Attempt to clean a JSON response string from the LLM by removing the trailing ```
347
+ and any text beyond that. May not be always accurate.
348
 
349
+ :param json_str: The input string in JSON format.
350
+ :return: The "cleaned" JSON string.
351
  """
352
 
353
+ str_len = len(json_str)
354
+ response_cleaned = json_str
 
 
 
355
 
356
+ try:
357
+ idx = json_str.rindex('```')
358
+ logger.debug(
359
+ 'Fixing JSON response: str_len: %d, idx of ```: %d',
360
+ str_len, idx
361
+ )
362
 
363
+ if idx + 3 == str_len:
364
+ # The response ends with ``` -- most likely the end of JSON response string
365
+ response_cleaned = json_str[:idx]
366
+ elif idx + 3 < str_len:
367
+ # Looks like there are some more content beyond the last ```
368
+ # In the best case, it would be some additional plain-text response from the LLM
369
+ # and is unlikely to contain } or ] that are present in JSON
370
+ if '}' not in json_str[idx + 3:]: # the remainder of the text
371
+ response_cleaned = json_str[:idx]
372
+ except ValueError:
373
+ # No ``` found
374
+ pass
375
 
376
+ return response_cleaned
377
 
378
+
379
+ def _display_download_button(file_path: pathlib.Path):
380
+ """
381
+ Display a download button to download a slide deck.
382
+
383
+ :param file_path: The path of the .pptx file.
384
+ """
385
+
386
+ with open(file_path, 'rb') as download_file:
387
+ st.download_button(
388
+ 'Download PPTX file ⬇️',
389
+ data=download_file,
390
+ file_name='Presentation.pptx',
391
+ key=datetime.datetime.now()
392
+ )
393
 
394
 
395
  def main():
chat_app.py DELETED
@@ -1,404 +0,0 @@
1
- import datetime
2
- import logging
3
- import pathlib
4
- import random
5
- import tempfile
6
- from typing import List
7
-
8
- import json5
9
- import streamlit as st
10
- from langchain_community.chat_message_histories import (
11
- StreamlitChatMessageHistory
12
- )
13
- from langchain_core.messages import HumanMessage
14
- from langchain_core.prompts import ChatPromptTemplate
15
- from transformers import AutoTokenizer
16
-
17
- from global_config import GlobalConfig
18
- from helpers import llm_helper, pptx_helper
19
-
20
-
21
- @st.cache_data
22
- def _load_strings() -> dict:
23
- """
24
- Load various strings to be displayed in the app.
25
- :return: The dictionary of strings.
26
- """
27
-
28
- with open(GlobalConfig.APP_STRINGS_FILE, 'r', encoding='utf-8') as in_file:
29
- return json5.loads(in_file.read())
30
-
31
-
32
- @st.cache_data
33
- def _get_prompt_template(is_refinement: bool) -> str:
34
- """
35
- Return a prompt template.
36
-
37
- :param is_refinement: Whether this is the initial or refinement prompt.
38
- :return: The prompt template as f-string.
39
- """
40
-
41
- if is_refinement:
42
- with open(GlobalConfig.REFINEMENT_PROMPT_TEMPLATE, 'r', encoding='utf-8') as in_file:
43
- template = in_file.read()
44
- else:
45
- with open(GlobalConfig.INITIAL_PROMPT_TEMPLATE, 'r', encoding='utf-8') as in_file:
46
- template = in_file.read()
47
-
48
- return template
49
-
50
-
51
- @st.cache_resource
52
- def _get_tokenizer() -> AutoTokenizer:
53
- """
54
- Get Mistral tokenizer for counting tokens.
55
-
56
- :return: The tokenizer.
57
- """
58
-
59
- return AutoTokenizer.from_pretrained(
60
- pretrained_model_name_or_path=GlobalConfig.HF_LLM_MODEL_NAME
61
- )
62
-
63
-
64
- APP_TEXT = _load_strings()
65
-
66
- # Session variables
67
- CHAT_MESSAGES = 'chat_messages'
68
- DOWNLOAD_FILE_KEY = 'download_file_name'
69
- IS_IT_REFINEMENT = 'is_it_refinement'
70
-
71
- logger = logging.getLogger(__name__)
72
- progress_bar = st.progress(0, text='Setting up SlideDeck AI...')
73
-
74
- texts = list(GlobalConfig.PPTX_TEMPLATE_FILES.keys())
75
- captions = [GlobalConfig.PPTX_TEMPLATE_FILES[x]['caption'] for x in texts]
76
- pptx_template = st.sidebar.radio(
77
- 'Select a presentation template:',
78
- texts,
79
- captions=captions,
80
- horizontal=True
81
- )
82
-
83
-
84
- def display_page_header_content():
85
- """
86
- Display content in the page header.
87
- """
88
-
89
- st.title(APP_TEXT['app_name'])
90
- st.subheader(APP_TEXT['caption'])
91
- # st.markdown(
92
- # '![Visitors](https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fbarunsaha%2Fslide-deck-ai&countColor=%23263759)' # noqa: E501
93
- # )
94
-
95
-
96
- def display_page_footer_content():
97
- """
98
- Display content in the page footer.
99
- """
100
-
101
- st.text(APP_TEXT['tos'] + '\n\n' + APP_TEXT['tos2'])
102
-
103
-
104
- def build_ui():
105
- """
106
- Display the input elements for content generation.
107
- """
108
-
109
- display_page_header_content()
110
-
111
- with st.expander('Usage Policies and Limitations'):
112
- display_page_footer_content()
113
-
114
- progress_bar.progress(50, text='Setting up chat interface...')
115
- set_up_chat_ui()
116
-
117
-
118
- def set_up_chat_ui():
119
- """
120
- Prepare the chat interface and related functionality.
121
- """
122
-
123
- with st.expander('Usage Instructions'):
124
- st.write(GlobalConfig.CHAT_USAGE_INSTRUCTIONS)
125
- st.markdown(
126
- 'SlideDeck AI is powered by'
127
- ' [Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2)'
128
- )
129
-
130
- # view_messages = st.expander('View the messages in the session state')
131
-
132
- st.chat_message('ai').write(
133
- random.choice(APP_TEXT['ai_greetings'])
134
- )
135
- progress_bar.progress(100, text='Done!')
136
- progress_bar.empty()
137
-
138
- history = StreamlitChatMessageHistory(key=CHAT_MESSAGES)
139
-
140
- if _is_it_refinement():
141
- template = _get_prompt_template(is_refinement=True)
142
- logger.debug('Getting refinement template')
143
- else:
144
- template = _get_prompt_template(is_refinement=False)
145
- logger.debug('Getting initial template')
146
-
147
- prompt_template = ChatPromptTemplate.from_template(template)
148
-
149
- # Since Streamlit app reloads at every interaction, display the chat history
150
- # from the save session state
151
- for msg in history.messages:
152
- msg_type = msg.type
153
- if msg_type == 'user':
154
- st.chat_message(msg_type).write(msg.content)
155
- else:
156
- st.chat_message(msg_type).code(msg.content, language='json')
157
-
158
- if prompt := st.chat_input(
159
- placeholder=APP_TEXT['chat_placeholder'],
160
- max_chars=GlobalConfig.LLM_MODEL_MAX_INPUT_LENGTH
161
- ):
162
-
163
- progress_bar_pptx = st.progress(0, 'Preparing to run...')
164
- if not _is_valid_prompt(prompt):
165
- return
166
-
167
- logger.info('User input: %s', prompt)
168
- st.chat_message('user').write(prompt)
169
-
170
- user_messages = _get_user_messages()
171
- user_messages.append(prompt)
172
- list_of_msgs = [
173
- f'{idx + 1}. {msg}' for idx, msg in enumerate(user_messages)
174
- ]
175
- list_of_msgs = '\n'.join(list_of_msgs)
176
-
177
- if _is_it_refinement():
178
- formatted_template = prompt_template.format(
179
- **{
180
- 'instructions': list_of_msgs,
181
- 'previous_content': _get_last_response()
182
- }
183
- )
184
- else:
185
- formatted_template = prompt_template.format(
186
- **{
187
- 'question': prompt,
188
- }
189
- )
190
-
191
- progress_bar_pptx.progress(5, 'Calling LLM...will retry if connection times out...')
192
- response: dict = llm_helper.hf_api_query({
193
- 'inputs': formatted_template,
194
- 'parameters': {
195
- 'temperature': GlobalConfig.LLM_MODEL_TEMPERATURE,
196
- 'min_length': GlobalConfig.LLM_MODEL_MIN_OUTPUT_LENGTH,
197
- 'max_length': GlobalConfig.LLM_MODEL_MAX_OUTPUT_LENGTH,
198
- 'max_new_tokens': GlobalConfig.LLM_MODEL_MAX_OUTPUT_LENGTH,
199
- 'num_return_sequences': 1,
200
- 'return_full_text': False,
201
- # "repetition_penalty": 0.0001
202
- },
203
- 'options': {
204
- 'wait_for_model': True,
205
-
206
- 'use_cache': True
207
- }
208
- })
209
-
210
- if len(response) > 0 and 'generated_text' in response[0]:
211
- response: str = response[0]['generated_text'].strip()
212
-
213
- st.chat_message('ai').code(response, language='json')
214
-
215
- history.add_user_message(prompt)
216
- history.add_ai_message(response)
217
-
218
- if GlobalConfig.COUNT_TOKENS:
219
- tokenizer = _get_tokenizer()
220
- tokens_count_in = len(tokenizer.tokenize(formatted_template))
221
- tokens_count_out = len(tokenizer.tokenize(response))
222
- logger.debug(
223
- 'Tokens count:: input: %d, output: %d',
224
- tokens_count_in, tokens_count_out
225
- )
226
-
227
- # _display_messages_history(view_messages)
228
-
229
- # The content has been generated as JSON
230
- # There maybe trailing ``` at the end of the response -- remove them
231
- # To be careful: ``` may be part of the content as well when code is generated
232
- progress_bar_pptx.progress(50, 'Analyzing response...')
233
- response_cleaned = _clean_json(response)
234
-
235
- # Now create the PPT file
236
- progress_bar_pptx.progress(75, 'Creating the slide deck...give it a moment')
237
- generate_slide_deck(response_cleaned)
238
- progress_bar_pptx.progress(100, text='Done!')
239
-
240
-
241
- def generate_slide_deck(json_str: str):
242
- """
243
- Create a slide deck.
244
-
245
- :param json_str: The content in *valid* JSON format.
246
- """
247
-
248
- if DOWNLOAD_FILE_KEY in st.session_state:
249
- path = pathlib.Path(st.session_state[DOWNLOAD_FILE_KEY])
250
- logger.debug('DOWNLOAD_FILE_KEY found in session')
251
- else:
252
- temp = tempfile.NamedTemporaryFile(delete=False, suffix='.pptx')
253
- path = pathlib.Path(temp.name)
254
- st.session_state[DOWNLOAD_FILE_KEY] = str(path)
255
- logger.debug('DOWNLOAD_FILE_KEY not found in session')
256
-
257
- logger.debug('Creating PPTX file: %s...', st.session_state[DOWNLOAD_FILE_KEY])
258
-
259
- try:
260
- pptx_helper.generate_powerpoint_presentation(
261
- json_str,
262
- slides_template=pptx_template,
263
- output_file_path=path
264
- )
265
-
266
- _display_download_button(path)
267
- except ValueError as ve:
268
- st.error(APP_TEXT['json_parsing_error'])
269
- logger.error('%s', APP_TEXT['json_parsing_error'])
270
- logger.error('Additional error info: %s', str(ve))
271
- except Exception as ex:
272
- st.error(APP_TEXT['content_generation_error'])
273
- logger.error('Caught a generic exception: %s', str(ex))
274
-
275
-
276
- def _is_valid_prompt(prompt: str) -> bool:
277
- """
278
- Verify whether user input satisfies the concerned constraints.
279
-
280
- :param prompt: The user input text.
281
- :return: True if all criteria are satisfied; False otherwise.
282
- """
283
-
284
- if len(prompt) < 5 or ' ' not in prompt:
285
- st.error(
286
- 'Not enough information provided!'
287
- ' Please be a little more descriptive and type a few words with a few characters :)'
288
- )
289
- return False
290
-
291
- return True
292
-
293
-
294
- def _is_it_refinement() -> bool:
295
- """
296
- Whether it is the initial prompt or a refinement.
297
-
298
- :return: True if it is the initial prompt; False otherwise.
299
- """
300
-
301
- if IS_IT_REFINEMENT in st.session_state:
302
- return True
303
-
304
- if len(st.session_state[CHAT_MESSAGES]) >= 2:
305
- # Prepare for the next call
306
- st.session_state[IS_IT_REFINEMENT] = True
307
- return True
308
-
309
- return False
310
-
311
-
312
- def _get_user_messages() -> List[str]:
313
- """
314
- Get a list of user messages submitted until now from the session state.
315
-
316
- :return: The list of user messages.
317
- """
318
-
319
- return [
320
- msg.content for msg in st.session_state[CHAT_MESSAGES] if isinstance(msg, HumanMessage)
321
- ]
322
-
323
-
324
def _get_last_response() -> str:
    """
    Get the most recent response generated by the AI.

    :return: The response text.
    """

    chat_history = st.session_state[CHAT_MESSAGES]
    return chat_history[-1].content
-
333
-
334
def _display_messages_history(view_messages: st.expander):
    """
    Display the history of messages.

    :param view_messages: The Streamlit expander widget inside which the chat
     history (AI and Human messages) is rendered as JSON.
    """

    with view_messages:
        view_messages.json(st.session_state[CHAT_MESSAGES])
-
344
- def _clean_json(json_str: str) -> str:
345
- """
346
- Attempt to clean a JSON response string from the LLM by removing the trailing ```
347
- and any text beyond that. May not be always accurate.
348
-
349
- :param json_str: The input string in JSON format.
350
- :return: The "cleaned" JSON string.
351
- """
352
-
353
- str_len = len(json_str)
354
- response_cleaned = json_str
355
-
356
- try:
357
- idx = json_str.rindex('```')
358
- logger.debug(
359
- 'Fixing JSON response: str_len: %d, idx of ```: %d',
360
- str_len, idx
361
- )
362
-
363
- if idx + 3 == str_len:
364
- # The response ends with ``` -- most likely the end of JSON response string
365
- response_cleaned = json_str[:idx]
366
- elif idx + 3 < str_len:
367
- # Looks like there are some more content beyond the last ```
368
- # In the best case, it would be some additional plain-text response from the LLM
369
- # and is unlikely to contain } or ] that are present in JSON
370
- if '}' not in json_str[idx + 3:]: # the remainder of the text
371
- response_cleaned = json_str[:idx]
372
- except ValueError:
373
- # No ``` found
374
- pass
375
-
376
- return response_cleaned
377
-
378
-
379
def _display_download_button(file_path: pathlib.Path):
    """
    Display a download button to download a slide deck.

    :param file_path: The path of the .pptx file.
    """

    with open(file_path, 'rb') as download_file:
        st.download_button(
            'Download PPTX file ⬇️',
            data=download_file,
            file_name='Presentation.pptx',
            # Streamlit widget keys must be str or int; a datetime object is not a
            # valid key, so use its string form (still unique per render)
            key=str(datetime.datetime.now())
        )
-
394
-
395
def main():
    """
    Trigger application run.
    """

    build_ui()


# Script entry point when run directly (e.g., `streamlit run`)
if __name__ == '__main__':
    main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
legacy_app.py ADDED
@@ -0,0 +1,291 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import pathlib
import logging
import tempfile
from typing import List, Tuple

import json5
import metaphor_python as metaphor
import streamlit as st

from helpers import llm_helper, pptx_helper
from global_config import GlobalConfig


# Load the UI strings once at import time; use a context manager so the file
# handle is closed (the previous bare open() leaked it)
with open(GlobalConfig.APP_STRINGS_FILE, 'r', encoding='utf-8') as _app_strings_file:
    APP_TEXT = json5.loads(_app_strings_file.read())

# Bytes-to-gigabytes conversion factor
GB_CONVERTER = 2 ** 30


logger = logging.getLogger(__name__)
19
+
20
+
21
@st.cache_data
def get_contents_wrapper(text: str) -> str:
    """
    Fetch and cache the slide deck contents on a topic by calling an external API.

    :param text: The presentation topic.
    :return: The slide deck contents or outline in JSON format.
    """

    # This log entry appears only on a Streamlit cache miss for this topic
    logger.info('LLM call because of cache miss...')
    return llm_helper.generate_slides_content(text).strip()
32
+
33
+
34
@st.cache_resource
def get_metaphor_client_wrapper() -> metaphor.Metaphor:
    """
    Create a Metaphor client for semantic Web search.

    Cached as a shared resource, so a single client instance is reused across
    reruns instead of being recreated on every call.

    :return: Metaphor instance.
    """

    return metaphor.Metaphor(api_key=GlobalConfig.METAPHOR_API_KEY)
43
+
44
+
45
@st.cache_data
def get_web_search_results_wrapper(text: str) -> List[Tuple[str, str]]:
    """
    Fetch and cache the Web search results on a given topic.

    :param text: The topic.
    :return: A list of (title, link) tuples.
    """

    search_response = get_metaphor_client_wrapper().search(
        text,
        use_autoprompt=True,
        num_results=5
    )

    # Keep only the title and URL of each hit
    return [(item.title, item.url) for item in search_response.results]
65
+
66
+
67
def build_ui():
    """
    Display the input elements for content generation. Only covers the first step.

    Renders the title, the topic/template form, and -- once submitted -- kicks off
    slide generation with a progress bar. The terms-of-service text and a visitor
    badge are shown at the bottom.
    """

    # get_disk_used_percentage()

    st.title(APP_TEXT['app_name'])
    st.subheader(APP_TEXT['caption'])
    st.markdown(
        'Powered by'
        ' [Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2).'
    )
    st.markdown(
        '*If the JSON is generated or parsed incorrectly, try again later by making minor changes'
        ' to the input text.*'
    )

    with st.form('my_form'):
        # Topic input, pre-filled from the preload file when available
        try:
            with open(GlobalConfig.PRELOAD_DATA_FILE, 'r', encoding='utf-8') as in_file:
                preload_data = json5.loads(in_file.read())
        except (FileExistsError, FileNotFoundError):
            # NOTE(review): FileExistsError cannot be raised when opening for
            # reading -- FileNotFoundError alone would suffice here
            preload_data = {'topic': '', 'audience': ''}

        topic = st.text_area(
            APP_TEXT['input_labels'][0],
            value=preload_data['topic']
        )

        texts = list(GlobalConfig.PPTX_TEMPLATE_FILES.keys())
        captions = [GlobalConfig.PPTX_TEMPLATE_FILES[x]['caption'] for x in texts]

        pptx_template = st.radio(
            'Select a presentation template:',
            texts,
            captions=captions,
            horizontal=True
        )

        st.divider()
        submit = st.form_submit_button('Generate slide deck')

    if submit:
        # st.write(f'Clicked {time.time()}')
        st.session_state.submitted = True

    # Persist the "submitted" flag across Streamlit reruns; see
    # https://github.com/streamlit/streamlit/issues/3832#issuecomment-1138994421
    if 'submitted' in st.session_state:
        progress_text = 'Generating the slides...give it a moment'
        progress_bar = st.progress(0, text=progress_text)

        topic_txt = topic.strip()
        generate_presentation(topic_txt, pptx_template, progress_bar)

    st.divider()
    st.text(APP_TEXT['tos'])
    st.text(APP_TEXT['tos2'])

    st.markdown(
        '![Visitors]'
        '(https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fbarunsaha%2Fslide-deck-ai&countColor=%23263759)'
    )
132
+
133
def generate_presentation(topic: str, pptx_template: str, progress_bar):
    """
    Process the inputs to generate the slides.

    Runs the full pipeline: LLM content generation, PPTX creation, and bonus Web
    references. Errors are reported on the page rather than raised.

    :param topic: The presentation topic based on which contents are to be generated.
    :param pptx_template: The PowerPoint template name to be used.
    :param progress_bar: Progress bar from the page.
    """

    topic_length = len(topic)
    logger.debug('Input length:: topic: %s', topic_length)

    if topic_length >= 10:
        logger.debug('Topic: %s', topic)
        # Truncate overly long topics to respect the LLM's input limit
        target_length = min(topic_length, GlobalConfig.LLM_MODEL_MAX_INPUT_LENGTH)

        try:
            # Step 1: Generate the contents in JSON format using an LLM
            json_str = process_slides_contents(topic[:target_length], progress_bar)
            logger.debug('Truncated topic: %s', topic[:target_length])
            logger.debug('Length of JSON: %d', len(json_str))

            # Step 2: Generate the slide deck based on the template specified
            if len(json_str) > 0:
                st.info(
                    'Tip: The generated content doesn\'t look so great?'
                    ' Need alternatives? Just change your description text and try again.',
                    icon="💡️"
                )
            else:
                # Empty JSON means content generation failed; stop the pipeline here
                st.error(
                    'Unfortunately, JSON generation failed, so the next steps would lead'
                    ' to nowhere. Try again or come back later.'
                )
                return

            all_headers = generate_slide_deck(json_str, pptx_template, progress_bar)

            # Step 3: Bonus stuff: Web references and AI art
            show_bonus_stuff(all_headers)

        except ValueError as ve:
            st.error(f'Unfortunately, an error occurred: {ve}! '
                     f'Please change the text, try again later, or report it, sharing your inputs.')

    else:
        st.error('Not enough information provided! Please be little more descriptive :)')
180
+
181
+
182
def process_slides_contents(text: str, progress_bar: st.progress) -> str:
    """
    Convert given text into structured data and display. Update the UI.

    :param text: The topic description for the presentation.
    :param progress_bar: Progress bar for this step.
    :return: The contents as a JSON-formatted string; empty string on failure.
    """

    json_str = ''

    try:
        logger.info('Calling LLM for content generation on the topic: %s', text)
        json_str = get_contents_wrapper(text)
    except Exception as ex:
        # Deliberately broad: any LLM/network failure is surfaced to the user, and
        # the empty-string result signals the failure to the caller
        st.error(
            f'An exception occurred while trying to convert to JSON. It could be because of heavy'
            f' traffic or something else. Try doing it again or try again later.'
            f'\nError message: {ex}'
        )

    progress_bar.progress(50, text='Contents generated')

    with st.expander('The generated contents (in JSON format)'):
        st.code(json_str, language='json')

    return json_str
209
+
210
+
211
def generate_slide_deck(json_str: str, pptx_template: str, progress_bar) -> List:
    """
    Create a slide deck.

    Writes the generated presentation to a temporary .pptx file and offers it for
    download on the page.

    :param json_str: The contents in JSON format.
    :param pptx_template: The PPTX template name.
    :param progress_bar: Progress bar.
    :return: A list of all slide headers and the title.
    """

    progress_text = 'Creating the slide deck...give it a moment'
    progress_bar.progress(75, text=progress_text)

    # # Get a unique name for the file to save -- use the session ID
    # ctx = st_sr.get_script_run_ctx()
    # session_id = ctx.session_id
    # timestamp = time.time()
    # output_file_name = f'{session_id}_{timestamp}.pptx'

    # NOTE(review): delete=False means the temp file is never removed by Python;
    # consider cleaning it up once the download button has been rendered
    temp = tempfile.NamedTemporaryFile(delete=False, suffix='.pptx')
    path = pathlib.Path(temp.name)

    logger.info('Creating PPTX file...')
    all_headers = pptx_helper.generate_powerpoint_presentation(
        json_str,
        slides_template=pptx_template,
        output_file_path=path
    )
    progress_bar.progress(100, text='Done!')

    with open(path, 'rb') as f:
        st.download_button('Download PPTX file', f, file_name='Presentation.pptx')

    return all_headers
245
+
246
+
247
def show_bonus_stuff(ppt_headers: List[str]):
    """
    Show bonus stuff for the presentation.

    :param ppt_headers: A list of the slide headings.
    """

    # Use the presentation title and the slide headers as a single search query
    logger.info('Calling Metaphor search...')
    ppt_text = ' '.join(ppt_headers)
    reference_links = [
        f'[{title}]({link})'
        for title, link in get_web_search_results_wrapper(ppt_text)
    ]

    with st.expander('Related Web references'):
        st.markdown('\n\n'.join(reference_links))

    logger.info('Done!')

    # AI image generation is deliberately skipped: it costs time and an extra API
    # call. The disabled code called get_ai_image_wrapper(ppt_text), base64-decoded
    # the result, and displayed it with a caption and a save tip -- restore that
    # block here to re-enable the feature.
281
+
282
def main():
    """
    Trigger application run.
    """

    build_ui()


# Script entry point when run directly (e.g., `streamlit run`)
if __name__ == '__main__':
    main()