barunsaha commited on
Commit
e690364
1 Parent(s): d8d5fbc

Update chat history in prompts, segregate the prompts, add retry to HF API call, and update configs

Browse files
chat_app.py CHANGED
@@ -10,8 +10,9 @@ import streamlit as st
10
  from langchain_community.chat_message_histories import (
11
  StreamlitChatMessageHistory
12
  )
 
13
  from langchain_core.prompts import ChatPromptTemplate
14
- from langchain_core.runnables.history import RunnableWithMessageHistory
15
 
16
  from global_config import GlobalConfig
17
  from helpers import llm_helper, pptx_helper
@@ -28,10 +29,44 @@ def _load_strings() -> dict:
28
  return json5.loads(in_file.read())
29
 
30
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  APP_TEXT = _load_strings()
 
 
 
32
  DOWNLOAD_FILE_KEY = 'download_file_name'
33
- # langchain.debug = True
34
- # langchain.verbose = True
35
 
36
  logger = logging.getLogger(__name__)
37
  progress_bar = st.progress(0, text='Setting up SlideDeck AI...')
@@ -53,10 +88,9 @@ def display_page_header_content():
53
 
54
  st.title(APP_TEXT['app_name'])
55
  st.subheader(APP_TEXT['caption'])
56
- st.markdown(
57
- 'Powered by'
58
- ' [Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2)'
59
- )
60
 
61
 
62
  def display_page_footer_content():
@@ -65,9 +99,6 @@ def display_page_footer_content():
65
  """
66
 
67
  st.text(APP_TEXT['tos'] + '\n\n' + APP_TEXT['tos2'])
68
- # st.markdown(
69
- # '![Visitors](https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fbarunsaha%2Fslide-deck-ai&countColor=%23263759)' # noqa: E501
70
- # )
71
 
72
 
73
  def build_ui():
@@ -89,60 +120,117 @@ def set_up_chat_ui():
89
  Prepare the chat interface and related functionality.
90
  """
91
 
92
- history = StreamlitChatMessageHistory(key='chat_messages')
93
- llm = llm_helper.get_hf_endpoint()
94
-
95
- with open(GlobalConfig.CHAT_TEMPLATE_FILE, 'r', encoding='utf-8') as in_file:
96
- template = in_file.read()
97
-
98
- prompt = ChatPromptTemplate.from_template(template)
99
- chain = prompt | llm
100
-
101
- chain_with_history = RunnableWithMessageHistory(
102
- chain,
103
- lambda session_id: history, # Always return the instance created earlier
104
- input_messages_key='question',
105
- history_messages_key='chat_history',
106
- )
107
-
108
  with st.expander('Usage Instructions'):
109
  st.write(GlobalConfig.CHAT_USAGE_INSTRUCTIONS)
 
 
 
 
 
 
110
 
111
  st.chat_message('ai').write(
112
  random.choice(APP_TEXT['ai_greetings'])
113
  )
 
 
114
 
115
- for msg in history.messages:
116
- # st.chat_message(msg.type).markdown(msg.content)
117
- st.chat_message(msg.type).code(msg.content, language='json')
118
 
119
- # The download button disappears on clicking (anywhere) because of app reload
120
- # So, display it again
121
- if DOWNLOAD_FILE_KEY in st.session_state:
122
- _display_download_button(st.session_state[DOWNLOAD_FILE_KEY])
 
 
123
 
124
- progress_bar.progress(100, text='Done!')
125
- progress_bar.empty()
 
 
 
 
 
 
 
 
126
 
127
  if prompt := st.chat_input(
128
  placeholder=APP_TEXT['chat_placeholder'],
129
  max_chars=GlobalConfig.LLM_MODEL_MAX_INPUT_LENGTH
130
  ):
 
 
 
 
 
131
  logger.info('User input: %s', prompt)
132
  st.chat_message('user').write(prompt)
133
 
134
- progress_bar_pptx = st.progress(0, 'Calling LLM...')
135
-
136
- # As usual, new messages are added to StreamlitChatMessageHistory when the Chain is called
137
- config = {'configurable': {'session_id': 'any'}}
138
- response: str = chain_with_history.invoke({'question': prompt}, config)
139
- st.chat_message('ai').markdown('```json\n' + response)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
140
 
141
  # The content has been generated as JSON
142
  # There may be trailing ``` at the end of the response -- remove them
143
  # To be careful: ``` may be part of the content as well when code is generated
144
- response_cleaned = _clean_json(response)
145
  progress_bar_pptx.progress(50, 'Analyzing response...')
 
146
 
147
  # Now create the PPT file
148
  progress_bar_pptx.progress(75, 'Creating the slide deck...give it a moment')
@@ -185,6 +273,74 @@ def generate_slide_deck(json_str: str):
185
  logger.error('Caught a generic exception: %s', str(ex))
186
 
187
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
188
  def _clean_json(json_str: str) -> str:
189
  """
190
  Attempt to clean a JSON response string from the LLM by removing the trailing ```
 
10
  from langchain_community.chat_message_histories import (
11
  StreamlitChatMessageHistory
12
  )
13
+ from langchain_core.messages import HumanMessage
14
  from langchain_core.prompts import ChatPromptTemplate
15
+ from transformers import AutoTokenizer
16
 
17
  from global_config import GlobalConfig
18
  from helpers import llm_helper, pptx_helper
 
29
  return json5.loads(in_file.read())
30
 
31
 
32
+ @st.cache_data
33
+ def _get_prompt_template(is_refinement: bool) -> str:
34
+ """
35
+ Return a prompt template.
36
+
37
+ :param is_refinement: Whether this is the initial or refinement prompt.
38
+ :return: The prompt template as f-string.
39
+ """
40
+
41
+ if is_refinement:
42
+ with open(GlobalConfig.REFINEMENT_PROMPT_TEMPLATE, 'r', encoding='utf-8') as in_file:
43
+ template = in_file.read()
44
+ else:
45
+ with open(GlobalConfig.INITIAL_PROMPT_TEMPLATE, 'r', encoding='utf-8') as in_file:
46
+ template = in_file.read()
47
+
48
+ return template
49
+
50
+
51
+ @st.cache_resource
52
+ def _get_tokenizer() -> AutoTokenizer:
53
+ """
54
+ Get Mistral tokenizer for counting tokens.
55
+
56
+ :return: The tokenizer.
57
+ """
58
+
59
+ return AutoTokenizer.from_pretrained(
60
+ pretrained_model_name_or_path=GlobalConfig.HF_LLM_MODEL_NAME
61
+ )
62
+
63
+
64
  APP_TEXT = _load_strings()
65
+
66
+ # Session variables
67
+ CHAT_MESSAGES = 'chat_messages'
68
  DOWNLOAD_FILE_KEY = 'download_file_name'
69
+ IS_IT_REFINEMENT = 'is_it_refinement'
 
70
 
71
  logger = logging.getLogger(__name__)
72
  progress_bar = st.progress(0, text='Setting up SlideDeck AI...')
 
88
 
89
  st.title(APP_TEXT['app_name'])
90
  st.subheader(APP_TEXT['caption'])
91
+ # st.markdown(
92
+ # '![Visitors](https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fbarunsaha%2Fslide-deck-ai&countColor=%23263759)' # noqa: E501
93
+ # )
 
94
 
95
 
96
  def display_page_footer_content():
 
99
  """
100
 
101
  st.text(APP_TEXT['tos'] + '\n\n' + APP_TEXT['tos2'])
 
 
 
102
 
103
 
104
  def build_ui():
 
120
  Prepare the chat interface and related functionality.
121
  """
122
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
123
  with st.expander('Usage Instructions'):
124
  st.write(GlobalConfig.CHAT_USAGE_INSTRUCTIONS)
125
+ st.markdown(
126
+ 'SlideDeck AI is powered by'
127
+ ' [Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2)'
128
+ )
129
+
130
+ # view_messages = st.expander('View the messages in the session state')
131
 
132
  st.chat_message('ai').write(
133
  random.choice(APP_TEXT['ai_greetings'])
134
  )
135
+ progress_bar.progress(100, text='Done!')
136
+ progress_bar.empty()
137
 
138
+ history = StreamlitChatMessageHistory(key=CHAT_MESSAGES)
 
 
139
 
140
+ if _is_it_refinement():
141
+ template = _get_prompt_template(is_refinement=True)
142
+ logger.debug('Getting refinement template')
143
+ else:
144
+ template = _get_prompt_template(is_refinement=False)
145
+ logger.debug('Getting initial template')
146
 
147
+ prompt_template = ChatPromptTemplate.from_template(template)
148
+
149
+ # Since Streamlit app reloads at every interaction, display the chat history
150
+ # from the saved session state
151
+ for msg in history.messages:
152
+ msg_type = msg.type
153
+ if msg_type == 'user':
154
+ st.chat_message(msg_type).write(msg.content)
155
+ else:
156
+ st.chat_message(msg_type).code(msg.content, language='json')
157
 
158
  if prompt := st.chat_input(
159
  placeholder=APP_TEXT['chat_placeholder'],
160
  max_chars=GlobalConfig.LLM_MODEL_MAX_INPUT_LENGTH
161
  ):
162
+
163
+ progress_bar_pptx = st.progress(0, 'Preparing to run...')
164
+ if not _is_valid_prompt(prompt):
165
+ return
166
+
167
  logger.info('User input: %s', prompt)
168
  st.chat_message('user').write(prompt)
169
 
170
+ user_messages = _get_user_messages()
171
+ user_messages.append(prompt)
172
+ list_of_msgs = [
173
+ f'{idx + 1}. {msg}' for idx, msg in enumerate(user_messages)
174
+ ]
175
+ list_of_msgs = '\n'.join(list_of_msgs)
176
+
177
+ if _is_it_refinement():
178
+ formatted_template = prompt_template.format(
179
+ **{
180
+ 'instructions': list_of_msgs,
181
+ 'previous_content': _get_last_response()
182
+ }
183
+ )
184
+ else:
185
+ formatted_template = prompt_template.format(
186
+ **{
187
+ 'question': prompt,
188
+ }
189
+ )
190
+
191
+ progress_bar_pptx.progress(5, 'Calling LLM...will retry if connection times out...')
192
+ response: dict = llm_helper.hf_api_query({
193
+ 'inputs': formatted_template,
194
+ 'parameters': {
195
+ 'temperature': GlobalConfig.LLM_MODEL_TEMPERATURE,
196
+ 'min_length': GlobalConfig.LLM_MODEL_MIN_OUTPUT_LENGTH,
197
+ 'max_length': GlobalConfig.LLM_MODEL_MAX_OUTPUT_LENGTH,
198
+ 'max_new_tokens': GlobalConfig.LLM_MODEL_MAX_OUTPUT_LENGTH,
199
+ 'num_return_sequences': 1,
200
+ 'return_full_text': False,
201
+ # "repetition_penalty": 0.0001
202
+ },
203
+ 'options': {
204
+ 'wait_for_model': True,
205
+
206
+ 'use_cache': True
207
+ }
208
+ })
209
+
210
+ if len(response) > 0 and 'generated_text' in response[0]:
211
+ response: str = response[0]['generated_text'].strip()
212
+
213
+ st.chat_message('ai').code(response, language='json')
214
+
215
+ history.add_user_message(prompt)
216
+ history.add_ai_message(response)
217
+
218
+ if GlobalConfig.COUNT_TOKENS:
219
+ tokenizer = _get_tokenizer()
220
+ tokens_count_in = len(tokenizer.tokenize(formatted_template))
221
+ tokens_count_out = len(tokenizer.tokenize(response))
222
+ logger.debug(
223
+ 'Tokens count:: input: %d, output: %d',
224
+ tokens_count_in, tokens_count_out
225
+ )
226
+
227
+ # _display_messages_history(view_messages)
228
 
229
  # The content has been generated as JSON
230
  # There may be trailing ``` at the end of the response -- remove them
231
  # To be careful: ``` may be part of the content as well when code is generated
 
232
  progress_bar_pptx.progress(50, 'Analyzing response...')
233
+ response_cleaned = _clean_json(response)
234
 
235
  # Now create the PPT file
236
  progress_bar_pptx.progress(75, 'Creating the slide deck...give it a moment')
 
273
  logger.error('Caught a generic exception: %s', str(ex))
274
 
275
 
276
+ def _is_valid_prompt(prompt: str) -> bool:
277
+ """
278
+ Verify whether user input satisfies the concerned constraints.
279
+
280
+ :param prompt: The user input text.
281
+ :return: True if all criteria are satisfied; False otherwise.
282
+ """
283
+
284
+ if len(prompt) < 5 or ' ' not in prompt:
285
+ st.error(
286
+ 'Not enough information provided!'
287
+ ' Please be a little more descriptive and type a few words with a few characters :)'
288
+ )
289
+ return False
290
+
291
+ return True
292
+
293
+
294
+ def _is_it_refinement() -> bool:
295
+ """
296
+ Whether it is the initial prompt or a refinement.
297
+
298
+ :return: True if it is a refinement of the deck; False if it is the initial prompt.
299
+ """
300
+
301
+ if IS_IT_REFINEMENT in st.session_state:
302
+ return True
303
+
304
+ if len(st.session_state[CHAT_MESSAGES]) >= 2:
305
+ # Prepare for the next call
306
+ st.session_state[IS_IT_REFINEMENT] = True
307
+ return True
308
+
309
+ return False
310
+
311
+
312
+ def _get_user_messages() -> List[str]:
313
+ """
314
+ Get a list of user messages submitted until now from the session state.
315
+
316
+ :return: The list of user messages.
317
+ """
318
+
319
+ return [
320
+ msg.content for msg in st.session_state[CHAT_MESSAGES] if isinstance(msg, HumanMessage)
321
+ ]
322
+
323
+
324
+ def _get_last_response() -> str:
325
+ """
326
+ Get the last response generated by AI.
327
+
328
+ :return: The response text.
329
+ """
330
+
331
+ return st.session_state[CHAT_MESSAGES][-1].content
332
+
333
+
334
+ def _display_messages_history(view_messages: st.expander):
335
+ """
336
+ Display the history of messages.
337
+
338
+ :param view_messages: The list of AI and Human messages.
339
+ """
340
+
341
+ with view_messages:
342
+ view_messages.json(st.session_state[CHAT_MESSAGES])
343
+
344
  def _clean_json(json_str: str) -> str:
345
  """
346
  Attempt to clean a JSON response string from the LLM by removing the trailing ```
global_config.py CHANGED
@@ -13,18 +13,20 @@ class GlobalConfig:
13
  HF_LLM_MODEL_NAME = 'mistralai/Mistral-7B-Instruct-v0.2'
14
  LLM_MODEL_TEMPERATURE: float = 0.2
15
  LLM_MODEL_MIN_OUTPUT_LENGTH: int = 50
16
- LLM_MODEL_MAX_OUTPUT_LENGTH: int = 2000
17
- LLM_MODEL_MAX_INPUT_LENGTH: int = 100
18
 
19
  HUGGINGFACEHUB_API_TOKEN = os.environ.get('HUGGINGFACEHUB_API_TOKEN', '')
20
  METAPHOR_API_KEY = os.environ.get('METAPHOR_API_KEY', '')
21
 
22
  LOG_LEVEL = 'DEBUG'
 
23
  APP_STRINGS_FILE = 'strings.json'
24
  PRELOAD_DATA_FILE = 'examples/example_02.json'
25
  SLIDES_TEMPLATE_FILE = 'langchain_templates/template_combined.txt'
26
  JSON_TEMPLATE_FILE = 'langchain_templates/text_to_json_template_02.txt'
27
- CHAT_TEMPLATE_FILE = 'langchain_templates/template_combined_chat_history.txt'
 
28
 
29
  PPTX_TEMPLATE_FILES = {
30
  'Blank': {
 
13
  HF_LLM_MODEL_NAME = 'mistralai/Mistral-7B-Instruct-v0.2'
14
  LLM_MODEL_TEMPERATURE: float = 0.2
15
  LLM_MODEL_MIN_OUTPUT_LENGTH: int = 50
16
+ LLM_MODEL_MAX_OUTPUT_LENGTH: int = 4096
17
+ LLM_MODEL_MAX_INPUT_LENGTH: int = 750
18
 
19
  HUGGINGFACEHUB_API_TOKEN = os.environ.get('HUGGINGFACEHUB_API_TOKEN', '')
20
  METAPHOR_API_KEY = os.environ.get('METAPHOR_API_KEY', '')
21
 
22
  LOG_LEVEL = 'DEBUG'
23
+ COUNT_TOKENS = False
24
  APP_STRINGS_FILE = 'strings.json'
25
  PRELOAD_DATA_FILE = 'examples/example_02.json'
26
  SLIDES_TEMPLATE_FILE = 'langchain_templates/template_combined.txt'
27
  JSON_TEMPLATE_FILE = 'langchain_templates/text_to_json_template_02.txt'
28
+ INITIAL_PROMPT_TEMPLATE = 'langchain_templates/chat_prompts/initial_template.txt'
29
+ REFINEMENT_PROMPT_TEMPLATE = 'langchain_templates/chat_prompts/refinement_template.txt'
30
 
31
  PPTX_TEMPLATE_FILES = {
32
  'Blank': {
helpers/llm_helper.py CHANGED
@@ -1,5 +1,8 @@
1
  import logging
2
  import requests
 
 
 
3
  from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
4
  from langchain_core.language_models import LLM
5
 
@@ -11,10 +14,22 @@ HF_API_HEADERS = {"Authorization": f"Bearer {GlobalConfig.HUGGINGFACEHUB_API_TOK
11
 
12
  logger = logging.getLogger(__name__)
13
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
  def get_hf_endpoint() -> LLM:
16
  """
17
- Get an LLM via the HuggingFaceEndpoint.
18
 
19
  :return: The LLM.
20
  """
@@ -44,11 +59,11 @@ def hf_api_query(payload: dict) -> dict:
44
  """
45
 
46
  try:
47
- response = requests.post(HF_API_URL, headers=HF_API_HEADERS, json=payload, timeout=15)
48
  result = response.json()
49
  except requests.exceptions.Timeout as te:
50
  logger.error('*** Error: hf_api_query timeout! %s', str(te))
51
- result = {}
52
 
53
  return result
54
 
 
1
  import logging
2
  import requests
3
+ from requests.adapters import HTTPAdapter
4
+ from urllib3.util import Retry
5
+
6
  from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
7
  from langchain_core.language_models import LLM
8
 
 
14
 
15
  logger = logging.getLogger(__name__)
16
 
17
+ retries = Retry(
18
+ total=5,
19
+ backoff_factor=0.25,
20
+ backoff_jitter=0.3,
21
+ status_forcelist=[502, 503, 504],
22
+ allowed_methods={'POST'},
23
+ )
24
+ adapter = HTTPAdapter(max_retries=retries)
25
+ http_session = requests.Session()
26
+ http_session.mount('https://', adapter)
27
+ http_session.mount('http://', adapter)
28
+
29
 
30
  def get_hf_endpoint() -> LLM:
31
  """
32
+ Get an LLM via the HuggingFaceEndpoint of LangChain.
33
 
34
  :return: The LLM.
35
  """
 
59
  """
60
 
61
  try:
62
+ response = http_session.post(HF_API_URL, headers=HF_API_HEADERS, json=payload, timeout=15)
63
  result = response.json()
64
  except requests.exceptions.Timeout as te:
65
  logger.error('*** Error: hf_api_query timeout! %s', str(te))
66
+ result = []
67
 
68
  return result
69
 
helpers/pptx_helper.py CHANGED
@@ -64,7 +64,7 @@ def generate_powerpoint_presentation(
64
  parsed_data = json5.loads(structured_data)
65
 
66
  logger.debug(
67
- "*** Using PPTX template: %s",
68
  GlobalConfig.PPTX_TEMPLATE_FILES[slides_template]['file']
69
  )
70
  presentation = pptx.Presentation(GlobalConfig.PPTX_TEMPLATE_FILES[slides_template]['file'])
@@ -75,7 +75,10 @@ def generate_powerpoint_presentation(
75
  title = slide.shapes.title
76
  subtitle = slide.placeholders[1]
77
  title.text = parsed_data['title']
78
- logger.debug('Presentation title is: %s', title.text)
 
 
 
79
  subtitle.text = 'by Myself and SlideDeck AI :)'
80
  all_headers = [title.text, ]
81
 
 
64
  parsed_data = json5.loads(structured_data)
65
 
66
  logger.debug(
67
+ '*** Using PPTX template: %s',
68
  GlobalConfig.PPTX_TEMPLATE_FILES[slides_template]['file']
69
  )
70
  presentation = pptx.Presentation(GlobalConfig.PPTX_TEMPLATE_FILES[slides_template]['file'])
 
75
  title = slide.shapes.title
76
  subtitle = slide.placeholders[1]
77
  title.text = parsed_data['title']
78
+ logger.info(
79
+ 'PPT title: %s | #slides: %d',
80
+ title.text, len(parsed_data['slides'])
81
+ )
82
  subtitle.text = 'by Myself and SlideDeck AI :)'
83
  all_headers = [title.text, ]
84
 
langchain_templates/{template_combined_chat_history.txt → chat_prompts/initial_template.txt} RENAMED
@@ -1,21 +1,16 @@
1
- You are a helpful, intelligent chatbot. Follow the instructions and chat history, if available, to create or revise the slides for a presentation on the given topic.
2
  Include main headings for each slide, detailed bullet points for each slide.
3
  Add relevant content to each slide.
4
- The content should be descriptive, verbose, and detailed as much as possible.
5
  If relevant, add one or two examples to illustrate the concept.
6
  Unless explicitly specified with the topic, create about 10 slides.
7
 
8
 
9
- ### Instructions:
10
  {question}
11
 
12
 
13
- ### Chat history:
14
- {chat_history}
15
-
16
-
17
- The output should only be JSON and nothing else.
18
- The desired JSON output format:
19
  {{
20
  "title": "Presentation Title",
21
  "slides": [
@@ -33,7 +28,7 @@ The desired JSON output format:
33
  {{
34
  "heading": "Heading for the Second Slide",
35
  "bullet_points": [
36
- "First bullet point",
37
  "Second bullet item",
38
  "Third bullet point"
39
  ]
 
1
+ You are a helpful, intelligent chatbot. Create the slides for a presentation on the given topic.
2
  Include main headings for each slide, detailed bullet points for each slide.
3
  Add relevant content to each slide.
4
+ The content of each slide should be verbose, descriptive, and very detailed.
5
  If relevant, add one or two examples to illustrate the concept.
6
  Unless explicitly specified with the topic, create about 10 slides.
7
 
8
 
9
+ ### Topic:
10
  {question}
11
 
12
 
13
+ The output must be valid and syntactically correct JSON adhering to the following schema:
 
 
 
 
 
14
  {{
15
  "title": "Presentation Title",
16
  "slides": [
 
28
  {{
29
  "heading": "Heading for the Second Slide",
30
  "bullet_points": [
31
+ "First bullet point",
32
  "Second bullet item",
33
  "Third bullet point"
34
  ]
langchain_templates/chat_prompts/refinement_template.txt ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are a helpful, intelligent chatbot. You follow instructions to refine an existing slide deck.
2
+ A list of user instructions is provided below in sequential order -- from the oldest to the latest.
3
+ The previously generated content of the slide deck in JSON format is also provided.
4
+ Follow the instructions to revise the content of the previously generated slides of the presentation on the given topic.
5
+ Include main headings for each slide, detailed bullet points for each slide.
6
+ Add relevant content to each slide.
7
+ The content of the slides should be descriptive, verbose, and detailed.
8
+ If relevant, add one or two examples to illustrate the concept.
9
+ Unless explicitly specified with the topic, create about 10 slides.
10
+ You also fix any syntax error that may be present in the JSON-formatted content.
11
+
12
+
13
+ ### List of instructions:
14
+ {instructions}
15
+
16
+
17
+ ### Previously generated slide deck content as JSON:
18
+ {previous_content}
19
+
20
+
21
+ The output must be valid and syntactically correct JSON adhering to the following schema:
22
+ {{
23
+ "title": "Presentation Title",
24
+ "slides": [
25
+ {{
26
+ "heading": "Heading for the First Slide",
27
+ "bullet_points": [
28
+ "First bullet point",
29
+ [
30
+ "Sub-bullet point 1",
31
+ "Sub-bullet point 2"
32
+ ],
33
+ "Second bullet point"
34
+ ]
35
+ }},
36
+ {{
37
+ "heading": "Heading for the Second Slide",
38
+ "bullet_points": [
39
+ "First bullet point",
40
+ "Second bullet item",
41
+ "Third bullet point"
42
+ ]
43
+ }}
44
+ ]
45
+ }}
46
+
47
+
48
+ ### Output:
49
+ ```json
requirements.txt CHANGED
@@ -8,5 +8,7 @@ metaphor-python
8
  json5~=0.9.14
9
  requests~=2.31.0
10
 
11
- transformers
12
  langchain-community
 
 
 
8
  json5~=0.9.14
9
  requests~=2.31.0
10
 
11
+ transformers~=4.39.2
12
  langchain-community
13
+
14
+ urllib3~=2.2.1