awacke1 committed on
Commit
17ab601
•
1 Parent(s): d78c77f

Update app.py

Files changed (1)
  1. app.py +341 -674
app.py CHANGED
@@ -21,7 +21,7 @@ from audio_recorder_streamlit import audio_recorder
  from bs4 import BeautifulSoup
  from collections import deque
  from dotenv import load_dotenv
- from gradio_client import Client, handle_file
  from huggingface_hub import InferenceClient
  from io import BytesIO
  from moviepy.editor import VideoFileClip
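The import change above drops `handle_file`, which is consistent with the rest of the diff: every surviving `client.predict(...)` call passes plain strings, so only `Client` is needed. A minimal sketch of the text-only call pattern this file relies on (the Space name is taken from the diff; the exact response shape is an assumption):

```python
# Sketch of the text-only gradio_client pattern used in this file.
# The Space name comes from the diff; the response format is an assumption.
from gradio_client import Client

client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
answer = client.predict(
    "What is retrieval-augmented generation?",  # plain-string prompt
    "mistralai/Mixtral-8x7B-Instruct-v0.1",     # model picker
    True,                                        # stream_outputs flag
    api_name="/ask_llm",
)
print(answer)
```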
@@ -31,560 +31,14 @@ from urllib.parse import quote
  from xml.etree import ElementTree as ET
  from openai import OpenAI
 
-
-
-
-
-
- # 1. 🚲BikeAI🏆 Configuration and Setup
  Site_Name = '🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI'
  title = "🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI"
  helpURL = 'https://huggingface.co/awacke1'
  bugURL = 'https://huggingface.co/spaces/awacke1'
  icons = '🚲🏆'
 
- st.set_page_config(
-     page_title=title,
-     page_icon=icons,
-     layout="wide",
-     initial_sidebar_state="auto",
-     menu_items={
-         'Get Help': helpURL,
-         'Report a bug': bugURL,
-         'About': title
-     }
- )
-
- # 2. 🚲BikeAI🏆 Load environment variables and initialize clients
- load_dotenv()
-
- # OpenAI setup
- openai.api_key = os.getenv('OPENAI_API_KEY')
- if openai.api_key == None:
-     openai.api_key = st.secrets['OPENAI_API_KEY']
-
- openai_client = OpenAI(
-     api_key=os.getenv('OPENAI_API_KEY'),
-     organization=os.getenv('OPENAI_ORG_ID')
- )
-
- # 3. 🚲BikeAI🏆 Claude setup
- anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
- if anthropic_key == None:
-     anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
- claude_client = anthropic.Anthropic(api_key=anthropic_key)
-
- # 4. 🚲BikeAI🏆 Initialize session states
- if 'transcript_history' not in st.session_state:
-     st.session_state.transcript_history = []
- if "chat_history" not in st.session_state:
-     st.session_state.chat_history = []
- if "openai_model" not in st.session_state:
-     st.session_state["openai_model"] = "gpt-4o-2024-05-13"
- if "messages" not in st.session_state:
-     st.session_state.messages = []
- if 'last_voice_input' not in st.session_state:
-     st.session_state.last_voice_input = ""
-
- # 5. 🚲BikeAI🏆 HuggingFace AI setup
- API_URL = os.getenv('API_URL')
- HF_KEY = os.getenv('HF_KEY')
- MODEL1 = "meta-llama/Llama-2-7b-chat-hf"
- MODEL2 = "openai/whisper-small.en"
- headers = {
-     "Authorization": f"Bearer {HF_KEY}",
-     "Content-Type": "application/json"
- }
-
- # 6. 🚲BikeAI🏆 Custom CSS
- st.markdown("""
- <style>
-     .main {
-         background: linear-gradient(to right, #1a1a1a, #2d2d2d);
-         color: #ffffff;
-     }
-     .stMarkdown {
-         font-family: 'Helvetica Neue', sans-serif;
-     }
-     .category-header {
-         background: linear-gradient(45deg, #2b5876, #4e4376);
-         padding: 20px;
-         border-radius: 10px;
-         margin: 10px 0;
-     }
-     .scene-card {
-         background: rgba(0,0,0,0.3);
-         padding: 15px;
-         border-radius: 8px;
-         margin: 10px 0;
-         border: 1px solid rgba(255,255,255,0.1);
-     }
-     .media-gallery {
-         display: grid;
-         gap: 1rem;
-         padding: 1rem;
-     }
-     .bike-card {
-         background: rgba(255,255,255,0.05);
-         border-radius: 10px;
-         padding: 15px;
-         transition: transform 0.3s;
-     }
-     .bike-card:hover {
-         transform: scale(1.02);
-     }
- </style>
- """, unsafe_allow_html=True)
-
-
- # 7. Helper Functions
- def generate_filename(prompt, file_type):
-     """Generate a safe filename using the prompt and file type."""
-     central = pytz.timezone('US/Central')
-     safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
-     replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
-     safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
-     return f"{safe_date_time}_{safe_prompt}.{file_type}"
-
-
- # 8. Function to create and save a file (and avoid the black hole of lost data 🕳)
- def create_file(filename, prompt, response, should_save=True):
-     if not should_save:
-         return
-     with open(filename, 'w', encoding='utf-8') as file:
-         file.write(prompt + "\n\n" + response)
- def create_and_save_file(content, file_type="md", prompt=None, is_image=False, should_save=True):
-     """Create and save file with proper handling of different types."""
-     if not should_save:
-         return None
-     filename = generate_filename(prompt if prompt else content, file_type)
-     with open(filename, "w", encoding="utf-8") as f:
-         if is_image:
-             f.write(content)
-         else:
-             f.write(prompt + "\n\n" + content if prompt else content)
-     return filename
-
-
- def get_download_link(file_path):
-     """Create download link for file."""
-     with open(file_path, "rb") as file:
-         contents = file.read()
-     b64 = base64.b64encode(contents).decode()
-     return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}📂</a>'
-
- @st.cache_resource
- def SpeechSynthesis(result):
-     """HTML5 Speech Synthesis."""
-     documentHTML5 = f'''
-     <!DOCTYPE html>
-     <html>
-     <head>
-         <title>Read It Aloud</title>
-         <script type="text/javascript">
-             function readAloud() {{
-                 const text = document.getElementById("textArea").value;
-                 const speech = new SpeechSynthesisUtterance(text);
-                 window.speechSynthesis.speak(speech);
-             }}
-         </script>
-     </head>
-     <body>
-         <h1>🔊 Read It Aloud</h1>
-         <textarea id="textArea" rows="10" cols="80">{result}</textarea>
-         <br>
-         <button onclick="readAloud()">🔊 Read Aloud</button>
-     </body>
-     </html>
-     '''
-     components.html(documentHTML5, width=1280, height=300)
-
- # Media Processing Functions
- def process_image(image_input, user_prompt):
-     """Process image with GPT-4o vision."""
-     if isinstance(image_input, str):
-         with open(image_input, "rb") as image_file:
-             image_input = image_file.read()
-
-     base64_image = base64.b64encode(image_input).decode("utf-8")
-
-     response = openai_client.chat.completions.create(
-         model=st.session_state["openai_model"],
-         messages=[
-             {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
-             {"role": "user", "content": [
-                 {"type": "text", "text": user_prompt},
-                 {"type": "image_url", "image_url": {
-                     "url": f"data:image/png;base64,{base64_image}"
-                 }}
-             ]}
-         ],
-         temperature=0.0,
-     )
-
-     return response.choices[0].message.content
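`process_image` embeds the image as a base64 data URL inside an `image_url` content part, which is the shape the OpenAI chat completions API expects for vision input. A standalone call of the same form (the file name and prompt are placeholders; the model name mirrors the session default set earlier in this file):

```python
# Sketch: standalone version of the data-URL vision call used above.
# "example.png" and the prompt text are placeholders.
import base64
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

with open("example.png", "rb") as f:
    b64 = base64.b64encode(f.read()).decode("utf-8")

response = client.chat.completions.create(
    model="gpt-4o-2024-05-13",
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe this image."},
            {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64}"}},
        ],
    }],
)
print(response.choices[0].message.content)
```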
- def process_audio(audio_input, text_input=''):
-     """Process audio with Whisper and GPT."""
-     if isinstance(audio_input, str):
-         with open(audio_input, "rb") as file:
-             audio_input = file.read()
-
-     transcription = openai_client.audio.transcriptions.create(
-         model="whisper-1",
-         file=audio_input,
-     )
-
-     st.session_state.messages.append({"role": "user", "content": transcription.text})
-
-     with st.chat_message("assistant"):
-         st.markdown(transcription.text)
-         SpeechSynthesis(transcription.text)
-
-     filename = generate_filename(transcription.text, "wav")
-     create_and_save_file(audio_input, "wav", transcription.text, True)
-
- def process_video(video_path, seconds_per_frame=1):
-     """Process video files for frame extraction and audio."""
-     base64Frames = []
-     video = cv2.VideoCapture(video_path)
-     total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
-     fps = video.get(cv2.CAP_PROP_FPS)
-     frames_to_skip = int(fps * seconds_per_frame)
-
-     for frame_idx in range(0, total_frames, frames_to_skip):
-         video.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
-         success, frame = video.read()
-         if not success:
-             break
-         _, buffer = cv2.imencode(".jpg", frame)
-         base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
-
-     video.release()
-
-     # Extract audio
-     base_video_path = os.path.splitext(video_path)[0]
-     audio_path = f"{base_video_path}.mp3"
-     try:
-         video_clip = VideoFileClip(video_path)
-         video_clip.audio.write_audiofile(audio_path)
-         video_clip.close()
-     except:
-         st.warning("No audio track found in video")
-         audio_path = None
-
-     return base64Frames, audio_path
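`process_video` samples one frame per `seconds_per_frame` by stepping the capture index by `int(fps * seconds_per_frame)`. If OpenCV reports `fps` as 0, which happens with broken containers, that step becomes 0 and `range()` raises `ValueError`. A guarded variant of the same sampling loop (a sketch, not part of the commit):

```python
# Sketch: the same frame-sampling loop with a guard for fps == 0.
import base64
import cv2

def sample_frames(video_path, seconds_per_frame=1):
    video = cv2.VideoCapture(video_path)
    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = video.get(cv2.CAP_PROP_FPS)
    # max(1, ...) keeps the step positive even when fps is reported as 0.
    step = max(1, int(fps * seconds_per_frame))
    frames = []
    for frame_idx in range(0, total_frames, step):
        video.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
        success, frame = video.read()
        if not success:
            break
        _, buffer = cv2.imencode(".jpg", frame)
        frames.append(base64.b64encode(buffer).decode("utf-8"))
    video.release()
    return frames
```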
- def process_video_with_gpt(video_input, user_prompt):
-     """Process video with GPT-4o vision."""
-     base64Frames, audio_path = process_video(video_input)
-
-     response = openai_client.chat.completions.create(
-         model=st.session_state["openai_model"],
-         messages=[
-             {"role": "system", "content": "Analyze the video frames and provide a detailed description."},
-             {"role": "user", "content": [
-                 {"type": "text", "text": user_prompt},
-                 *[{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{frame}"}}
-                   for frame in base64Frames]
-             ]}
-         ]
-     )
-
-     return response.choices[0].message.content
-
- def extract_urls(text):
-     try:
-         date_pattern = re.compile(r'### (\d{2} \w{3} \d{4})')
-         abs_link_pattern = re.compile(r'\[(.*?)\]\((https://arxiv\.org/abs/\d+\.\d+)\)')
-         pdf_link_pattern = re.compile(r'\[⬇️\]\((https://arxiv\.org/pdf/\d+\.\d+)\)')
-         title_pattern = re.compile(r'### \d{2} \w{3} \d{4} \| \[(.*?)\]')
-         date_matches = date_pattern.findall(text)
-         abs_link_matches = abs_link_pattern.findall(text)
-         pdf_link_matches = pdf_link_pattern.findall(text)
-         title_matches = title_pattern.findall(text)
-
-         # markdown with the extracted fields
-         markdown_text = ""
-         for i in range(len(date_matches)):
-             date = date_matches[i]
-             title = title_matches[i]
-             abs_link = abs_link_matches[i][1]
-             pdf_link = pdf_link_matches[i]
-             markdown_text += f"**Date:** {date}\n\n"
-             markdown_text += f"**Title:** {title}\n\n"
-             markdown_text += f"**Abstract Link:** [{abs_link}]({abs_link})\n\n"
-             markdown_text += f"**PDF Link:** [{pdf_link}]({pdf_link})\n\n"
-             markdown_text += "---\n\n"
-         return markdown_text
-
-     except:
-         st.write('.')
-         return ''
-
- def search_arxiv(query):
-
-     st.write("Performing AI Lookup...")
-     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
-
-     result1 = client.predict(
-         prompt=query,
-         llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1",
-         stream_outputs=True,
-         api_name="/ask_llm"
-     )
-     st.markdown("### Mixtral-8x7B-Instruct-v0.1 Result")
-     st.markdown(result1)
-
-     result2 = client.predict(
-         prompt=query,
-         llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
-         stream_outputs=True,
-         api_name="/ask_llm"
-     )
-     st.markdown("### Mistral-7B-Instruct-v0.2 Result")
-     st.markdown(result2)
-     combined_result = f"{result1}\n\n{result2}"
-     return combined_result
-
-     #return responseall
-
- # Function to generate a filename based on prompt and time (because names matter 🕒)
- def generate_filename(prompt, file_type):
-     central = pytz.timezone('US/Central')
-     safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
-     safe_prompt = re.sub(r'\W+', '_', prompt)[:90]
-     return f"{safe_date_time}_{safe_prompt}.{file_type}"
-
- # Function to create and save a file (and avoid the black hole of lost data 🕳)
- def create_file(filename, prompt, response):
-     with open(filename, 'w', encoding='utf-8') as file:
-         file.write(prompt + "\n\n" + response)
-
- def perform_ai_lookup(query):
-     start_time = time.strftime("%Y-%m-%d %H:%M:%S")
-     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
-     response1 = client.predict(
-         query,
-         20,
-         "Semantic Search",
-         "mistralai/Mixtral-8x7B-Instruct-v0.1",
-         api_name="/update_with_rag_md"
-     )
-     Question = '### 🔎 ' + query + '\r\n'  # Format for markdown display with links
-     References = response1[0]
-     ReferenceLinks = extract_urls(References)
-
-     RunSecondQuery = True
-     results = ''
-     if RunSecondQuery:
-         # Search 2 - Retrieve the Summary with Papers Context and Original Query
-         response2 = client.predict(
-             query,
-             "mistralai/Mixtral-8x7B-Instruct-v0.1",
-             True,
-             api_name="/ask_llm"
-         )
-         if len(response2) > 10:
-             Answer = response2
-             SpeechSynthesis(Answer)
-             # Restructure results to follow format of Question, Answer, References, ReferenceLinks
-             results = Question + '\r\n' + Answer + '\r\n' + References + '\r\n' + ReferenceLinks
-             st.markdown(results)
-
-     st.write('🔍Run of Multi-Agent System Paper Summary Spec is Complete')
-     end_time = time.strftime("%Y-%m-%d %H:%M:%S")
-     start_timestamp = time.mktime(time.strptime(start_time, "%Y-%m-%d %H:%M:%S"))
-     end_timestamp = time.mktime(time.strptime(end_time, "%Y-%m-%d %H:%M:%S"))
-     elapsed_seconds = end_timestamp - start_timestamp
-     st.write(f"Start time: {start_time}")
-     st.write(f"Finish time: {end_time}")
-     st.write(f"Elapsed time: {elapsed_seconds:.2f} seconds")
-
-     filename = generate_filename(query, "md")
-     create_file(filename, query, results)
-     return results
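`perform_ai_lookup` times the run by formatting wall-clock strings and parsing them back through `strptime`/`mktime`, which caps the resolution at one second and can misbehave across DST transitions. A monotonic clock yields the same report more simply (a sketch, not part of the commit):

```python
# Sketch: elapsed-time measurement with a monotonic clock instead of
# round-tripping formatted timestamps through strptime/mktime.
import time

start = time.monotonic()
start_stamp = time.strftime("%Y-%m-%d %H:%M:%S")
# ... perform the two client.predict() calls here ...
elapsed_seconds = time.monotonic() - start
print(f"Start time: {start_stamp}")
print(f"Elapsed time: {elapsed_seconds:.2f} seconds")
```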
- # Chat Processing Functions
- def process_with_gpt(text_input):
-     """Process text with GPT-4o."""
-     if text_input:
-         st.session_state.messages.append({"role": "user", "content": text_input})
-
-         with st.chat_message("user"):
-             st.markdown(text_input)
-
-         with st.chat_message("assistant"):
-             completion = openai_client.chat.completions.create(
-                 model=st.session_state["openai_model"],
-                 messages=[
-                     {"role": m["role"], "content": m["content"]}
-                     for m in st.session_state.messages
-                 ],
-                 stream=False
-             )
-             return_text = completion.choices[0].message.content
-             st.write("GPT-4o: " + return_text)
-
-             #filename = generate_filename(text_input, "md")
-             filename = generate_filename("GPT-4o: " + return_text, "md")
-             create_file(filename, text_input, return_text)
-             st.session_state.messages.append({"role": "assistant", "content": return_text})
-     return return_text
-
- def process_with_claude(text_input):
-     """Process text with Claude."""
-     if text_input:
-
-         with st.chat_message("user"):
-             st.markdown(text_input)
-
-         with st.chat_message("assistant"):
-             response = claude_client.messages.create(
-                 model="claude-3-sonnet-20240229",
-                 max_tokens=1000,
-                 messages=[
-                     {"role": "user", "content": text_input}
-                 ]
-             )
-             response_text = response.content[0].text
-             st.write("Claude: " + response_text)
-
-             #filename = generate_filename(text_input, "md")
-             filename = generate_filename("Claude: " + response_text, "md")
-             create_file(filename, text_input, response_text)
-
-             st.session_state.chat_history.append({
-                 "user": text_input,
-                 "claude": response_text
-             })
-     return response_text
-
- # File Management Functions
- def load_file(file_name):
-     """Load file content."""
-     with open(file_name, "r", encoding='utf-8') as file:
-         content = file.read()
-     return content
-
- def create_zip_of_files(files):
-     """Create zip archive of files."""
-     zip_name = "all_files.zip"
-     with zipfile.ZipFile(zip_name, 'w') as zipf:
-         for file in files:
-             zipf.write(file)
-     return zip_name
-
- def get_media_html(media_path, media_type="video", width="100%"):
-     """Generate HTML for media player."""
-     media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
-     if media_type == "video":
-         return f'''
-         <video width="{width}" controls autoplay muted loop>
-             <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
-             Your browser does not support the video tag.
-         </video>
-         '''
-     else:  # audio
-         return f'''
-         <audio controls style="width: {width};">
-             <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
-             Your browser does not support the audio element.
-         </audio>
-         '''
-
- def create_media_gallery():
-     """Create the media gallery interface."""
-     st.header("🎬 Media Gallery")
-
-     tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video"])
-
-     with tabs[0]:
-         image_files = glob.glob("*.png") + glob.glob("*.jpg")
-         if image_files:
-             num_cols = st.slider("Number of columns", 1, 5, 3)
-             cols = st.columns(num_cols)
-             for idx, image_file in enumerate(image_files):
-                 with cols[idx % num_cols]:
-                     img = Image.open(image_file)
-                     st.image(img, use_container_width=True)
-
-                     # Add GPT vision analysis option
-                     if st.button(f"Analyze {os.path.basename(image_file)}"):
-                         analysis = process_image(image_file,
-                             "Describe this image in detail and identify key elements.")
-                         st.markdown(analysis)
-
-     with tabs[1]:
-         audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
-         for audio_file in audio_files:
-             with st.expander(f"🎵 {os.path.basename(audio_file)}"):
-                 st.markdown(get_media_html(audio_file, "audio"), unsafe_allow_html=True)
-                 if st.button(f"Transcribe {os.path.basename(audio_file)}"):
-                     with open(audio_file, "rb") as f:
-                         transcription = process_audio(f)
-                     st.write(transcription)
-
-     with tabs[2]:
-         video_files = glob.glob("*.mp4")
-         for video_file in video_files:
-             with st.expander(f"🎥 {os.path.basename(video_file)}"):
-                 st.markdown(get_media_html(video_file, "video"), unsafe_allow_html=True)
-                 if st.button(f"Analyze {os.path.basename(video_file)}"):
-                     analysis = process_video_with_gpt(video_file,
-                         "Describe what's happening in this video.")
-                     st.markdown(analysis)
-
- def display_file_manager():
-     """Display file management sidebar with guaranteed unique button keys."""
-     st.sidebar.title("📁 File Management")
-
-     all_files = glob.glob("*.md")
-     all_files.sort(reverse=True)
-
-     if st.sidebar.button("🗑 Delete All", key="delete_all_files_button"):
-         for file in all_files:
-             os.remove(file)
-         st.rerun()
-
-     if st.sidebar.button("⬇️ Download All", key="download_all_files_button"):
-         zip_file = create_zip_of_files(all_files)
-         st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)
-
-     # Create unique keys using file attributes
-     for idx, file in enumerate(all_files):
-         # Get file stats for unique identification
-         file_stat = os.stat(file)
-         unique_id = f"{idx}_{file_stat.st_size}_{file_stat.st_mtime}"
-
-         col1, col2, col3, col4 = st.sidebar.columns([1,3,1,1])
-         with col1:
-             if st.button("🌐", key=f"view_{unique_id}"):
-                 st.session_state.current_file = file
-                 st.session_state.file_content = load_file(file)
-         with col2:
-             st.markdown(get_download_link(file), unsafe_allow_html=True)
-         with col3:
-             if st.button("📂", key=f"edit_{unique_id}"):
-                 st.session_state.current_file = file
-                 st.session_state.file_content = load_file(file)
-         with col4:
-             if st.button("🗑", key=f"delete_{unique_id}"):
-                 os.remove(file)
-                 st.rerun()
-
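The deleted `display_file_manager` built each button key from the file's index, size, and mtime. Streamlit only requires keys to be unique within a single script run and reports a DuplicateWidgetID error when two widgets collide, so the file path alone is sufficient, which is what the re-added version later in this diff uses. A minimal illustration (a sketch, not part of the commit):

```python
# Sketch: Streamlit widget keys. Two buttons with the same label and no
# key would collide with a DuplicateWidgetID error; any per-item unique
# key (the file path is enough) resolves it.
import streamlit as st

files = ["a.md", "b.md"]
for f in files:
    # st.button("🗑")                       # same implicit key twice -> error
    if st.button("🗑", key=f"delete_{f}"):  # path-based key is unique per run
        st.write(f"would delete {f}")
```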
  speech_recognition_html = """
  <!DOCTYPE html>
  <html>
@@ -705,68 +159,267 @@ speech_recognition_html = """
  </html>
  """
 
- # Helper Functions
- def generate_filename(prompt, file_type):
-     central = pytz.timezone('US/Central')
-     safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
-     replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
-     safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
-     return f"{safe_date_time}_{safe_prompt}.{file_type}"
-
- # File Management Functions
- def load_file(file_name):
-     """Load file content."""
-     with open(file_name, "r", encoding='utf-8') as file:
-         content = file.read()
-     return content
-
- def create_zip_of_files(files):
-     """Create zip archive of files."""
-     zip_name = "all_files.zip"
-     with zipfile.ZipFile(zip_name, 'w') as zipf:
-         for file in files:
-             zipf.write(file)
-     return zip_name
-
- def get_download_link(file):
-     """Create download link for file."""
-     with open(file, "rb") as f:
-         contents = f.read()
-     b64 = base64.b64encode(contents).decode()
-     return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file)}">Download {os.path.basename(file)}📂</a>'
-
- def display_file_manager():
-     """Display file management sidebar."""
-     st.sidebar.title("📁 File Management")
-
-     all_files = glob.glob("*.md")
-     all_files.sort(reverse=True)
-
-     if st.sidebar.button("🗑 Delete All"):
-         for file in all_files:
-             os.remove(file)
-         st.rerun()
-
-     if st.sidebar.button("⬇️ Download All"):
-         zip_file = create_zip_of_files(all_files)
-         st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)
-
-     for file in all_files:
-         col1, col2, col3, col4 = st.sidebar.columns([1,3,1,1])
          with col1:
-             if st.button("🌐", key="view_"+file):
-                 st.session_state.current_file = file
-                 st.session_state.file_content = load_file(file)
          with col2:
-             st.markdown(get_download_link(file), unsafe_allow_html=True)
          with col3:
-             if st.button("📂", key="edit_"+file):
-                 st.session_state.current_file = file
-                 st.session_state.file_content = load_file(file)
-         with col4:
-             if st.button("🗑", key="delete_"+file):
-                 os.remove(file)
-                 st.rerun()
 
  def create_media_gallery():
      """Create the media gallery interface."""
@@ -783,8 +436,6 @@ def create_media_gallery():
                  with cols[idx % num_cols]:
                      img = Image.open(image_file)
                      st.image(img, use_container_width=True)
-
-                     # Add GPT vision analysis option
                      if st.button(f"Analyze {os.path.basename(image_file)}"):
                          analysis = process_image(image_file,
                              "Describe this image in detail and identify key elements.")
@@ -809,8 +460,6 @@ def create_media_gallery():
                      analysis = process_video_with_gpt(video_file,
                          "Describe what's happening in this video.")
                      st.markdown(analysis)
-
-
 
  def get_media_html(media_path, media_type="video", width="100%"):
      """Generate HTML for media player."""
@@ -830,83 +479,101 @@ def get_media_html(media_path, media_type="video", width="100%"):
          </audio>
          '''
 
- @st.cache_resource
- def set_transcript(text):
-     """Set transcript in session state."""
-     st.session_state.voice_transcript = text
- if tab_main == "🎤 Voice Input":
-     st.subheader("Voice Recognition")
-
-     if 'voice_transcript' not in st.session_state:
-         st.session_state.voice_transcript = ""
-
-     # Speech recognition component
-     st.components.v1.html(speech_recognition_html, height=400)
-
-     # Transcript receiver
-     transcript_receiver = st.components.v1.html("""
-     <script>
-     window.addEventListener('message', function(e) {
-         if (e.data && e.data.type === 'final_transcript') {
-             window.Streamlit.setComponentValue(e.data.text);
-         }
-     });
-     </script>
-     """, height=0)
 
-     # Update session state if new transcript received
-     if transcript_receiver:
-         st.session_state.voice_transcript = transcript_receiver
 
-     # Display transcript
-     st.markdown("### Processed Voice Input:")
-     st.text_area(
-         "Voice Transcript",
-         value=st.session_state.voice_transcript if isinstance(st.session_state.voice_transcript, str) else "",
-         height=100
-     )
 
-     # Process buttons
-     col1, col2, col3 = st.columns(3)
-
      with col1:
-         if st.button("Process with GPT"):
-             if st.session_state.voice_transcript:
-                 st.markdown("### GPT Response:")
-                 gpt_response = process_with_gpt(st.session_state.voice_transcript)
-                 st.markdown(gpt_response)
-
      with col2:
-         if st.button("Process with Claude"):
-             if st.session_state.voice_transcript:
-                 st.markdown("### Claude Response:")
-                 claude_response = process_with_claude(st.session_state.voice_transcript)
-                 st.markdown(claude_response)
-
      with col3:
-         if st.button("Clear Transcript"):
-             st.session_state.voice_transcript = ""
-             st.experimental_rerun()
 
-     # Show ArXiv search option if there's a transcript
-     if st.session_state.voice_transcript:
-         if st.button("Search ArXiv"):
-             st.markdown("### ArXiv Search Results:")
-             arxiv_results = perform_ai_lookup(st.session_state.voice_transcript)
-             st.markdown(arxiv_results)
-
- elif tab_main == "📝 File Editor":
-     if hasattr(st.session_state, 'current_file'):
-         st.subheader(f"Editing: {st.session_state.current_file}")
-         new_content = st.text_area("Content:", st.session_state.file_content, height=300)
-         if st.button("Save Changes"):
-             with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
-                 file.write(new_content)
-             st.success("File updated successfully!")
 
- # Always show file manager in sidebar
- display_file_manager()
-
 
  if __name__ == "__main__":
      main()
 
  from bs4 import BeautifulSoup
  from collections import deque
  from dotenv import load_dotenv
+ from gradio_client import Client
  from huggingface_hub import InferenceClient
  from io import BytesIO
  from moviepy.editor import VideoFileClip
 
  from xml.etree import ElementTree as ET
  from openai import OpenAI
 
+ # Configuration constants
  Site_Name = '🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI'
  title = "🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI"
  helpURL = 'https://huggingface.co/awacke1'
  bugURL = 'https://huggingface.co/spaces/awacke1'
  icons = '🚲🏆'
 
+ # Speech Recognition HTML Template
  speech_recognition_html = """
  <!DOCTYPE html>
  <html>
 
  </html>
  """
 
+ # Streamlit page configuration
+ st.set_page_config(
+     page_title=title,
+     page_icon=icons,
+     layout="wide",
+     initial_sidebar_state="auto",
+     menu_items={
+         'Get Help': helpURL,
+         'Report a bug': bugURL,
+         'About': title
+     }
+ )
 
+ # Load environment variables
+ load_dotenv()
 
+ # OpenAI setup
+ openai.api_key = os.getenv('OPENAI_API_KEY')
+ if openai.api_key == None:
+     openai.api_key = st.secrets['OPENAI_API_KEY']
+
+ openai_client = OpenAI(
+     api_key=os.getenv('OPENAI_API_KEY'),
+     organization=os.getenv('OPENAI_ORG_ID')
+ )
+
+ # Claude setup
+ anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
+ if anthropic_key == None:
+     anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
+ claude_client = anthropic.Anthropic(api_key=anthropic_key)
+
+ # Initialize session states
+ if 'transcript_history' not in st.session_state:
+     st.session_state.transcript_history = []
+ if "chat_history" not in st.session_state:
+     st.session_state.chat_history = []
+ if "openai_model" not in st.session_state:
+     st.session_state["openai_model"] = "gpt-4-vision-preview"
+ if "messages" not in st.session_state:
+     st.session_state.messages = []
+ if 'voice_transcript' not in st.session_state:
+     st.session_state.voice_transcript = ""
+
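The added block repeats the same guard five times. A loop over a defaults dict keeps new keys from drifting out of sync (a sketch, not part of the commit; the key names and the model default are taken from the diff):

```python
# Sketch: table-driven session-state defaults (not part of this commit).
import streamlit as st

SESSION_DEFAULTS = {
    "transcript_history": [],
    "chat_history": [],
    "openai_model": "gpt-4-vision-preview",
    "messages": [],
    "voice_transcript": "",
}

for key, default in SESSION_DEFAULTS.items():
    if key not in st.session_state:
        st.session_state[key] = default
```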
+ # Main processing functions
+ def process_with_gpt(text_input):
+     """Process text with GPT-4."""
+     if text_input:
+         st.session_state.messages.append({"role": "user", "content": text_input})
+
+         with st.chat_message("user"):
+             st.markdown(text_input)
+
+         with st.chat_message("assistant"):
+             completion = openai_client.chat.completions.create(
+                 model=st.session_state["openai_model"],
+                 messages=[
+                     {"role": m["role"], "content": m["content"]}
+                     for m in st.session_state.messages
+                 ],
+                 stream=False
+             )
+             return_text = completion.choices[0].message.content
+             st.write("GPT-4: " + return_text)
+
+             filename = generate_filename("GPT-4: " + return_text, "md")
+             create_file(filename, text_input, return_text)
+             st.session_state.messages.append({"role": "assistant", "content": return_text})
+     return return_text
+
+ def process_with_claude(text_input):
+     """Process text with Claude."""
+     if text_input:
+         with st.chat_message("user"):
+             st.markdown(text_input)
+
+         with st.chat_message("assistant"):
+             response = claude_client.messages.create(
+                 model="claude-3-sonnet-20240229",
+                 max_tokens=1000,
+                 messages=[
+                     {"role": "user", "content": text_input}
+                 ]
+             )
+             response_text = response.content[0].text
+             st.write("Claude: " + response_text)
+
+             filename = generate_filename("Claude: " + response_text, "md")
+             create_file(filename, text_input, response_text)
+
+             st.session_state.chat_history.append({
+                 "user": text_input,
+                 "claude": response_text
+             })
+     return response_text
+
+ def perform_ai_lookup(query):
+     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+     start_time = time.strftime("%Y-%m-%d %H:%M:%S")
+
+     response1 = client.predict(
+         query,
+         20,
+         "Semantic Search",
+         "mistralai/Mixtral-8x7B-Instruct-v0.1",
+         api_name="/update_with_rag_md"
+     )
+
+     Question = '### 🔎 ' + query + '\r\n'
+     References = response1[0]
+     ReferenceLinks = extract_urls(References)
+
+     RunSecondQuery = True
+     results = ''
+     if RunSecondQuery:
+         response2 = client.predict(
+             query,
+             "mistralai/Mixtral-8x7B-Instruct-v0.1",
+             True,
+             api_name="/ask_llm"
+         )
+         if len(response2) > 10:
+             Answer = response2
+             results = Question + '\r\n' + Answer + '\r\n' + References + '\r\n' + ReferenceLinks
+             st.markdown(results)
 
+     end_time = time.strftime("%Y-%m-%d %H:%M:%S")
+     start_timestamp = time.mktime(time.strptime(start_time, "%Y-%m-%d %H:%M:%S"))
+     end_timestamp = time.mktime(time.strptime(end_time, "%Y-%m-%d %H:%M:%S"))
+     elapsed_seconds = end_timestamp - start_timestamp
+
+     st.write('🔍Run of Multi-Agent System Paper Summary Spec is Complete')
+     st.write(f"Start time: {start_time}")
+     st.write(f"Finish time: {end_time}")
+     st.write(f"Elapsed time: {elapsed_seconds:.2f} seconds")
 
+     filename = generate_filename(query, "md")
+     create_file(filename, query, results)
+     return results
+
+ # Main function
+ def main():
+     st.sidebar.markdown("### 🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI")
 
+     tab_main = st.radio("Choose Action:",
+         ["🎤 Voice Input", "💬 Chat", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
+         horizontal=True)
+
+     if tab_main == "🎤 Voice Input":
+         st.subheader("Voice Recognition")
+
+         # Display speech recognition component
+         st.components.v1.html(speech_recognition_html, height=400)
 
+         # Transcript receiver
+         transcript_receiver = st.components.v1.html("""
+         <script>
+         window.addEventListener('message', function(e) {
+             if (e.data && e.data.type === 'final_transcript') {
+                 window.Streamlit.setComponentValue(e.data.text);
+             }
+         });
+         </script>
+         """, height=0)
 
+         # Update session state if new transcript received
+         if transcript_receiver:
+             st.session_state.voice_transcript = transcript_receiver
 
+         # Display transcript
+         st.markdown("### Processed Voice Input:")
+         st.text_area(
+             "Voice Transcript",
+             value=st.session_state.voice_transcript if isinstance(st.session_state.voice_transcript, str) else "",
+             height=100
+         )
+
+         # Process buttons
+         col1, col2, col3 = st.columns(3)
          with col1:
+             if st.button("Process with GPT"):
+                 if st.session_state.voice_transcript:
+                     st.markdown("### GPT Response:")
+                     gpt_response = process_with_gpt(st.session_state.voice_transcript)
+                     st.markdown(gpt_response)
+
          with col2:
+             if st.button("Process with Claude"):
+                 if st.session_state.voice_transcript:
+                     st.markdown("### Claude Response:")
+                     claude_response = process_with_claude(st.session_state.voice_transcript)
+                     st.markdown(claude_response)
+
          with col3:
+             if st.button("Clear Transcript"):
+                 st.session_state.voice_transcript = ""
+                 st.experimental_rerun()
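This branch calls `st.experimental_rerun()` while `display_file_manager` later in the same diff calls `st.rerun()`; the former is the older name and is absent from Streamlit versions that only ship `st.rerun`. A small shim keeps both call sites working across versions (a sketch, not part of the commit):

```python
# Sketch: version-tolerant rerun. Uses st.rerun when available and falls
# back to st.experimental_rerun on older Streamlit releases.
import streamlit as st

rerun = getattr(st, "rerun", None) or st.experimental_rerun
rerun()
```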
+
+         if st.session_state.voice_transcript:
+             if st.button("Search ArXiv"):
+                 st.markdown("### ArXiv Search Results:")
+                 arxiv_results = perform_ai_lookup(st.session_state.voice_transcript)
+                 st.markdown(arxiv_results)
+
+     elif tab_main == "💬 Chat":
+         # Model Selection
+         model_choice = st.sidebar.radio(
+             "Choose AI Model:",
+             ["GPT-4", "Claude-3", "GPT+Claude+Arxiv"]
+         )
+
+         # Chat Interface
+         user_input = st.text_area("Message:", height=100)
+
+         if st.button("Send 📨"):
+             if user_input:
+                 if model_choice == "GPT-4":
+                     gpt_response = process_with_gpt(user_input)
+                 elif model_choice == "Claude-3":
+                     claude_response = process_with_claude(user_input)
+                 else:  # Both + Arxiv
+                     col1, col2, col3 = st.columns(3)
+                     with col1:
+                         st.subheader("GPT-4:")
+                         try:
+                             gpt_response = process_with_gpt(user_input)
+                         except:
+                             st.write('GPT-4 out of tokens')
+                     with col2:
+                         st.subheader("Claude-3:")
+                         try:
+                             claude_response = process_with_claude(user_input)
+                         except:
+                             st.write('Claude-3 out of tokens')
+                     with col3:
+                         st.subheader("Arxiv Search:")
+                         with st.spinner("Searching ArXiv..."):
+                             results = perform_ai_lookup(user_input)
+                             st.markdown(results)
+
+     elif tab_main == "📸 Media Gallery":
+         create_media_gallery()
+
+     elif tab_main == "🔍 Search ArXiv":
+         query = st.text_input("Enter your research query:")
+         if query:
+             with st.spinner("Searching ArXiv..."):
+                 results = perform_ai_lookup(query)
+                 st.markdown(results)
+
+     elif tab_main == "📝 File Editor":
+         if hasattr(st.session_state, 'current_file'):
+             st.subheader(f"Editing: {st.session_state.current_file}")
+             new_content = st.text_area("Content:", st.session_state.file_content, height=300)
+             if st.button("Save Changes"):
+                 with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
+                     file.write(new_content)
+                 st.success("File updated successfully!")
+
+     # Always show file manager in sidebar
+     display_file_manager()
 
  def create_media_gallery():
      """Create the media gallery interface."""
 
                  with cols[idx % num_cols]:
                      img = Image.open(image_file)
                      st.image(img, use_container_width=True)
                      if st.button(f"Analyze {os.path.basename(image_file)}"):
                          analysis = process_image(image_file,
                              "Describe this image in detail and identify key elements.")
 
                      analysis = process_video_with_gpt(video_file,
                          "Describe what's happening in this video.")
                      st.markdown(analysis)
 
  def get_media_html(media_path, media_type="video", width="100%"):
      """Generate HTML for media player."""
 
          </audio>
          '''
 
+ def display_file_manager():
+     """Display file management sidebar."""
+     st.sidebar.title("📁 File Management")
+
+     all_files = glob.glob("*.md")
+     all_files.sort(reverse=True)
 
+     if st.sidebar.button("🗑 Delete All"):
+         for file in all_files:
+             os.remove(file)
+         st.rerun()
 
+     if st.sidebar.button("⬇️ Download All"):
+         zip_file = create_zip_of_files(all_files)
+         st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)
 
+     for file in all_files:
+         col1, col2, col3, col4 = st.sidebar.columns([1,3,1,1])
          with col1:
+             if st.button("🌐", key=f"view_{file}"):
+                 st.session_state.current_file = file
+                 st.session_state.file_content = load_file(file)
          with col2:
+             st.markdown(get_download_link(file), unsafe_allow_html=True)
          with col3:
+             if st.button("📂", key=f"edit_{file}"):
+                 st.session_state.current_file = file
+                 st.session_state.file_content = load_file(file)
+         with col4:
+             if st.button("🗑", key=f"delete_{file}"):
+                 os.remove(file)
+                 st.rerun()
 
+ def generate_filename(prompt, file_type):
+     """Generate a filename based on prompt and time."""
+     central = pytz.timezone('US/Central')
+     safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
+     replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
+     safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
+     return f"{safe_date_time}_{safe_prompt}.{file_type}"
 
+ def create_file(filename, prompt, response):
+     """Create and save a file."""
+     with open(filename, 'w', encoding='utf-8') as file:
+         file.write(prompt + "\n\n" + response)
+
+ def load_file(file_name):
+     """Load file content."""
+     with open(file_name, "r", encoding='utf-8') as file:
+         content = file.read()
+     return content
+
+ def create_zip_of_files(files):
+     """Create zip archive of files."""
+     zip_name = "all_files.zip"
+     with zipfile.ZipFile(zip_name, 'w') as zipf:
+         for file in files:
+             zipf.write(file)
+     return zip_name
+
+ def get_download_link(file):
+     """Create download link for file."""
+     with open(file, "rb") as f:
+         contents = f.read()
+     b64 = base64.b64encode(contents).decode()
+     return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file)}">Download {os.path.basename(file)}📂</a>'
+
+ def extract_urls(text):
+     """Extract URLs from text."""
+     try:
+         date_pattern = re.compile(r'### (\d{2} \w{3} \d{4})')
+         abs_link_pattern = re.compile(r'\[(.*?)\]\((https://arxiv\.org/abs/\d+\.\d+)\)')
+         pdf_link_pattern = re.compile(r'\[⬇️\]\((https://arxiv\.org/pdf/\d+\.\d+)\)')
+         title_pattern = re.compile(r'### \d{2} \w{3} \d{4} \| \[(.*?)\]')
+
+         date_matches = date_pattern.findall(text)
+         abs_link_matches = abs_link_pattern.findall(text)
+         pdf_link_matches = pdf_link_pattern.findall(text)
+         title_matches = title_pattern.findall(text)
+
+         markdown_text = ""
+         for i in range(len(date_matches)):
+             date = date_matches[i]
+             title = title_matches[i]
+             abs_link = abs_link_matches[i][1]
+             pdf_link = pdf_link_matches[i]
+             markdown_text += f"**Date:** {date}\n\n"
+             markdown_text += f"**Title:** {title}\n\n"
+             markdown_text += f"**Abstract Link:** [{abs_link}]({abs_link})\n\n"
+             markdown_text += f"**PDF Link:** [{pdf_link}]({pdf_link})\n\n"
+             markdown_text += "---\n\n"
+         return markdown_text
+     except:
+         return ''
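`extract_urls` indexes four independently collected regex result lists by the length of the date list; whenever one pattern matches fewer times than the others, the resulting IndexError is swallowed by the bare `except` and the entire reference list is dropped. Iterating with `zip()` degrades more gracefully (a sketch, not part of the commit; the regexes are copied from the diff):

```python
# Sketch: pair the four match lists with zip() so one missing match only
# truncates the output instead of discarding everything via a bare except.
import re

def extract_urls(text):
    dates = re.findall(r'### (\d{2} \w{3} \d{4})', text)
    abs_links = re.findall(r'\[(.*?)\]\((https://arxiv\.org/abs/\d+\.\d+)\)', text)
    pdf_links = re.findall(r'\[⬇️\]\((https://arxiv\.org/pdf/\d+\.\d+)\)', text)
    titles = re.findall(r'### \d{2} \w{3} \d{4} \| \[(.*?)\]', text)

    out = []
    # abs_links entries are (link_text, url) tuples because the pattern
    # has two capture groups; only the URL is needed here.
    for date, (_, abs_link), pdf_link, title in zip(dates, abs_links, pdf_links, titles):
        out.append(f"**Date:** {date}\n\n**Title:** {title}\n\n"
                   f"**Abstract Link:** [{abs_link}]({abs_link})\n\n"
                   f"**PDF Link:** [{pdf_link}]({pdf_link})\n\n---\n\n")
    return "".join(out)
```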
+
+ # Run the application
  if __name__ == "__main__":
      main()