Update app.py

app.py CHANGED
@@ -1,4 +1,3 @@
-
 import streamlit as st
 import anthropic, openai, base64, cv2, glob, json, math, os, pytz, random, re, requests, textract, time, zipfile
 import plotly.graph_objects as go
@@ -34,14 +33,20 @@ st.set_page_config(
     }
 )
 load_dotenv()
-
-
+
+openai_api_key = os.getenv('OPENAI_API_KEY', "")
+anthropic_key = os.getenv('ANTHROPIC_API_KEY_3', "")
+if 'OPENAI_API_KEY' in st.secrets:
+    openai_api_key = st.secrets['OPENAI_API_KEY']
+if 'ANTHROPIC_API_KEY' in st.secrets:
+    anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
+
+openai.api_key = openai_api_key
 claude_client = anthropic.Anthropic(api_key=anthropic_key)
 openai_client = OpenAI(api_key=openai.api_key, organization=os.getenv('OPENAI_ORG_ID'))
 HF_KEY = os.getenv('HF_KEY')
 API_URL = os.getenv('API_URL')

-# Session states
 if 'transcript_history' not in st.session_state:
     st.session_state['transcript_history'] = []
 if 'chat_history' not in st.session_state:
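The hunk above reads each key from the environment first and then lets a configured Streamlit secret override it. A minimal standalone sketch of that lookup pattern, assuming secrets are configured (for example on Spaces); the helper name get_key is illustrative and not part of app.py:

import os
import streamlit as st

def get_key(name: str, default: str = "") -> str:
    # Prefer a configured Streamlit secret, otherwise fall back to the environment variable.
    if name in st.secrets:
        return st.secrets[name]
    return os.getenv(name, default)

openai_api_key = get_key('OPENAI_API_KEY')
anthropic_key = get_key('ANTHROPIC_API_KEY_3')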
@@ -58,10 +63,8 @@ if 'edit_new_name' not in st.session_state:
     st.session_state['edit_new_name'] = ""
 if 'edit_new_content' not in st.session_state:
     st.session_state['edit_new_content'] = ""
-if '
-    st.session_state['
-if 'viewing_file_type' not in st.session_state:
-    st.session_state['viewing_file_type'] = None
+if 'viewing_prefix' not in st.session_state:
+    st.session_state['viewing_prefix'] = None
 if 'should_rerun' not in st.session_state:
     st.session_state['should_rerun'] = False

@@ -82,35 +85,37 @@ FILE_EMOJIS = {
 }

 def clean_for_speech(text: str) -> str:
-    # Remove \n
     text = text.replace("\n", " ")
-    # Remove </s>
     text = text.replace("</s>", " ")
-    # Remove markdown headings (#)
     text = text.replace("#", "")
-    # Remove links
+    # Remove links like (https://...)
     text = re.sub(r"\(https?:\/\/[^\)]+\)", "", text)
-    # Collapse multiple spaces
     text = re.sub(r"\s+", " ", text).strip()
     return text

-def generate_filename(
-    #
-
-
-
-
-
-
-
+def generate_filename(content, file_type="md"):
+    # Prefix: YYMM_HHmm_ -> total 10 chars including underscore
+    # Actually: %y%m_%H%M gives 9 chars, add trailing underscore for total 10 chars.
+    # Example: 23 09 _ 12 45 _ => '2309_1245_'
+    prefix = datetime.now().strftime("%y%m_%H%M") + "_"
+    # Extract some words from content
+    words = re.findall(r"\w+", content)
+    # Take first 3 words for filename segment
+    name_text = '_'.join(words[:3]) if words else 'file'
+    filename = f"{prefix}{name_text}.{file_type}"
+    return filename
+
+def create_file(prompt, response, file_type="md"):
+    # Decide which content to base the filename on (prefer response)
+    base_content = response.strip() if response.strip() else prompt.strip()
+    filename = generate_filename(base_content, file_type)
     with open(filename, 'w', encoding='utf-8') as f:
         f.write(prompt + "\n\n" + response)
-
+    return filename

 def get_download_link(file):
     with open(file, "rb") as f:
         b64 = base64.b64encode(f.read()).decode()
-    # It's a zip file download
     return f'<a href="data:file/zip;base64,{b64}" download="{os.path.basename(file)}">๐ Download {os.path.basename(file)}</a>'

 @st.cache_resource
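The new generate_filename/create_file helpers above name every output with a 10-character timestamp prefix (%y%m_%H%M plus a trailing underscore) followed by the first three words of the content. A short illustrative sketch of the resulting names; the sample content string is made up:

import re
from datetime import datetime

def sketch_filename(content, file_type="md"):
    prefix = datetime.now().strftime("%y%m_%H%M") + "_"  # 9 chars + "_" = 10-char prefix
    words = re.findall(r"\w+", content)
    name_text = "_".join(words[:3]) if words else "file"
    return f"{prefix}{name_text}.{file_type}"

print(sketch_filename("Quantum computing survey results"))
# e.g. 2412_0930_Quantum_computing_survey.md (timestamp varies)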
@@ -166,7 +171,6 @@ def process_audio(audio_path):
     with open(audio_path, "rb") as f:
         transcription = openai_client.audio.transcriptions.create(model="whisper-1", file=f)
     st.session_state.messages.append({"role": "user", "content": transcription.text})
-    # No immediate rerun.
     return transcription.text

 def process_video(video_path, seconds_per_frame=1):
@@ -219,8 +223,8 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary

     st.markdown(result)

+    # Clean for speech before TTS
     if vocal_summary:
-        # Clean before speech
         main_text = clean_for_speech(r2)
         audio_file_main = speak_with_edge_tts(main_text)
         st.write("### ๐๏ธ Vocal Summary (Short Answer)")
@@ -248,8 +252,8 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary

     elapsed = time.time()-start
     st.write(f"**Total Elapsed:** {elapsed:.2f} s")
-
-    create_file(
+    # Create MD file from q and result
+    create_file(q, result, "md")
     return result

 def process_with_gpt(text):
@@ -265,7 +269,7 @@ def process_with_gpt(text):
     )
     ans = c.choices[0].message.content
     st.write("GPT-4o: " + ans)
-    create_file(
+    create_file(text, ans, "md")
     st.session_state.messages.append({"role":"assistant","content":ans})
     return ans

@@ -281,18 +285,17 @@ def process_with_claude(text):
     )
     ans = r.content[0].text
     st.write("Claude: " + ans)
-    create_file(
+    create_file(text, ans, "md")
     st.session_state.chat_history.append({"user":text,"claude":ans})
     return ans

 def create_zip_of_files(md_files, mp3_files):
-    # Exclude README.md
+    # Exclude README.md
     md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
-
     all_files = md_files + mp3_files
     if not all_files:
         return None
-    # Build a descriptive name
+    # Build a descriptive name
     stems = [os.path.splitext(os.path.basename(f))[0] for f in all_files]
     joined = "_".join(stems)
     if len(joined) > 50:
@@ -303,121 +306,109 @@ def create_zip_of_files(md_files, mp3_files):
         z.write(f)
     return zip_name

-def get_media_html(p,typ="video",w="100%"):
-    d = base64.b64encode(open(p,'rb').read()).decode()
-    if typ=="video":
-        return f'<video width="{w}" controls autoplay muted loop><source src="data:video/mp4;base64,{d}" type="video/mp4"></video>'
-    else:
-        return f'<audio controls style="width:{w};"><source src="data:audio/mpeg;base64,{d}" type="audio/mpeg"></audio>'
-
 def load_files_for_sidebar():
-    # Gather
+    # Gather files
     md_files = glob.glob("*.md")
     mp3_files = glob.glob("*.mp3")

-    # Exclude README.md
+    # Exclude README.md
     md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']

     all_files = md_files + mp3_files
+
     # Group by first 10 chars of filename
-    # Note: We assume all files have at least 10 chars before underscore from generate_filename
     groups = defaultdict(list)
     for f in all_files:
         fname = os.path.basename(f)
-        prefix = fname[:10] # first 10 chars
+        prefix = fname[:10]  # first 10 chars as group prefix
         groups[prefix].append(f)

-    # Sort
-    # Also sort files within each group by mod time descending
+    # Sort files in each group by mod time descending
     for prefix in groups:
         groups[prefix].sort(key=lambda x: os.path.getmtime(x), reverse=True)

-    # Sort
+    # Sort prefixes by newest file time
     sorted_prefixes = sorted(groups.keys(), key=lambda pre: max(os.path.getmtime(x) for x in groups[pre]), reverse=True)

     return groups, sorted_prefixes

+def extract_keywords_from_md(files):
+    # Combine all MD content
+    text = ""
+    for f in files:
+        if f.endswith(".md"):
+            c = open(f,'r',encoding='utf-8').read()
+            text += " " + c
+    # Extract first 5 unique words
+    words = re.findall(r"\w+", text.lower())
+    unique_words = []
+    for w in words:
+        if w not in unique_words:
+            unique_words.append(w)
+        if len(unique_words) == 5:
+            break
+    return unique_words
+
 def display_file_manager_sidebar(groups, sorted_prefixes):
     st.sidebar.title("๐ต Audio & Document Manager")

-    # Collect all files
-
-
+    # Collect all md and mp3 files for zip operations
+    all_md = []
+    all_mp3 = []
     for prefix in groups:
         for f in groups[prefix]:
             if f.endswith(".md"):
-
+                all_md.append(f)
             elif f.endswith(".mp3"):
-
+                all_mp3.append(f)

     top_bar = st.sidebar.columns(3)
     with top_bar[0]:
         if st.button("๐ Del All MD"):
-            for f in
+            for f in all_md:
                 os.remove(f)
             st.session_state.should_rerun = True
     with top_bar[1]:
         if st.button("๐ Del All MP3"):
-            for f in
+            for f in all_mp3:
                 os.remove(f)
             st.session_state.should_rerun = True
     with top_bar[2]:
         if st.button("โฌ๏ธ Zip All"):
-            z = create_zip_of_files(
+            z = create_zip_of_files(all_md, all_mp3)
             if z:
                 st.sidebar.markdown(get_download_link(z),unsafe_allow_html=True)

-    # Display groups in expanders
     for prefix in sorted_prefixes:
         files = groups[prefix]
-        #
-
-
-        with
-        #
+        # Extract 5-word keywords from MD in this group
+        kw = extract_keywords_from_md(files)
+        keywords_str = " ".join(kw) if kw else "No Keywords"
+        with st.sidebar.expander(f"{prefix} Files ({len(files)}) - Keywords: {keywords_str}", expanded=True):
+            # Delete group / View group
+            c1,c2 = st.columns(2)
+            with c1:
+                if st.button("๐View Group", key="view_group_"+prefix):
+                    st.session_state.viewing_prefix = prefix
+                    # No rerun needed, just state update
+            with c2:
+                if st.button("๐Del Group", key="del_group_"+prefix):
+                    for f in files:
+                        os.remove(f)
+                    st.session_state.should_rerun = True
+
             for f in files:
                 fname = os.path.basename(f)
                 ctime = datetime.fromtimestamp(os.path.getmtime(f)).strftime("%Y-%m-%d %H:%M:%S")
                 ext = os.path.splitext(fname)[1].lower().strip('.')
                 st.write(f"**{fname}** - {ctime}")
-
-
-
-
-
-
-
-            if ext == "md":
-                if st.button("โ๏ธEdit", key="edit_"+f):
-                    st.session_state.editing_file = f
-                    st.session_state.edit_new_name = fname.replace(".md","")
-                    st.session_state.edit_new_content = open(f,'r',encoding='utf-8').read()
-                    st.session_state.should_rerun = True
-            with file_buttons_col[2]:
-                if st.button("๐Del", key="del_"+f):
-                    os.remove(f)
-                    st.session_state.should_rerun = True
-
-    # If editing an md file
-    if st.session_state.editing_file and os.path.exists(st.session_state.editing_file):
-        st.sidebar.subheader(f"Editing: {os.path.basename(st.session_state.editing_file)}")
-        st.session_state.edit_new_name = st.sidebar.text_input("New name (no extension):", value=st.session_state.edit_new_name)
-        st.session_state.edit_new_content = st.sidebar.text_area("Content:", st.session_state.edit_new_content, height=200)
-        c1,c2 = st.sidebar.columns(2)
-        with c1:
-            if st.button("Save"):
-                old_path = st.session_state.editing_file
-                new_path = st.session_state.edit_new_name + ".md"
-                if new_path != os.path.basename(old_path):
-                    os.rename(old_path, new_path)
-                with open(new_path,'w',encoding='utf-8') as f:
-                    f.write(st.session_state.edit_new_content)
-                st.session_state.editing_file = None
-                st.session_state.should_rerun = True
-        with c2:
-            if st.button("Cancel"):
-                st.session_state.editing_file = None
-                st.session_state.should_rerun = True
+                # Individual file actions are less necessary if we have group actions
+                # But we can still provide them if desired.
+                # The user requested grouping primarily, but we can keep minimal file actions if needed.
+                # In instructions now, main focus is group view/delete.
+                # We'll omit individual file view/edit here since we have group view.
+                # If needed, re-add them similarly as before.
+                # For now, rely on "View Group" to see all files.

 def main():
     st.sidebar.markdown("### ๐ฒBikeAI๐ Multi-Agent Research AI")
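Because every .md and .mp3 produced in one run shares the same 10-character timestamp prefix, load_files_for_sidebar can bucket related outputs with a plain string slice. A small sketch of that grouping with made-up filenames:

import os
from collections import defaultdict

# Hypothetical outputs following the YYMM_HHMM_ naming convention above.
files = [
    "2412_0930_Quantum_computing_survey.md",
    "2412_0930_Quantum_computing_survey.mp3",
    "2412_1015_Graph_neural_networks.md",
]

groups = defaultdict(list)
for f in files:
    groups[os.path.basename(f)[:10]].append(f)  # first 10 chars = timestamp prefix

for prefix in sorted(groups):
    print(prefix, groups[prefix])
# 2412_0930_ ['2412_0930_Quantum_computing_survey.md', '2412_0930_Quantum_computing_survey.mp3']
# 2412_1015_ ['2412_1015_Graph_neural_networks.md']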
@@ -425,7 +416,6 @@ def main():

     model_choice = st.sidebar.radio("AI Model:", ["Arxiv","GPT-4o","Claude-3","GPT+Claude+Arxiv"], index=0)

-    # Main Input Component
     mycomponent = components.declare_component("mycomponent", path="mycomponent")
     val = mycomponent(my_input_value="Hello")
     if val:
@@ -463,16 +453,13 @@ def main():
         st.subheader("๐ Search ArXiv")
         q=st.text_input("Research query:")

-        # ๐๏ธ Audio Generation Options
         st.markdown("### ๐๏ธ Audio Generation Options")
         vocal_summary = st.checkbox("๐๏ธ Vocal Summary (Short Answer)", value=True)
         extended_refs = st.checkbox("๐ Extended References & Summaries (Long)", value=False)
         titles_summary = st.checkbox("๐ Paper Titles Only", value=True)

-        if q:
-            q =
-        if q and st.button("Run ArXiv Query"):
-            perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs, titles_summary=titles_summary)
+        if q and st.button("Run ArXiv Query"):
+            perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs, titles_summary=titles_summary)

     elif tab_main == "๐ค Voice Input":
         st.subheader("๏ฟฝ๏ฟฝ Voice Recognition")
@@ -550,27 +537,31 @@ def main():
     else:
         st.write("Select a file from the sidebar to edit.")

-    # After main content, load
+    # After main content, load and show file groups in sidebar
    groups, sorted_prefixes = load_files_for_sidebar()
     display_file_manager_sidebar(groups, sorted_prefixes)

-    # If viewing a
-    if st.session_state.
+    # If viewing a prefix group, show all files in main area
+    if st.session_state.viewing_prefix and st.session_state.viewing_prefix in groups:
         st.write("---")
-        st.write(f"**Viewing
-
-
-
-
-
-
-
-
-
-
-
-
+        st.write(f"**Viewing Group:** {st.session_state.viewing_prefix}")
+        # Show all files in this prefix group in order (mp3 and md)
+        # Sort by mod time descending (already sorted)
+        for f in groups[st.session_state.viewing_prefix]:
+            fname = os.path.basename(f)
+            ext = os.path.splitext(fname)[1].lower().strip('.')
+            st.write(f"### {fname}")
+            if ext == "md":
+                content = open(f,'r',encoding='utf-8').read()
+                st.markdown(content)
+            elif ext == "mp3":
+                st.audio(f)
+            else:
+                # just show a download link
+                st.markdown(get_download_link(f), unsafe_allow_html=True)
+        if st.button("Close Group View"):
+            st.session_state.viewing_prefix = None
+
     if st.session_state.should_rerun:
         st.session_state.should_rerun = False
         st.rerun()
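The should_rerun flag used by the sidebar buttons follows a deferred-rerun pattern: callbacks only set session state, and main() issues a single st.rerun() at the very end of the script. A minimal sketch of the pattern; the button label is illustrative:

import streamlit as st

if 'should_rerun' not in st.session_state:
    st.session_state.should_rerun = False

if st.button("Delete files"):
    # ... mutate files or session state here ...
    st.session_state.should_rerun = True  # defer instead of rerunning mid-script

# At the end of the script, perform at most one rerun.
if st.session_state.should_rerun:
    st.session_state.should_rerun = False
    st.rerun()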