oceansweep committed "Upload 40 files"

Files changed:

- App_Function_Libraries/Gradio_UI/Arxiv_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/Audio_ingestion_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/Backup_Functionality.py +3 -3
- App_Function_Libraries/Gradio_UI/Book_Ingestion_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/Character_Chat_tab.py +8 -7
- App_Function_Libraries/Gradio_UI/Character_interaction_tab.py +2 -2
- App_Function_Libraries/Gradio_UI/Chat_Workflows.py +1 -1
- App_Function_Libraries/Gradio_UI/Chat_ui.py +7 -7
- App_Function_Libraries/Gradio_UI/Config_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/Embeddings_tab.py +3 -3
- App_Function_Libraries/Gradio_UI/Evaluations_Benchmarks_tab.py +2 -2
- App_Function_Libraries/Gradio_UI/Explain_summarize_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/Import_Functionality.py +4 -4
- App_Function_Libraries/Gradio_UI/Introduction_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/Keywords.py +4 -4
- App_Function_Libraries/Gradio_UI/Live_Recording.py +1 -1
- App_Function_Libraries/Gradio_UI/Llamafile_tab.py +312 -0
- App_Function_Libraries/Gradio_UI/MMLU_Pro_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/Media_edit.py +4 -4
- App_Function_Libraries/Gradio_UI/Media_wiki_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/PDF_ingestion_tab.py +2 -2
- App_Function_Libraries/Gradio_UI/Plaintext_tab_import.py +1 -1
- App_Function_Libraries/Gradio_UI/Podcast_tab.py +2 -2
- App_Function_Libraries/Gradio_UI/Prompt_Suggestion_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/RAG_Chat_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/RAG_QA_Chat_Notes.py +1 -1
- App_Function_Libraries/Gradio_UI/RAG_QA_Chat_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/Re_summarize_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/Search_Tab.py +3 -3
- App_Function_Libraries/Gradio_UI/Transcript_comparison.py +1 -1
- App_Function_Libraries/Gradio_UI/Trash.py +4 -4
- App_Function_Libraries/Gradio_UI/Utilities.py +3 -3
- App_Function_Libraries/Gradio_UI/Video_transcription_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/View_DB_Items_tab.py +3 -3
- App_Function_Libraries/Gradio_UI/View_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/Website_scraping_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/Writing_tab.py +5 -5
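
The change repeated across these files is small: each tab-builder's `gr.TabItem(...)` / `gr.Tab(...)` call gains an explicit `visible=True` argument (the Utilities tabs also gain `id=` values), and Llamafile_tab.py is added as a new file. A minimal sketch of the before/after pattern, using a hypothetical `create_example_tab` rather than any one file from the commit:

    # Sketch of the pattern applied throughout this commit (illustrative; names are made up).
    import gradio as gr

    def create_example_tab():
        # Before:
        #     with gr.TabItem("Example Tab"):
        # After: the tab is constructed with an explicit visibility flag.
        with gr.TabItem("Example Tab", visible=True):
            gr.Markdown("# Example Tab")
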
App_Function_Libraries/Gradio_UI/Arxiv_tab.py
CHANGED
@@ -20,7 +20,7 @@ import gradio as gr
 # Functions:

 def create_arxiv_tab():
-    with gr.TabItem("Arxiv Search & Ingest"):
+    with gr.TabItem("Arxiv Search & Ingest", visible=True):
         gr.Markdown("# arXiv Search, Browse, Download, and Ingest")
         gr.Markdown("#### Thank you to arXiv for use of its open access interoperability.")
         with gr.Row():
App_Function_Libraries/Gradio_UI/Audio_ingestion_tab.py
CHANGED
@@ -20,7 +20,7 @@ from App_Function_Libraries.Metrics.logger_config import logger
 # Functions:

 def create_audio_processing_tab():
-    with gr.TabItem("Audio File Transcription + Summarization"):
+    with gr.TabItem("Audio File Transcription + Summarization", visible=True):
         gr.Markdown("# Transcribe & Summarize Audio Files from URLs or Local Files!")
         with gr.Row():
             with gr.Column():
App_Function_Libraries/Gradio_UI/Backup_Functionality.py
CHANGED
@@ -34,7 +34,7 @@ def restore_backup(backup_name: str) -> str:


 def create_backup_tab():
-    with gr.Tab("Create Backup"):
+    with gr.Tab("Create Backup", visible=True):
         gr.Markdown("# Create a backup of the database")
         gr.Markdown("This will create a backup of the database in the backup directory(the default backup directory is `/tldw_DB_Backups/')")
         with gr.Row():
@@ -46,7 +46,7 @@ def create_backup_tab():


 def create_view_backups_tab():
-    with gr.TabItem("View Backups"):
+    with gr.TabItem("View Backups", visible=True):
         gr.Markdown("# Browse available backups")
         with gr.Row():
             with gr.Column():
@@ -57,7 +57,7 @@ def create_view_backups_tab():


 def create_restore_backup_tab():
-    with gr.TabItem("Restore Backup"):
+    with gr.TabItem("Restore Backup", visible=True):
         gr.Markdown("# Restore a backup of the database")
         with gr.Column():
             backup_input = gr.Textbox(label="Backup Filename")
App_Function_Libraries/Gradio_UI/Book_Ingestion_tab.py
CHANGED
@@ -22,7 +22,7 @@ from App_Function_Libraries.Books.Book_Ingestion_Lib import process_zip_file, im


 def create_import_book_tab():
-    with gr.TabItem("Ebook(epub) Files"):
+    with gr.TabItem("Ebook(epub) Files", visible=True):
         with gr.Row():
             with gr.Column():
                 gr.Markdown("# Import .epub files")
App_Function_Libraries/Gradio_UI/Character_Chat_tab.py
CHANGED
@@ -254,7 +254,7 @@ def export_all_characters():
 # Gradio tabs

 def create_character_card_interaction_tab():
-    with gr.TabItem("Chat with a Character Card"):
+    with gr.TabItem("Chat with a Character Card", visible=True):
         gr.Markdown("# Chat with a Character Card")
         with gr.Row():
             with gr.Column(scale=1):
@@ -1025,7 +1025,7 @@


 def create_character_chat_mgmt_tab():
-    with gr.TabItem("Character and Chat Management"):
+    with gr.TabItem("Character and Chat Management", visible=True):
         gr.Markdown("# Character and Chat Management")

         with gr.Row():
@@ -1063,12 +1063,12 @@ def create_character_chat_mgmt_tab():
         conversation_mapping = gr.State({})

         with gr.Tabs():
-            with gr.TabItem("Edit"):
+            with gr.TabItem("Edit", visible=True):
                 chat_content = gr.TextArea(label="Chat/Character Content (JSON)", lines=20, max_lines=50)
                 save_button = gr.Button("Save Changes")
                 delete_button = gr.Button("Delete Conversation/Character", variant="stop")

-            with gr.TabItem("Preview"):
+            with gr.TabItem("Preview", visible=True):
                 chat_preview = gr.HTML(label="Chat/Character Preview")
                 result_message = gr.Markdown("")

@@ -1380,7 +1380,7 @@ def create_character_chat_mgmt_tab():
         )

 def create_custom_character_card_tab():
-    with gr.TabItem("Create a New Character Card"):
+    with gr.TabItem("Create a New Character Card", visible=True):
         gr.Markdown("# Create a New Character Card (v2)")

         with gr.Row():
@@ -1630,7 +1630,7 @@ def create_custom_character_card_tab():

 #v1
 def create_character_card_validation_tab():
-    with gr.TabItem("Validate Character Card"):
+    with gr.TabItem("Validate Character Card", visible=True):
         gr.Markdown("# Validate Character Card (v2)")
         gr.Markdown("Upload a character card (PNG, WEBP, or JSON) to validate whether it conforms to the Character Card V2 specification.")

@@ -1786,7 +1786,7 @@ def create_character_card_validation_tab():


 def create_export_characters_tab():
-    with gr.TabItem("Export Characters"):
+    with gr.TabItem("Export Characters", visible=True):
         gr.Markdown("# Export Characters")
         gr.Markdown("Export character cards individually as JSON files or all together as a ZIP file.")

@@ -1808,6 +1808,7 @@ def create_export_characters_tab():
         export_output = gr.File(label="Exported Character(s)", interactive=False)
         export_status = gr.Markdown("")

+        # FIXME
         def export_single_character_wrapper(character_selection):
             file_path, status_message = export_single_character(character_selection)
             if file_path:
App_Function_Libraries/Gradio_UI/Character_interaction_tab.py
CHANGED
@@ -253,7 +253,7 @@ def character_interaction(character1: str, character2: str, api_endpoint: str, a


 def create_multiple_character_chat_tab():
-    with gr.TabItem("Multi-Character Chat"):
+    with gr.TabItem("Multi-Character Chat", visible=True):
         characters, conversation, current_character, other_character = character_interaction_setup()

         with gr.Blocks() as character_interaction:
@@ -393,7 +393,7 @@ def create_multiple_character_chat_tab():

 # From `Fuzzlewumper` on Reddit.
 def create_narrator_controlled_conversation_tab():
-    with gr.TabItem("Narrator-Controlled Conversation"):
+    with gr.TabItem("Narrator-Controlled Conversation", visible=True):
         gr.Markdown("# Narrator-Controlled Conversation")

         with gr.Row():
App_Function_Libraries/Gradio_UI/Chat_Workflows.py
CHANGED
@@ -24,7 +24,7 @@ with json_path.open('r') as f:


 def chat_workflows_tab():
-    with gr.TabItem("Chat Workflows"):
+    with gr.TabItem("Chat Workflows", visible=True):
         gr.Markdown("# Workflows using LLMs")
         chat_history = gr.State([])
         media_content = gr.State({})
App_Function_Libraries/Gradio_UI/Chat_ui.py
CHANGED
@@ -206,7 +206,7 @@ def create_chat_interface():
        font-size: 14px !important;
    }
    """
-    with gr.TabItem("Remote LLM Chat (Horizontal)"):
+    with gr.TabItem("Remote LLM Chat (Horizontal)", visible=True):
         gr.Markdown("# Chat with a designated LLM Endpoint, using your selected item as starting context")
         chat_history = gr.State([])
         media_content = gr.State({})
@@ -417,7 +417,7 @@ def create_chat_interface_stacked():
        font-size: 14px !important;
    }
    """
-    with gr.TabItem("Remote LLM Chat - Stacked"):
+    with gr.TabItem("Remote LLM Chat - Stacked", visible=True):
         gr.Markdown("# Stacked Chat")
         chat_history = gr.State([])
         media_content = gr.State({})
@@ -580,7 +580,7 @@ def create_chat_interface_multi_api():
        overflow-y: auto;
    }
    """
-    with gr.TabItem("One Prompt - Multiple APIs"):
+    with gr.TabItem("One Prompt - Multiple APIs", visible=True):
         gr.Markdown("# One Prompt but Multiple APIs Chat Interface")

         with gr.Row():
@@ -759,7 +759,7 @@ def create_chat_interface_four():
    }
    """

-    with gr.TabItem("Four Independent API Chats"):
+    with gr.TabItem("Four Independent API Chats", visible=True):
         gr.Markdown("# Four Independent API Chat Interfaces")

         with gr.Row():
@@ -956,7 +956,7 @@ def chat_wrapper_single(message, chat_history, chatbot, api_endpoint, api_key, t

 # FIXME - Finish implementing functions + testing/valdidation
 def create_chat_management_tab():
-    with gr.TabItem("Chat Management"):
+    with gr.TabItem("Chat Management", visible=True):
         gr.Markdown("# Chat Management")

         with gr.Row():
@@ -967,12 +967,12 @@ def create_chat_management_tab():
         conversation_mapping = gr.State({})

         with gr.Tabs():
-            with gr.TabItem("Edit"):
+            with gr.TabItem("Edit", visible=True):
                 chat_content = gr.TextArea(label="Chat Content (JSON)", lines=20, max_lines=50)
                 save_button = gr.Button("Save Changes")
                 delete_button = gr.Button("Delete Conversation", variant="stop")

-            with gr.TabItem("Preview"):
+            with gr.TabItem("Preview", visible=True):
                 chat_preview = gr.HTML(label="Chat Preview")
                 result_message = gr.Markdown("")

App_Function_Libraries/Gradio_UI/Config_tab.py
CHANGED
@@ -25,7 +25,7 @@ def save_config_from_text(text):


 def create_config_editor_tab():
-    with gr.TabItem("Edit Config"):
+    with gr.TabItem("Edit Config", visible=True):
         gr.Markdown("# Edit Configuration File")

         with gr.Row():
App_Function_Libraries/Gradio_UI/Embeddings_tab.py
CHANGED
@@ -22,7 +22,7 @@ from App_Function_Libraries.Chunk_Lib import improved_chunking_process, chunk_fo
 # Functions:

 def create_embeddings_tab():
-    with gr.TabItem("Create Embeddings"):
+    with gr.TabItem("Create Embeddings", visible=True):
         gr.Markdown("# Create Embeddings for All Content")

         with gr.Row():
@@ -185,7 +185,7 @@


 def create_view_embeddings_tab():
-    with gr.TabItem("View/Update Embeddings"):
+    with gr.TabItem("View/Update Embeddings", visible=True):
         gr.Markdown("# View and Update Embeddings")
         item_mapping = gr.State({})
         with gr.Row():
@@ -475,7 +475,7 @@


 def create_purge_embeddings_tab():
-    with gr.TabItem("Purge Embeddings"):
+    with gr.TabItem("Purge Embeddings", visible=True):
         gr.Markdown("# Purge Embeddings")

         with gr.Row():
App_Function_Libraries/Gradio_UI/Evaluations_Benchmarks_tab.py
CHANGED
@@ -6,7 +6,7 @@ import gradio as gr
 from App_Function_Libraries.Benchmarks_Evaluations.ms_g_eval import run_geval

 def create_geval_tab():
-    with gr.Tab("G-Eval"):
+    with gr.Tab("G-Eval", visible=True):
         gr.Markdown("# G-Eval Summarization Evaluation")
         with gr.Row():
             with gr.Column():
@@ -31,7 +31,7 @@ def create_geval_tab():


 def create_infinite_bench_tab():
-    with gr.Tab("Infinite Bench"):
+    with gr.Tab("Infinite Bench", visible=True):
         gr.Markdown("# Infinite Bench Evaluation (Coming Soon)")
         with gr.Row():
             with gr.Column():
App_Function_Libraries/Gradio_UI/Explain_summarize_tab.py
CHANGED
@@ -24,7 +24,7 @@ from App_Function_Libraries.Summarization.Summarization_General_Lib import summa
 # Functions:

 def create_summarize_explain_tab():
-    with gr.TabItem("Analyze Text"):
+    with gr.TabItem("Analyze Text", visible=True):
         gr.Markdown("# Analyze / Explain / Summarize Text without ingesting it into the DB")
         with gr.Row():
             with gr.Column():
App_Function_Libraries/Gradio_UI/Import_Functionality.py
CHANGED
@@ -159,7 +159,7 @@ def parse_obsidian_note(file_path):
    }

 def create_import_single_prompt_tab():
-    with gr.TabItem("Import a Prompt"):
+    with gr.TabItem("Import a Prompt", visible=True):
         gr.Markdown("# Import a prompt into the database")

         with gr.Row():
@@ -213,7 +213,7 @@ def create_import_single_prompt_tab():
        )

 def create_import_item_tab():
-    with gr.TabItem("Import Markdown/Text Files"):
+    with gr.TabItem("Import Markdown/Text Files", visible=True):
         gr.Markdown("# Import a markdown file or text file into the database")
         gr.Markdown("...and have it tagged + summarized")
         with gr.Row():
@@ -246,7 +246,7 @@ def create_import_item_tab():


 def create_import_multiple_prompts_tab():
-    with gr.TabItem("Import Multiple Prompts"):
+    with gr.TabItem("Import Multiple Prompts", visible=True):
         gr.Markdown("# Import multiple prompts into the database")
         gr.Markdown("Upload a zip file containing multiple prompt files (txt or md)")

@@ -326,7 +326,7 @@ def create_import_multiple_prompts_tab():


 def create_import_obsidian_vault_tab():
-    with gr.TabItem("Import Obsidian Vault"):
+    with gr.TabItem("Import Obsidian Vault", visible=True):
         gr.Markdown("## Import Obsidian Vault")
         with gr.Row():
             with gr.Column():
App_Function_Libraries/Gradio_UI/Introduction_tab.py
CHANGED
@@ -16,7 +16,7 @@ from App_Function_Libraries.DB.DB_Manager import get_db_config


 def create_introduction_tab():
-    with
+    with gr.TabItem("Introduction", visible=True):
         db_config = get_db_config()
         db_type = db_config['type']
         gr.Markdown(f"# tldw: Your LLM-powered Research Multi-tool (Using {db_type.capitalize()} Database)")
App_Function_Libraries/Gradio_UI/Keywords.py
CHANGED
@@ -19,7 +19,7 @@ from App_Function_Libraries.DB.DB_Manager import add_keyword, delete_keyword, ke


 def create_export_keywords_tab():
-    with gr.
+    with gr.TabItem("Export Keywords", visible=True):
         with gr.Row():
             with gr.Column():
                 export_keywords_button = gr.Button("Export Keywords")
@@ -33,7 +33,7 @@ def create_export_keywords_tab():
        )

 def create_view_keywords_tab():
-    with gr.TabItem("View Keywords"):
+    with gr.TabItem("View Keywords", visible=True):
         gr.Markdown("# Browse Keywords")
         with gr.Column():
             browse_output = gr.Markdown()
@@ -42,7 +42,7 @@ def create_view_keywords_tab():


 def create_add_keyword_tab():
-    with gr.TabItem("Add Keywords"):
+    with gr.TabItem("Add Keywords", visible=True):
         with gr.Row():
             with gr.Column():
                 gr.Markdown("# Add Keywords to the Database")
@@ -54,7 +54,7 @@ def create_add_keyword_tab():


 def create_delete_keyword_tab():
-    with gr.Tab("Delete Keywords"):
+    with gr.Tab("Delete Keywords", visible=True):
         with gr.Row():
             with gr.Column():
                 gr.Markdown("# Delete Keywords from the Database")
App_Function_Libraries/Gradio_UI/Live_Recording.py
CHANGED
@@ -22,7 +22,7 @@ whisper_models = ["small", "medium", "small.en", "medium.en", "medium", "large",
                   "distil-large-v2", "distil-medium.en", "distil-small.en"]

 def create_live_recording_tab():
-    with gr.Tab("Live Recording and Transcription"):
+    with gr.Tab("Live Recording and Transcription", visible=True):
         gr.Markdown("# Live Audio Recording and Transcription")
         with gr.Row():
             with gr.Column():
App_Function_Libraries/Gradio_UI/Llamafile_tab.py
ADDED
@@ -0,0 +1,312 @@
# Llamafile_tab.py
# Description: Gradio interface for configuring and launching Llamafile with Local LLMs

# Imports
import os
import logging
from typing import Tuple, Optional
import gradio as gr


from App_Function_Libraries.Local_LLM.Local_LLM_Inference_Engine_Lib import (
    download_llm_model,
    llm_models,
    start_llamafile,
    get_gguf_llamafile_files
)
#
#######################################################################################################################
#
# Functions:

def create_chat_with_llamafile_tab():
    # Function to update model path based on selection
    def on_local_model_change(selected_model: str, search_directory: str) -> str:
        if selected_model and isinstance(search_directory, str):
            model_path = os.path.abspath(os.path.join(search_directory, selected_model))
            logging.debug(f"Selected model path: {model_path}")  # Debug print for selected model path
            return model_path
        return "Invalid selection or directory."

    # Function to update the dropdown with available models
    def update_dropdowns(search_directory: str) -> Tuple[dict, str]:
        logging.debug(f"User-entered directory: {search_directory}")  # Debug print for directory
        if not os.path.isdir(search_directory):
            logging.debug(f"Directory does not exist: {search_directory}")  # Debug print for non-existing directory
            return gr.update(choices=[], value=None), "Directory does not exist."

        logging.debug(f"Directory exists: {search_directory}, scanning for files...")  # Confirm directory exists
        model_files = get_gguf_llamafile_files(search_directory)

        if not model_files:
            logging.debug(f"No model files found in {search_directory}")  # Debug print for no files found
            return gr.update(choices=[], value=None), "No model files found in the specified directory."

        # Update the dropdown choices with the model files found
        logging.debug(f"Models loaded from {search_directory}: {model_files}")  # Debug: Print model files loaded
        return gr.update(choices=model_files, value=None), f"Models loaded from {search_directory}."



    def download_preset_model(selected_model: str) -> Tuple[str, str]:
        """
        Downloads the selected preset model.

        Args:
            selected_model (str): The key of the selected preset model.

        Returns:
            Tuple[str, str]: Status message and the path to the downloaded model.
        """
        model_info = llm_models.get(selected_model)
        if not model_info:
            return "Invalid model selection.", ""

        try:
            model_path = download_llm_model(
                model_name=model_info["name"],
                model_url=model_info["url"],
                model_filename=model_info["filename"],
                model_hash=model_info["hash"]
            )
            return f"Model '{model_info['name']}' downloaded successfully.", model_path
        except Exception as e:
            logging.error(f"Error downloading model: {e}")
            return f"Failed to download model: {e}", ""

    with gr.TabItem("Local LLM with Llamafile", visible=True):
        gr.Markdown("# Settings for Llamafile")

        with gr.Row():
            with gr.Column():
                am_noob = gr.Checkbox(label="Enable Sane Defaults", value=False, visible=True)
                advanced_mode_toggle = gr.Checkbox(label="Advanced Mode - Show All Settings", value=False)
                # Advanced Inputs
                verbose_checked = gr.Checkbox(label="Enable Verbose Output", value=False, visible=False)
                threads_checked = gr.Checkbox(label="Set CPU Threads", value=False, visible=False)
                threads_value = gr.Number(label="Number of CPU Threads", value=None, precision=0, visible=False)
                threads_batched_checked = gr.Checkbox(label="Enable Batched Inference", value=False, visible=False)
                threads_batched_value = gr.Number(label="Batch Size for Inference", value=None, precision=0, visible=False)
                model_alias_checked = gr.Checkbox(label="Set Model Alias", value=False, visible=False)
                model_alias_value = gr.Textbox(label="Model Alias", value="", visible=False)
                ctx_size_checked = gr.Checkbox(label="Set Prompt Context Size", value=False, visible=False)
                ctx_size_value = gr.Number(label="Prompt Context Size", value=8124, precision=0, visible=False)
                ngl_checked = gr.Checkbox(label="Enable GPU Layers", value=False, visible=True)
                ngl_value = gr.Number(label="Number of GPU Layers", value=None, precision=0, visible=True)
                batch_size_checked = gr.Checkbox(label="Set Batch Size", value=False, visible=False)
                batch_size_value = gr.Number(label="Batch Size", value=512, visible=False)
                memory_f32_checked = gr.Checkbox(label="Use 32-bit Floating Point", value=False, visible=False)
                numa_checked = gr.Checkbox(label="Enable NUMA", value=False, visible=False)
                server_timeout_value = gr.Number(label="Server Timeout", value=600, precision=0, visible=False)
                host_checked = gr.Checkbox(label="Set IP to Listen On", value=False, visible=False)
                host_value = gr.Textbox(label="Host IP Address", value="", visible=False)
                port_checked = gr.Checkbox(label="Set Server Port", value=False, visible=False)
                port_value = gr.Number(label="Port Number", value=8080, precision=0, visible=False)
                api_key_checked = gr.Checkbox(label="Set API Key", value=False, visible=False)
                api_key_value = gr.Textbox(label="API Key", value="", visible=False)
                http_threads_checked = gr.Checkbox(label="Set HTTP Server Threads", value=False, visible=False)
                http_threads_value = gr.Number(label="Number of HTTP Server Threads", value=None, precision=0, visible=False)
                hf_repo_checked = gr.Checkbox(label="Use Huggingface Repo Model", value=False, visible=False)
                hf_repo_value = gr.Textbox(label="Huggingface Repo Name", value="", visible=False)
                hf_file_checked = gr.Checkbox(label="Set Huggingface Model File", value=False, visible=False)
                hf_file_value = gr.Textbox(label="Huggingface Model File", value="", visible=False)

            with gr.Column():
                # Model Selection Section
                gr.Markdown("## Model Selection")

                # Option 1: Select from Local Filesystem
                with gr.Row():
                    search_directory = gr.Textbox(label="Model Directory",
                                                  placeholder="Enter directory path(currently '.\Models')",
                                                  value=".\Models",
                                                  interactive=True)

                # Initial population of local models
                initial_dropdown_update, _ = update_dropdowns(".\Models")
                refresh_button = gr.Button("Refresh Models")
                local_model_dropdown = gr.Dropdown(label="Select Model from Directory", choices=[])
                # Display selected model path
                model_value = gr.Textbox(label="Selected Model File Path", value="", interactive=False)

                # Option 2: Download Preset Models
                gr.Markdown("## Download Preset Models")

                preset_model_dropdown = gr.Dropdown(
                    label="Select a Preset Model",
                    choices=list(llm_models.keys()),
                    value=None,
                    interactive=True,
                    info="Choose a preset model to download."
                )
                download_preset_button = gr.Button("Download Selected Preset")

        with gr.Row():
            with gr.Column():
                start_button = gr.Button("Start Llamafile")
                stop_button = gr.Button("Stop Llamafile (doesn't work)")
                output_display = gr.Markdown()


        # Show/hide advanced inputs based on toggle
        def update_visibility(show_advanced: bool):
            components = [
                verbose_checked, threads_checked, threads_value,
                http_threads_checked, http_threads_value,
                hf_repo_checked, hf_repo_value,
                hf_file_checked, hf_file_value,
                ctx_size_checked, ctx_size_value,
                ngl_checked, ngl_value,
                host_checked, host_value,
                port_checked, port_value
            ]
            return [gr.update(visible=show_advanced) for _ in components]

        def on_start_button_click(
                am_noob: bool,
                verbose_checked: bool,
                threads_checked: bool,
                threads_value: Optional[int],
                threads_batched_checked: bool,
                threads_batched_value: Optional[int],
                model_alias_checked: bool,
                model_alias_value: str,
                http_threads_checked: bool,
                http_threads_value: Optional[int],
                model_value: str,
                hf_repo_checked: bool,
                hf_repo_value: str,
                hf_file_checked: bool,
                hf_file_value: str,
                ctx_size_checked: bool,
                ctx_size_value: Optional[int],
                ngl_checked: bool,
                ngl_value: Optional[int],
                batch_size_checked: bool,
                batch_size_value: Optional[int],
                memory_f32_checked: bool,
                numa_checked: bool,
                server_timeout_value: Optional[int],
                host_checked: bool,
                host_value: str,
                port_checked: bool,
                port_value: Optional[int],
                api_key_checked: bool,
                api_key_value: str
        ) -> str:
            """
            Event handler for the Start Llamafile button.
            """
            try:
                result = start_llamafile(
                    am_noob,
                    verbose_checked,
                    threads_checked,
                    threads_value,
                    threads_batched_checked,
                    threads_batched_value,
                    model_alias_checked,
                    model_alias_value,
                    http_threads_checked,
                    http_threads_value,
                    model_value,
                    hf_repo_checked,
                    hf_repo_value,
                    hf_file_checked,
                    hf_file_value,
                    ctx_size_checked,
                    ctx_size_value,
                    ngl_checked,
                    ngl_value,
                    batch_size_checked,
                    batch_size_value,
                    memory_f32_checked,
                    numa_checked,
                    server_timeout_value,
                    host_checked,
                    host_value,
                    port_checked,
                    port_value,
                    api_key_checked,
                    api_key_value
                )
                return result
            except Exception as e:
                logging.error(f"Error starting Llamafile: {e}")
                return f"Failed to start Llamafile: {e}"

        advanced_mode_toggle.change(
            fn=update_visibility,
            inputs=[advanced_mode_toggle],
            outputs=[
                verbose_checked, threads_checked, threads_value,
                http_threads_checked, http_threads_value,
                hf_repo_checked, hf_repo_value,
                hf_file_checked, hf_file_value,
                ctx_size_checked, ctx_size_value,
                ngl_checked, ngl_value,
                host_checked, host_value,
                port_checked, port_value
            ]
        )

        start_button.click(
            fn=on_start_button_click,
            inputs=[
                am_noob,
                verbose_checked,
                threads_checked,
                threads_value,
                threads_batched_checked,
                threads_batched_value,
                model_alias_checked,
                model_alias_value,
                http_threads_checked,
                http_threads_value,
                model_value,
                hf_repo_checked,
                hf_repo_value,
                hf_file_checked,
                hf_file_value,
                ctx_size_checked,
                ctx_size_value,
                ngl_checked,
                ngl_value,
                batch_size_checked,
                batch_size_value,
                memory_f32_checked,
                numa_checked,
                server_timeout_value,
                host_checked,
                host_value,
                port_checked,
                port_value,
                api_key_checked,
                api_key_value
            ],
            outputs=output_display
        )

        download_preset_button.click(
            fn=download_preset_model,
            inputs=[preset_model_dropdown],
            outputs=[output_display, model_value]
        )

        # Click event for refreshing models
        refresh_button.click(
            fn=update_dropdowns,
            inputs=[search_directory],  # Ensure that the directory path (string) is passed
            outputs=[local_model_dropdown, output_display]  # Update dropdown and status
        )

        # Event to update model_value when a model is selected from the dropdown
        local_model_dropdown.change(
            fn=on_local_model_change,  # Function that calculates the model path
            inputs=[local_model_dropdown, search_directory],  # Inputs: selected model and directory
            outputs=[model_value]  # Output: Update the model_value textbox with the selected model path
        )

#
#
#######################################################################################################################
App_Function_Libraries/Gradio_UI/MMLU_Pro_tab.py
CHANGED
@@ -78,7 +78,7 @@ def run_benchmark_from_ui(url, api_key, model, timeout, category, parallel, verb

 def create_mmlu_pro_tab():
     """Create the Gradio UI tab for MMLU-Pro Benchmark."""
-    with gr.
+    with gr.TabItem("MMLU-Pro Benchmark", visible=True):
         gr.Markdown("## Run MMLU-Pro Benchmark")

         with gr.Row():
App_Function_Libraries/Gradio_UI/Media_edit.py
CHANGED
@@ -16,7 +16,7 @@ from App_Function_Libraries.DB.SQLite_DB import fetch_item_details


 def create_media_edit_tab():
-    with gr.TabItem("Edit Existing Items"):
+    with gr.TabItem("Edit Existing Items", visible=True):
         gr.Markdown("# Search and Edit Media Items")

         with gr.Row():
@@ -89,7 +89,7 @@ def create_media_edit_tab():


 def create_media_edit_and_clone_tab():
-    with gr.TabItem("Clone and Edit Existing Items"):
+    with gr.TabItem("Clone and Edit Existing Items", visible=True):
         gr.Markdown("# Search, Edit, and Clone Existing Items")

         with gr.Row():
@@ -199,7 +199,7 @@ def create_media_edit_and_clone_tab():


 def create_prompt_edit_tab():
-    with gr.TabItem("Add & Edit Prompts"):
+    with gr.TabItem("Add & Edit Prompts", visible=True):
         with gr.Row():
             with gr.Column():
                 prompt_dropdown = gr.Dropdown(
@@ -239,7 +239,7 @@ def create_prompt_edit_tab():


 def create_prompt_clone_tab():
-    with gr.TabItem("Clone and Edit Prompts"):
+    with gr.TabItem("Clone and Edit Prompts", visible=True):
         with gr.Row():
             with gr.Column():
                 gr.Markdown("# Clone and Edit Prompts")
App_Function_Libraries/Gradio_UI/Media_wiki_tab.py
CHANGED
@@ -229,7 +229,7 @@ def save_config(updated_config):


 def create_mediawiki_config_tab():
-    with gr.TabItem("MediaWiki Import Configuration"):
+    with gr.TabItem("MediaWiki Import Configuration", visible=True):
         gr.Markdown("# MediaWiki Import Configuration (Broken currently/doesn't work)")
         with gr.Row():
             with gr.Column():
App_Function_Libraries/Gradio_UI/PDF_ingestion_tab.py
CHANGED
@@ -21,7 +21,7 @@ from App_Function_Libraries.PDF.PDF_Ingestion_Lib import extract_metadata_from_p
 # Functions:

 def create_pdf_ingestion_tab():
-    with gr.TabItem("PDF Ingestion"):
+    with gr.TabItem("PDF Ingestion", visible=True):
         # TODO - Add functionality to extract metadata from pdf as part of conversion process in marker
         gr.Markdown("# Ingest PDF Files and Extract Metadata")
         with gr.Row():
@@ -136,7 +136,7 @@ def test_pdf_ingestion(pdf_file):
         return f"Error ingesting PDF: {str(e)}", ""

 def create_pdf_ingestion_test_tab():
-    with gr.TabItem("Test PDF Ingestion"):
+    with gr.TabItem("Test PDF Ingestion", visible=True):
         with gr.Row():
             with gr.Column():
                 pdf_file_input = gr.File(label="Upload PDF for testing")
App_Function_Libraries/Gradio_UI/Plaintext_tab_import.py
CHANGED
@@ -23,7 +23,7 @@ from App_Function_Libraries.Gradio_UI.Import_Functionality import import_data
 # Functions:

 def create_plain_text_import_tab():
-    with gr.TabItem("Import Plain text & .docx Files"):
+    with gr.TabItem("Import Plain text & .docx Files", visible=True):
         with gr.Row():
             with gr.Column():
                 gr.Markdown("# Import Markdown(`.md`)/Text(`.txt`)/rtf & `.docx` Files")
App_Function_Libraries/Gradio_UI/Podcast_tab.py
CHANGED
@@ -17,8 +17,8 @@ from App_Function_Libraries.Gradio_UI.Gradio_Shared import whisper_models, updat


 def create_podcast_tab():
-    with gr.TabItem("Podcast"):
-        gr.Markdown("# Podcast Transcription and Ingestion")
+    with gr.TabItem("Podcast", visible=True):
+        gr.Markdown("# Podcast Transcription and Ingestion", visible=True)
         with gr.Row():
             with gr.Column():
                 podcast_url_input = gr.Textbox(label="Podcast URL", placeholder="Enter the podcast URL here")
App_Function_Libraries/Gradio_UI/Prompt_Suggestion_tab.py
CHANGED
@@ -18,7 +18,7 @@ from App_Function_Libraries.Prompt_Engineering.Prompt_Engineering import generat

 # Gradio tab for prompt suggestion and testing
 def create_prompt_suggestion_tab():
-    with gr.TabItem("Prompt Suggestion/Creation"):
+    with gr.TabItem("Prompt Suggestion/Creation", visible=True):
         gr.Markdown("# Generate and Test AI Prompts with the Metaprompt Approach")

         with gr.Row():
App_Function_Libraries/Gradio_UI/RAG_Chat_tab.py
CHANGED
@@ -16,7 +16,7 @@ from App_Function_Libraries.RAG.RAG_Library_2 import enhanced_rag_pipeline
 # Functions:

 def create_rag_tab():
-    with gr.TabItem("RAG Search"):
+    with gr.TabItem("RAG Search", visible=True):
         gr.Markdown("# Retrieval-Augmented Generation (RAG) Search")

         with gr.Row():
App_Function_Libraries/Gradio_UI/RAG_QA_Chat_Notes.py
CHANGED
@@ -16,7 +16,7 @@ from App_Function_Libraries.RAG.RAG_QA_Chat import rag_qa_chat
 #
 # Functions
 def create_rag_qa_chat_notes_tab():
-    with gr.TabItem("RAG QA Chat"):
+    with gr.TabItem("RAG QA Chat", visible=True):
         gr.Markdown("# RAG QA Chat")

         state = gr.State({
App_Function_Libraries/Gradio_UI/RAG_QA_Chat_tab.py
CHANGED
@@ -25,7 +25,7 @@ from App_Function_Libraries.RAG.RAG_QA_Chat import load_chat_history, save_chat_
 # Functions:

 def create_rag_qa_chat_tab():
-    with gr.TabItem("RAG QA Chat"):
+    with gr.TabItem("RAG QA Chat", visible=True):
         gr.Markdown("# RAG QA Chat")

         with gr.Row():
App_Function_Libraries/Gradio_UI/Re_summarize_tab.py
CHANGED
@@ -23,7 +23,7 @@ from App_Function_Libraries.Utils.Utils import load_comprehensive_config
 # Functions:

 def create_resummary_tab():
-    with gr.TabItem("Re-Summarize"):
+    with gr.TabItem("Re-Summarize", visible=True):
         gr.Markdown("# Re-Summarize Existing Content")
         with gr.Row():
             with gr.Column():
App_Function_Libraries/Gradio_UI/Search_Tab.py
CHANGED
@@ -80,7 +80,7 @@ def format_as_html(content, title):
    """

 def create_search_tab():
-    with gr.TabItem("Search / Detailed View"):
+    with gr.TabItem("Search / Detailed View", visible=True):
         gr.Markdown("# Search across all ingested items in the Database")
         with gr.Row():
             with gr.Column(scale=1):
@@ -150,7 +150,7 @@ def display_search_results(query):


 def create_search_summaries_tab():
-    with gr.TabItem("Search/View Title+Summary
+    with gr.TabItem("Search/View Title+Summary", visible=True):
         gr.Markdown("# Search across all ingested items in the Database and review their summaries")
         gr.Markdown("Search by Title / URL / Keyword / or Content via SQLite Full-Text-Search")
         with gr.Row():
@@ -207,7 +207,7 @@ def create_search_summaries_tab():


 def create_prompt_search_tab():
-    with gr.TabItem("Search Prompts"):
+    with gr.TabItem("Search Prompts", visible=True):
         gr.Markdown("# Search and View Prompt Details")
         gr.Markdown("Currently has all of the https://github.com/danielmiessler/fabric prompts already available")
         with gr.Row():
App_Function_Libraries/Gradio_UI/Transcript_comparison.py
CHANGED
@@ -46,7 +46,7 @@ def compare_transcripts(media_id, transcript1_id, transcript2_id):


 def create_compare_transcripts_tab():
-    with gr.TabItem("Compare Transcripts"):
+    with gr.TabItem("Compare Transcripts", visible=True):
         gr.Markdown("# Compare Transcripts")

         with gr.Row():
App_Function_Libraries/Gradio_UI/Trash.py
CHANGED
@@ -69,7 +69,7 @@ def mark_item_as_trash(media_id: int) -> str:


 def create_search_and_mark_trash_tab():
-    with gr.TabItem("Search and Mark as Trash"):
+    with gr.TabItem("Search and Mark as Trash", visible=True):
         gr.Markdown("# Search for Items and Mark as Trash")

         search_input = gr.Textbox(label="Search Query")
@@ -105,14 +105,14 @@ def create_search_and_mark_trash_tab():


 def create_view_trash_tab():
-    with gr.TabItem("View Trash"):
+    with gr.TabItem("View Trash", visible=True):
         view_button = gr.Button("View Trash")
         trash_list = gr.Textbox(label="Trashed Items")
         view_button.click(list_trash, inputs=[], outputs=trash_list)


 def create_delete_trash_tab():
-    with gr.TabItem("Delete DB Item"):
+    with gr.TabItem("Delete DB Item", visible=True):
         gr.Markdown("# Delete Items from Databases")

         media_id_input = gr.Number(label="Media ID")
@@ -128,7 +128,7 @@ def create_delete_trash_tab():


 def create_empty_trash_tab():
-    with gr.TabItem("Empty Trash"):
+    with gr.TabItem("Empty Trash", visible=True):
         days_input = gr.Slider(minimum=15, maximum=90, step=5, label="Delete items older than (days)")
         empty_button = gr.Button("Empty Trash")
         empty_output = gr.Textbox(label="Result")
App_Function_Libraries/Gradio_UI/Utilities.py
CHANGED
@@ -10,7 +10,7 @@ from App_Function_Libraries.Utils.Utils import sanitize_filename, downloaded_fil


 def create_utilities_yt_video_tab():
-    with gr.
+    with gr.TabItem("YouTube Video Downloader", id='youtube_dl', visible=True):
         with gr.Row():
             with gr.Column():
                 gr.Markdown(
@@ -28,7 +28,7 @@ def create_utilities_yt_video_tab():
                )

 def create_utilities_yt_audio_tab():
-    with gr.
+    with gr.TabItem("YouTube Audio Downloader", id="youtube audio downloader", visible=True):
         with gr.Row():
             with gr.Column():
                 gr.Markdown(
@@ -48,7 +48,7 @@ def create_utilities_yt_audio_tab():
                )

 def create_utilities_yt_timestamp_tab():
-    with gr.
+    with gr.TabItem("YouTube Timestamp URL Generator", id="timestamp-gen", visible=True):
         gr.Markdown("## Generate YouTube URL with Timestamp")
         with gr.Row():
             with gr.Column():
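
Note that the Utilities tabs also pick up explicit `id=` values alongside `visible=True`. One plausible use for those ids (an assumption, not shown in this commit) is programmatic tab selection, since `gr.Tabs` accepts a `selected` id:

    # Hypothetical sketch: pre-selecting the tab created with id='youtube_dl' in Utilities.py.
    import gradio as gr
    from App_Function_Libraries.Gradio_UI.Utilities import create_utilities_yt_video_tab

    with gr.Blocks() as demo:  # assumed wrapper for illustration
        with gr.Tabs(selected="youtube_dl"):
            create_utilities_yt_video_tab()
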
App_Function_Libraries/Gradio_UI/Video_transcription_tab.py
CHANGED
@@ -32,7 +32,7 @@ from App_Function_Libraries.Metrics.metrics_logger import log_counter, log_histo
 # Functions:

 def create_video_transcription_tab():
-    with
+    with gr.TabItem("Video Transcription + Summarization", visible=True):
         gr.Markdown("# Transcribe & Summarize Videos from URLs")
         with gr.Row():
             gr.Markdown("""Follow this project at [tldw - GitHub](https://github.com/rmusser01/tldw)""")
App_Function_Libraries/Gradio_UI/View_DB_Items_tab.py
CHANGED
@@ -18,7 +18,7 @@ from App_Function_Libraries.DB.SQLite_DB import get_document_version
 # Functions

 def create_prompt_view_tab():
-    with gr.TabItem("View Prompt Database"):
+    with gr.TabItem("View Prompt Database", visible=True):
         gr.Markdown("# View Prompt Database Entries")
         with gr.Row():
             with gr.Column():
@@ -150,7 +150,7 @@ def extract_prompt_and_summary(content: str):


 def create_view_all_with_versions_tab():
-    with gr.TabItem("View All Items"):
+    with gr.TabItem("View All Items", visible=True):
         gr.Markdown("# View All Database Entries with Version Selection")
         with gr.Row():
             with gr.Column(scale=1):
@@ -281,7 +281,7 @@ def create_view_all_with_versions_tab():


 def create_viewing_tab():
-    with gr.TabItem("View Database Entries"):
+    with gr.TabItem("View Database Entries", visible=True):
         gr.Markdown("# View Database Entries")
         with gr.Row():
             with gr.Column():
App_Function_Libraries/Gradio_UI/View_tab.py
CHANGED
@@ -22,7 +22,7 @@ from App_Function_Libraries.DB.DB_Manager import (

 # FIXME - Doesn't work. also need ot merge this tab wtih Edit Existing Items tab....
 def create_manage_items_tab():
-    with gr.TabItem("Edit/Manage DB Items"):
+    with gr.TabItem("Edit/Manage DB Items", visible=True):
         search_input = gr.Textbox(label="Search for Media (title or ID)")
         search_button = gr.Button("Search")
         media_selector = gr.Dropdown(label="Select Media", choices=[], interactive=True)
App_Function_Libraries/Gradio_UI/Website_scraping_tab.py
CHANGED
@@ -249,7 +249,7 @@ async def scrape_with_retry(url: str, max_retries: int = 3, retry_delay: float =


 def create_website_scraping_tab():
-    with gr.TabItem("Website Scraping"):
+    with gr.TabItem("Website Scraping", visible=True):
         gr.Markdown("# Scrape Websites & Summarize Articles")
         with gr.Row():
             with gr.Column():
App_Function_Libraries/Gradio_UI/Writing_tab.py
CHANGED
@@ -41,7 +41,7 @@ def grammar_style_check(input_text, custom_prompt, api_name, api_key, system_pro


 def create_grammar_style_check_tab():
-    with gr.TabItem("Grammar and Style Check"):
+    with gr.TabItem("Grammar and Style Check", visible=True):
         with gr.Row():
             with gr.Column():
                 gr.Markdown("# Grammar and Style Check")
@@ -98,7 +98,7 @@ def create_grammar_style_check_tab():


 def create_tone_adjustment_tab():
-    with gr.TabItem("Tone Analyzer & Editor"):
+    with gr.TabItem("Tone Analyzer & Editor", visible=True):
         with gr.Row():
             with gr.Column():
                 input_text = gr.Textbox(label="Input Text", lines=10)
@@ -174,7 +174,7 @@ def generate_feedback_history_html(history):

 # FIXME
 def create_document_feedback_tab():
-    with gr.TabItem("Writing Feedback"):
+    with gr.TabItem("Writing Feedback", visible=True):
         with gr.Row():
             with gr.Column(scale=2):
                 input_text = gr.Textbox(label="Your Writing", lines=10)
@@ -364,13 +364,13 @@ def create_document_feedback_tab():


 def create_creative_writing_tab():
-    with gr.TabItem("Creative Writing Assistant"):
+    with gr.TabItem("Creative Writing Assistant", visible=True):
         gr.Markdown("# Utility to be added...")



 def create_mikupad_tab():
-    with gr.TabItem("Mikupad"):
+    with gr.TabItem("Mikupad", visible=True):
         gr.Markdown("I Wish. Gradio won't embed it successfully...")

 #