Spaces:
Running
Running
oceansweep
committed on
Commit
•
41ba402
1
Parent(s):
6f854c9
Upload 6 files
Browse files
App_Function_Libraries/Gradio_UI/Media_wiki_tab.py
ADDED
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Media_wiki_tab.py
|
2 |
+
# Description: Gradio UI snippet that allows users to import a MediaWiki XML dump file into the application.
|
3 |
+
#
|
4 |
+
# Imports
|
5 |
+
import os
|
6 |
+
from threading import Thread
|
7 |
+
#
|
8 |
+
# 3rd-party Imports
|
9 |
+
import gradio as gr
|
10 |
+
#
|
11 |
+
# Local Imports
|
12 |
+
from App_Function_Libraries.MediaWiki.Media_Wiki import import_mediawiki_dump
|
13 |
+
#
|
14 |
+
#######################################################################################################################
|
15 |
+
#
|
16 |
+
# Create MediaWiki Import Tab
|
17 |
+
|
18 |
+
def create_mediawiki_import_tab():
    """Build the "MediaWiki Import" tab.

    Lets the user upload a MediaWiki XML dump and stream it into the
    application via ``import_mediawiki_dump``, with optional namespace
    filtering, redirect skipping and configurable chunking.

    Returns:
        The main components so callers can attach further handlers:
        (file_path, wiki_name, namespaces, skip_redirects, single_item,
        chunk_method, chunk_size, chunk_overlap, import_button, output).
    """
    with gr.Tab("MediaWiki Import"):
        gr.Markdown("# Import MediaWiki Dump")
        with gr.Row():
            with gr.Column():
                file_path = gr.File(label="MediaWiki XML Dump File")
                wiki_name = gr.Textbox(label="Wiki Name", placeholder="Enter a unique name for this wiki")
                namespaces = gr.Textbox(label="Namespaces (comma-separated integers, leave empty for all)")
                skip_redirects = gr.Checkbox(label="Skip Redirects", value=True)
                single_item = gr.Checkbox(label="Import as Single Item", value=False)
                chunk_method = gr.Dropdown(
                    choices=["sentences", "words", "paragraphs", "tokens"],
                    value="sentences",
                    label="Chunking Method"
                )
                chunk_size = gr.Slider(minimum=100, maximum=2000, value=1000, step=100, label="Chunk Size")
                chunk_overlap = gr.Slider(minimum=0, maximum=500, value=100, step=10, label="Chunk Overlap")
                import_button = gr.Button("Import MediaWiki Dump")
                cancel_button = gr.Button("Cancel Import", visible=False)
            with gr.Column():
                output = gr.Markdown(label="Import Status")
                progress_bar = gr.Progress()

        def validate_inputs(file_path, wiki_name, namespaces):
            # Return an error-message string, or None when inputs are valid.
            if not file_path:
                return "Please select a MediaWiki XML dump file."
            if not wiki_name:
                return "Please enter a name for the wiki."
            if namespaces:
                try:
                    [int(ns.strip()) for ns in namespaces.split(',')]
                except ValueError:
                    return "Invalid namespaces. Please enter comma-separated integers."
            return None

        def check_file_size(file_path):
            # Warn (but do not block) when the dump exceeds 1 GB.
            max_size_mb = 1000  # 1 GB
            file_size_mb = os.path.getsize(file_path) / (1024 * 1024)
            if file_size_mb > max_size_mb:
                return f"Warning: The selected file is {file_size_mb:.2f} MB. Importing large files may take a long time."
            return None

        # Cooperative-cancellation flag; set by cancel_import(), checked by
        # run_import() between pages. (The previous Thread-based start_import
        # was dead code: wrapping a generator function in Thread(target=...)
        # only creates the generator object and never runs it, and the flag
        # was never checked. Both issues are fixed here.)
        cancel_flag = False

        def run_import(file_path, wiki_name, namespaces, skip_redirects, single_item, chunk_method, chunk_size,
                       chunk_overlap, progress=gr.Progress()):
            """Generator handler streamed by Gradio.

            Yields (cancel_button update, import_button update, status markdown)
            tuples matching the click handler's ``outputs``.
            """
            nonlocal cancel_flag
            cancel_flag = False  # reset any stale cancellation from a prior run

            validation_error = validate_inputs(file_path, wiki_name, namespaces)
            if validation_error:
                # This function is a generator, so the message must be
                # *yielded* — a plain `return value` would emit nothing and
                # the user would never see the validation error.
                yield gr.update(), gr.update(), validation_error
                return

            file_size_warning = check_file_size(file_path.name)
            status_text = "# MediaWiki Import Process\n\n## Initializing\n- Starting import process...\n"
            if file_size_warning:
                status_text += f"- {file_size_warning}\n"

            chunk_options = {
                'method': chunk_method,
                'max_size': chunk_size,
                'overlap': chunk_overlap,
                'adaptive': True,
                'language': 'en'
            }
            namespaces_list = [int(ns.strip()) for ns in namespaces.split(',')] if namespaces else None

            pages_processed = 0
            cancelled = False

            try:
                for progress_info in import_mediawiki_dump(
                        file_path=file_path.name,
                        wiki_name=wiki_name,
                        namespaces=namespaces_list,
                        skip_redirects=skip_redirects,
                        chunk_options=chunk_options,
                        single_item=single_item,
                        progress_callback=progress
                ):
                    # Honour a cancellation requested via the Cancel button.
                    if cancel_flag:
                        cancelled = True
                        status_text += "\n## Cancelled\n- Import cancelled by user.\n"
                        break
                    if progress_info.startswith("Found"):
                        status_text += f"\n## Parsing\n- {progress_info}\n"
                    elif progress_info.startswith("Processed page"):
                        pages_processed += 1
                        if pages_processed % 10 == 0:  # Update every 10 pages to avoid too frequent updates
                            status_text += f"- {progress_info}\n"
                    elif progress_info.startswith("Successfully imported"):
                        status_text += f"\n## Completed\n- {progress_info}\n- Total pages processed: {pages_processed}"
                    else:
                        status_text += f"- {progress_info}\n"

                    yield gr.update(), gr.update(), status_text

                if not cancelled:
                    status_text += "\n## Import Process Completed Successfully"
            except Exception as e:
                status_text += f"\n## Error\n- An error occurred during the import process: {str(e)}"

            yield gr.update(visible=False), gr.update(visible=True), status_text

        def cancel_import():
            # Request cancellation; run_import checks the flag between pages.
            nonlocal cancel_flag
            cancel_flag = True
            return gr.update(visible=False), gr.update(visible=True)

        import_button.click(
            run_import,
            inputs=[file_path, wiki_name, namespaces, skip_redirects, single_item, chunk_method, chunk_size,
                    chunk_overlap],
            outputs=[cancel_button, import_button, output]
        )

        cancel_button.click(
            cancel_import,
            outputs=[cancel_button, import_button]
        )

        return file_path, wiki_name, namespaces, skip_redirects, single_item, chunk_method, chunk_size, chunk_overlap, import_button, output
|
139 |
+
|
140 |
+
#
|
141 |
+
# End of MediaWiki Import Tab
|
142 |
+
#######################################################################################################################
|
App_Function_Libraries/Gradio_UI/RAG_Chat_tab.py
ADDED
@@ -0,0 +1,248 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Rag_Chat_tab.py
|
2 |
+
# Description: This file contains the code for the RAG Chat tab in the Gradio UI
|
3 |
+
#
|
4 |
+
# Imports
|
5 |
+
import logging
|
6 |
+
#
|
7 |
+
# External Imports
|
8 |
+
import gradio as gr
|
9 |
+
#
|
10 |
+
# Local Imports
|
11 |
+
from App_Function_Libraries.DB.DB_Manager import get_all_content_from_database
|
12 |
+
from App_Function_Libraries.RAG.ChromaDB_Library import chroma_client, \
|
13 |
+
check_embedding_status, store_in_chroma
|
14 |
+
from App_Function_Libraries.RAG.Embeddings_Create import create_embedding
|
15 |
+
from App_Function_Libraries.RAG.RAG_Libary_2 import enhanced_rag_pipeline
|
16 |
+
#
|
17 |
+
########################################################################################################################
|
18 |
+
#
|
19 |
+
# Functions:
|
20 |
+
|
21 |
+
def create_rag_tab():
    # Build the "RAG Search" tab: a question box with optional keyword
    # filtering on the left; the generated answer and retrieved context on
    # the right. Component-creation order determines the rendered layout.
    with gr.TabItem("RAG Search"):
        gr.Markdown("# Retrieval-Augmented Generation (RAG) Search")

        with gr.Row():
            with gr.Column():
                search_query = gr.Textbox(label="Enter your question", placeholder="What would you like to know?")

                keyword_filtering_checkbox = gr.Checkbox(label="Enable Keyword Filtering", value=False)

                # Hidden until keyword filtering is enabled.
                keywords_input = gr.Textbox(
                    label="Enter keywords (comma-separated)",
                    value="keyword1, keyword2, ...",
                    visible=False
                )

                keyword_instructions = gr.Markdown(
                    "Enter comma-separated keywords to filter your search results.",
                    visible=False
                )

                api_choice = gr.Dropdown(
                    choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter", "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "ollama", "HuggingFace"],
                    label="Select API for RAG",
                    value="OpenAI"
                )
                search_button = gr.Button("Search")

            with gr.Column():
                result_output = gr.Textbox(label="Answer", lines=10)
                context_output = gr.Textbox(label="Context", lines=10, visible=True)

        def toggle_keyword_filtering(checkbox_value):
            # Show the keyword widgets only while filtering is enabled.
            return {
                keywords_input: gr.update(visible=checkbox_value),
                keyword_instructions: gr.update(visible=checkbox_value)
            }

        keyword_filtering_checkbox.change(
            toggle_keyword_filtering,
            inputs=[keyword_filtering_checkbox],
            outputs=[keywords_input, keyword_instructions]
        )

        def perform_rag_search(query, keywords, api_choice):
            # Treat the untouched placeholder text as "no keyword filter".
            if keywords == "keyword1, keyword2, ...":
                keywords = None
            # enhanced_rag_pipeline is expected to return a dict with
            # 'answer' and 'context' keys (see return below).
            result = enhanced_rag_pipeline(query, api_choice, keywords)
            return result['answer'], result['context']

        search_button.click(perform_rag_search, inputs=[search_query, keywords_input, api_choice], outputs=[result_output, context_output])
|
72 |
+
|
73 |
+
|
74 |
+
# FIXME - under construction
|
75 |
+
def create_embeddings_tab():
    # Build the "Create Embeddings" tab: batch-creates embeddings for every
    # item in the media database and stores them in the shared Chroma
    # collection, skipping items that already have one.
    with gr.TabItem("Create Embeddings"):
        gr.Markdown("# Create Embeddings for All Content")

        with gr.Row():
            with gr.Column():
                embedding_provider = gr.Radio(
                    choices=["openai", "local", "huggingface"],
                    label="Select Embedding Provider",
                    value="openai"
                )
                embedding_model = gr.Textbox(
                    label="Embedding Model",
                    value="text-embedding-3-small"
                )
                # Only shown when the "local" provider is selected.
                embedding_api_url = gr.Textbox(
                    label="API URL (for local provider)",
                    value="http://localhost:8080/embedding",
                    visible=False
                )
                create_button = gr.Button("Create Embeddings")

            with gr.Column():
                status_output = gr.Textbox(label="Status", lines=10)

        def update_provider_options(provider):
            # The API URL field is only relevant for the "local" provider.
            return gr.update(visible=provider == "local")

        embedding_provider.change(
            fn=update_provider_options,
            inputs=[embedding_provider],
            outputs=[embedding_api_url]
        )

        def create_all_embeddings(provider, model, api_url):
            # Embed every DB item not already present in the collection.
            # Returns a human-readable status string for the UI.
            try:
                all_content = get_all_content_from_database()
                if not all_content:
                    return "No content found in the database."

                collection_name = "all_content_embeddings"
                collection = chroma_client.get_or_create_collection(name=collection_name)

                for item in all_content:
                    media_id = item['id']
                    text = item['content']

                    # Skip items whose embedding is already stored.
                    existing = collection.get(ids=[f"doc_{media_id}"])
                    if existing['ids']:
                        continue

                    embedding = create_embedding(text, provider, model, api_url)
                    store_in_chroma(collection_name, [text], [embedding], [f"doc_{media_id}"], [{"media_id": media_id}])

                return "Embeddings created and stored successfully for all new content."
            except Exception as e:
                logging.error(f"Error during embedding creation: {str(e)}")
                return f"Error: {str(e)}"

        create_button.click(
            fn=create_all_embeddings,
            inputs=[embedding_provider, embedding_model, embedding_api_url],
            outputs=status_output
        )
|
139 |
+
|
140 |
+
|
141 |
+
def create_view_embeddings_tab():
    """Build the "View/Update Embeddings" tab.

    Lists database items with their embedding status and lets the user
    create (or recreate) the embedding for a selected item.

    Returns:
        The created components so callers can attach further handlers:
        (item_dropdown, refresh_button, embedding_status, embedding_preview,
        create_new_embedding_button, embedding_provider, embedding_model,
        embedding_api_url).
    """
    with gr.TabItem("View/Update Embeddings"):
        gr.Markdown("# View and Update Embeddings")
        # Maps each dropdown display string back to the item's DB id.
        item_mapping = gr.State({})
        with gr.Row():
            with gr.Column():
                item_dropdown = gr.Dropdown(label="Select Item", choices=[], interactive=True)
                refresh_button = gr.Button("Refresh Item List")
                embedding_status = gr.Textbox(label="Embedding Status", interactive=False)
                embedding_preview = gr.Textbox(label="Embedding Preview", interactive=False, lines=5)

            with gr.Column():
                create_new_embedding_button = gr.Button("Create New Embedding")
                embedding_provider = gr.Radio(
                    choices=["openai", "local", "huggingface"],
                    label="Embedding Provider",
                    value="openai"
                )
                embedding_model = gr.Textbox(
                    label="Embedding Model",
                    value="text-embedding-3-small",
                    visible=True
                )
                # Only relevant for the "local" provider (toggled below).
                embedding_api_url = gr.Textbox(
                    label="API URL (for local provider)",
                    value="http://localhost:8080/embedding",
                    visible=False
                )

        def get_items_with_embedding_status():
            # Build "<title> (<status>)" dropdown choices plus the
            # choice -> item-id mapping stored in item_mapping.
            try:
                items = get_all_content_from_database()
                collection = chroma_client.get_or_create_collection(name="all_content_embeddings")
                choices = []
                new_item_mapping = {}
                for item in items:
                    try:
                        result = collection.get(ids=[f"doc_{item['id']}"])
                        embedding_exists = result is not None and result.get('ids') and len(result['ids']) > 0
                        status = "Embedding exists" if embedding_exists else "No embedding"
                    except Exception as e:
                        print(f"Error checking embedding for item {item['id']}: {str(e)}")
                        status = "Error checking"
                    choice = f"{item['title']} ({status})"
                    choices.append(choice)
                    new_item_mapping[choice] = item['id']
                return gr.update(choices=choices), new_item_mapping
            except Exception as e:
                print(f"Error in get_items_with_embedding_status: {str(e)}")
                return gr.update(choices=["Error: Unable to fetch items"]), {}

        def update_provider_options(provider):
            # The API URL field only applies to the "local" provider.
            return gr.update(visible=provider == "local")

        def create_new_embedding_for_item(selected_item, provider, model, api_url, item_mapping):
            # Create and store an embedding for the selected item.
            # Returns (status message, preview text) for the two output boxes.
            if not selected_item:
                return "Please select an item", ""

            try:
                item_id = item_mapping.get(selected_item)
                if item_id is None:
                    return f"Invalid item selected: {selected_item}", ""

                items = get_all_content_from_database()
                item = next((item for item in items if item['id'] == item_id), None)
                if not item:
                    return f"Item not found: {item_id}", ""

                embedding = create_embedding(item['content'], provider, model, api_url)

                collection_name = "all_content_embeddings"
                metadata = {"media_id": item_id, "title": item['title']}
                # Reuse the metadata dict above rather than duplicating the
                # literal (the two copies previously had to be kept in sync).
                store_in_chroma(collection_name, [item['content']], [embedding], [f"doc_{item_id}"],
                                [metadata])

                embedding_preview = str(embedding[:50])
                status = f"New embedding created and stored for item: {item['title']} (ID: {item_id})"
                return status, f"First 50 elements of new embedding:\n{embedding_preview}\n\nMetadata: {metadata}"
            except Exception as e:
                logging.error(f"Error in create_new_embedding_for_item: {str(e)}")
                return f"Error creating embedding: {str(e)}", ""

        refresh_button.click(
            get_items_with_embedding_status,
            outputs=[item_dropdown, item_mapping]
        )
        item_dropdown.change(
            check_embedding_status,
            inputs=[item_dropdown, item_mapping],
            outputs=[embedding_status, embedding_preview]
        )
        create_new_embedding_button.click(
            create_new_embedding_for_item,
            inputs=[item_dropdown, embedding_provider, embedding_model, embedding_api_url, item_mapping],
            outputs=[embedding_status, embedding_preview]
        )
        embedding_provider.change(
            update_provider_options,
            inputs=[embedding_provider],
            outputs=[embedding_api_url]
        )

        return item_dropdown, refresh_button, embedding_status, embedding_preview, create_new_embedding_button, embedding_provider, embedding_model, embedding_api_url
|
244 |
+
|
245 |
+
#
|
246 |
+
# End of file
|
247 |
+
########################################################################################################################
|
248 |
+
|
App_Function_Libraries/Gradio_UI/RAG_QA_Chat_tab.py
ADDED
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# RAG_QA_Chat_tab.py
|
2 |
+
# Description: Gradio UI for RAG QA Chat
|
3 |
+
#
|
4 |
+
# Imports
|
5 |
+
#
|
6 |
+
# External Imports
|
7 |
+
import logging
|
8 |
+
|
9 |
+
import gradio as gr
|
10 |
+
|
11 |
+
from App_Function_Libraries.DB.DB_Manager import DatabaseError, get_paginated_files
|
12 |
+
from App_Function_Libraries.RAG.RAG_QA_Chat import search_database, load_chat_history, \
|
13 |
+
save_chat_history, rag_qa_chat
|
14 |
+
|
15 |
+
|
16 |
+
#
|
17 |
+
# Local Imports
|
18 |
+
#
|
19 |
+
########################################################################################################################
|
20 |
+
#
|
21 |
+
# Functions:
|
22 |
+
|
23 |
+
def create_rag_qa_chat_tab():
    """Build the "RAG QA Chat (WIP)" tab.

    Left column: context-source selection (existing DB file with paging,
    DB search, or an uploaded file) plus the backend API choice and chat
    save/load controls. Right column: the chat itself.

    Returns:
        The main components so callers can attach further handlers:
        (context_source, existing_file, search_query, search_button,
        search_results, file_upload, api_choice, chatbot, msg, submit,
        clear, save_chat, load_chat, chat_file).
    """
    with gr.TabItem("RAG QA Chat (WIP)"):
        gr.Markdown("# RAG QA Chat")

        with gr.Row():
            with gr.Column(scale=1):
                context_source = gr.Radio(
                    ["Existing File", "Search Database", "Upload File"],
                    label="Context Source",
                    value="Existing File"
                )
                existing_file = gr.Dropdown(label="Select Existing File", choices=[], interactive=True)
                file_page = gr.State(value=1)
                with gr.Row():
                    prev_page_btn = gr.Button("Previous Page")
                    next_page_btn = gr.Button("Next Page")
                    page_info = gr.HTML("Page 1")

                search_query = gr.Textbox(label="Search Query", visible=False)
                search_button = gr.Button("Search", visible=False)
                search_results = gr.Dropdown(label="Search Results", choices=[], visible=False)
                file_upload = gr.File(label="Upload File", visible=False)

                api_choice = gr.Dropdown(
                    choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter", "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "ollama", "HuggingFace"],
                    label="Select API for RAG",
                    value="OpenAI"
                )
                chat_file = gr.File(label="Chat File")
                load_chat = gr.Button("Load Chat")
                clear = gr.Button("Clear Current Chat")

            with gr.Column(scale=2):
                chatbot = gr.Chatbot(height=500)
                msg = gr.Textbox(label="Enter your message")
                submit = gr.Button("Submit")

                save_chat = gr.Button("Save Chat")

                # Hidden spinner toggled by rag_qa_chat_wrapper. NOTE: the
                # original code created a second gr.HTML(visible=False) after
                # the wiring below, rebinding this name and orphaning the
                # component laid out here; the duplicate has been removed so
                # event outputs target the component in this column.
                loading_indicator = gr.HTML(visible=False)

        def update_file_list(page):
            # Fetch one page of files and render "Title (ID: n)" labels.
            files, total_pages, current_page = get_paginated_files(page)
            choices = [f"{title} (ID: {id})" for id, title in files]
            return gr.update(choices=choices), gr.update(value=f"Page {current_page} of {total_pages}"), current_page

        def next_page_fn(current_page):
            return update_file_list(current_page + 1)

        def prev_page_fn(current_page):
            # Clamp at page 1.
            return update_file_list(max(1, current_page - 1))

        def update_context_source(choice):
            # Toggle visibility of the widgets tied to each context source.
            return {
                existing_file: gr.update(visible=choice == "Existing File"),
                prev_page_btn: gr.update(visible=choice == "Existing File"),
                next_page_btn: gr.update(visible=choice == "Existing File"),
                page_info: gr.update(visible=choice == "Existing File"),
                search_query: gr.update(visible=choice == "Search Database"),
                search_button: gr.update(visible=choice == "Search Database"),
                search_results: gr.update(visible=choice == "Search Database"),
                file_upload: gr.update(visible=choice == "Upload File")
            }

        context_source.change(update_context_source, context_source,
                              [existing_file, prev_page_btn, next_page_btn, page_info, search_query, search_button,
                               search_results, file_upload])

        next_page_btn.click(next_page_fn, inputs=[file_page], outputs=[existing_file, page_info, file_page])
        prev_page_btn.click(prev_page_fn, inputs=[file_page], outputs=[existing_file, page_info, file_page])

        # Initialize the file list
        context_source.change(lambda: update_file_list(1), outputs=[existing_file, page_info, file_page])

        def rag_qa_chat_wrapper(message, history, context_source, existing_file, search_results, file_upload,
                                api_choice):
            # Generator handler: first yield shows the loading indicator,
            # every terminal yield hides it again.
            try:
                # Show loading indicator
                yield history, "", gr.update(visible=True)

                if context_source == "Existing File":
                    # Dropdown labels look like "Title (ID: 42)"; extract the id.
                    context = f"media_id:{existing_file.split('(ID: ')[1][:-1]}"
                elif context_source == "Search Database":
                    context = f"media_id:{search_results.split('(ID: ')[1][:-1]}"
                else:  # Upload File
                    if file_upload is None:
                        raise ValueError("No file uploaded")
                    context = file_upload

                new_history, response = rag_qa_chat(message, history, context, api_choice)
                gr.Info("Response generated successfully")
                yield new_history, "", gr.update(visible=False)
            except ValueError as e:
                gr.Error(f"Input error: {str(e)}")
                yield history, "", gr.update(visible=False)
            except DatabaseError as e:
                gr.Error(f"Database error: {str(e)}")
                yield history, "", gr.update(visible=False)
            except Exception as e:
                logging.error(f"Unexpected error in rag_qa_chat_wrapper: {e}")
                gr.Error("An unexpected error occurred. Please try again later.")
                yield history, "", gr.update(visible=False)

        def save_chat_history_wrapper(history):
            # Persist the current chat and surface the file in chat_file.
            try:
                file_path = save_chat_history(history)
                gr.Info("Chat history saved successfully")
                return gr.update(value=file_path)
            except Exception as e:
                gr.Error(f"Error saving chat history: {str(e)}")
                return gr.update(value=None)

        def load_chat_history_wrapper(file):
            # Restore a previously saved chat; an empty list clears the chat.
            try:
                if file is not None:
                    history = load_chat_history(file)
                    gr.Info("Chat history loaded successfully")
                    return history
                return []
            except Exception as e:
                gr.Error(f"Error loading chat history: {str(e)}")
                return []

        def perform_search(query):
            # Populate the search-results dropdown from a DB search.
            try:
                results = search_database(query)
                return gr.update(choices=results)
            except Exception as e:
                gr.Error(f"Error performing search: {str(e)}")
                return gr.update(choices=[])

        save_chat.click(save_chat_history_wrapper, inputs=[chatbot], outputs=[chat_file])
        load_chat.click(load_chat_history_wrapper, inputs=[chat_file], outputs=[chatbot])

        search_button.click(perform_search, inputs=[search_query], outputs=[search_results])

        submit.click(
            rag_qa_chat_wrapper,
            inputs=[msg, chatbot, context_source, existing_file, search_results, file_upload, api_choice],
            outputs=[chatbot, msg, loading_indicator]
        )

        clear.click(lambda: ([], None), outputs=[chatbot, chat_file])

        return context_source, existing_file, search_query, search_button, search_results, file_upload, api_choice, chatbot, msg, submit, clear, save_chat, load_chat, chat_file
|
171 |
+
|
172 |
+
#
|
173 |
+
# End of RAG_QA_Chat_tab.py
|
174 |
+
########################################################################################################################
|
175 |
+
#
|
App_Function_Libraries/Gradio_UI/Writing_tab.py
ADDED
@@ -0,0 +1,702 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Writing_tab.py
|
2 |
+
# Description: This file contains the functions that are used for writing in the Gradio UI.
|
3 |
+
#
|
4 |
+
# Imports
|
5 |
+
import base64
|
6 |
+
from datetime import datetime as datetime
|
7 |
+
import logging
|
8 |
+
import json
|
9 |
+
import os
|
10 |
+
#
|
11 |
+
# External Imports
|
12 |
+
import gradio as gr
|
13 |
+
from PIL import Image
|
14 |
+
import textstat
|
15 |
+
#
|
16 |
+
# Local Imports
|
17 |
+
from App_Function_Libraries.Summarization_General_Lib import perform_summarization
|
18 |
+
from App_Function_Libraries.Chat import chat
|
19 |
+
#
|
20 |
+
########################################################################################################################
|
21 |
+
#
|
22 |
+
# Functions:
|
23 |
+
|
24 |
+
def adjust_tone(text, concise, casual, api_name, api_key):
    """Rewrite *text* toward its two most heavily weighted tones.

    ``concise`` and ``casual`` act as weights; their complements weight the
    opposing "expanded" and "professional" tones. The two dominant tones
    are folded into a rewrite prompt handed to ``perform_summarization``.

    Returns the rewritten text produced by the selected API.
    """
    weighted = [
        {"tone": "concise", "weight": concise},
        {"tone": "casual", "weight": casual},
        {"tone": "professional", "weight": 1 - casual},
        {"tone": "expanded", "weight": 1 - concise},
    ]
    # Keep only the two strongest tones (stable sort, descending weight).
    weighted.sort(key=lambda entry: entry["weight"], reverse=True)
    top_two = weighted[:2]

    tone_prompt = " and ".join(
        f"{entry['tone']} (weight: {entry['weight']:.2f})" for entry in top_two
    )

    prompt = f"Rewrite the following text to match these tones: {tone_prompt}. Text: {text}"
    # Performing tone adjustment request...
    return perform_summarization(api_name, text, prompt, api_key)
|
40 |
+
|
41 |
+
|
42 |
+
def grammar_style_check(input_text, custom_prompt, api_name, api_key, system_prompt):
    """Run a grammar/style review of *input_text* through the selected LLM.

    The instruction prefix is ``custom_prompt`` when provided, otherwise a
    built-in default; the prefixed text is forwarded to
    ``perform_summarization`` along with the system prompt.
    """
    default_prompt = "Please analyze the following text for grammar and style. Offer suggestions for improvement and point out any misused words or incorrect spellings:\n\n"
    prefix = custom_prompt or default_prompt
    combined = prefix + input_text

    # NOTE(review): custom_prompt is both concatenated into `combined` and
    # forwarded as the prompt argument below, so a custom prompt reaches the
    # backend twice — confirm this is intended.
    return perform_summarization(api_name, combined, custom_prompt, api_key, system_prompt)
|
48 |
+
|
49 |
+
|
50 |
+
def create_grammar_style_check_tab():
    """Build the "Grammar and Style Check" tab.

    Wires an input textbox, optional custom user/system prompts, an API
    selector and key field into grammar_style_check(), and shows the LLM's
    suggestions in an output textbox.
    """
    with gr.TabItem("Grammar and Style Check"):
        with gr.Row():
            with gr.Column():
                gr.Markdown("# Grammar and Style Check")
                gr.Markdown("This utility checks the grammar and style of the provided text by feeding it to an LLM and returning suggestions for improvement.")
                input_text = gr.Textbox(label="Input Text", lines=10)
                custom_prompt_checkbox = gr.Checkbox(label="Use Custom Prompt", value=False, visible=True)
                # Both prompt boxes stay hidden until the checkbox above is
                # ticked (see the .change handler below).
                system_prompt_input = gr.Textbox(label="System Prompt", placeholder="Please analyze the provided text for grammar and style. Offer any suggestions or points to improve you can identify. Additionally please point out any misuses of any words or incorrect spellings.", lines=5, visible=False)
                custom_prompt_input = gr.Textbox(label="user Prompt",
                                                 value="""<s>You are a bulleted notes specialist. [INST]```When creating comprehensive bulleted notes, you should follow these guidelines: Use multiple headings based on the referenced topics, not categories like quotes or terms. Headings should be surrounded by bold formatting and not be listed as bullet points themselves. Leave no space between headings and their corresponding list items underneath. Important terms within the content should be emphasized by setting them in bold font. Any text that ends with a colon should also be bolded. Before submitting your response, review the instructions, and make any corrections necessary to adhered to the specified format. Do not reference these instructions within the notes.``` \nBased on the content between backticks create comprehensive bulleted notes.[/INST]
**Bulleted Note Creation Guidelines**

**Headings**:
- Based on referenced topics, not categories like quotes or terms
- Surrounded by **bold** formatting
- Not listed as bullet points
- No space between headings and list items underneath

**Emphasis**:
- **Important terms** set in bold font
- **Text ending in a colon**: also bolded

**Review**:
- Ensure adherence to specified format
- Do not reference these instructions in your response.</s>[INST] {{ .Prompt }} [/INST]
""",
                                                 lines=3,
                                                 visible=False)
                # Toggle visibility of both prompt boxes together.
                custom_prompt_checkbox.change(
                    fn=lambda x: (gr.update(visible=x), gr.update(visible=x)),
                    inputs=[custom_prompt_checkbox],
                    outputs=[custom_prompt_input, system_prompt_input]
                )
                api_name_input = gr.Dropdown(
                    choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
                             "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM","ollama", "HuggingFace", "Custom-OpenAI-API"],
                    value=None,
                    label="API for Grammar Check"
                )
                api_key_input = gr.Textbox(label="API Key (if not set in Config_Files/config.txt)", placeholder="Enter your API key here",
                                           type="password")
                check_grammar_button = gr.Button("Check Grammar and Style")

            with gr.Column():
                gr.Markdown("# Resulting Suggestions")
                gr.Markdown("(Keep in mind the API used can affect the quality of the suggestions)")

                output_text = gr.Textbox(label="Grammar and Style Suggestions", lines=15)

        check_grammar_button.click(
            fn=grammar_style_check,
            inputs=[input_text, custom_prompt_input, api_name_input, api_key_input, system_prompt_input],
            outputs=output_text
        )
|
105 |
+
|
106 |
+
|
107 |
+
def create_tone_adjustment_tab():
    """Build the "Tone Analyzer & Editor" tab.

    Two sliders (concise/expanded, casual/professional) plus an API selection
    feed adjust_tone(), which rewrites the input text toward the chosen tones.
    """
    with gr.TabItem("Tone Analyzer & Editor"):
        with gr.Row():
            with gr.Column():
                input_text = gr.Textbox(label="Input Text", lines=10)
                concise_slider = gr.Slider(minimum=0, maximum=1, value=0.5, label="Concise vs Expanded")
                casual_slider = gr.Slider(minimum=0, maximum=1, value=0.5, label="Casual vs Professional")
                api_name_input = gr.Dropdown(
                    choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
                             "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM","ollama", "HuggingFace", "Custom-OpenAI-API"],
                    value=None,
                    label="API for Grammar Check"
                )
                api_key_input = gr.Textbox(label="API Key (if not set in Config_Files/config.txt)", placeholder="Enter your API key here",
                                           type="password")
                adjust_btn = gr.Button("Adjust Tone")

            with gr.Column():
                output_text = gr.Textbox(label="Adjusted Text", lines=15)

        adjust_btn.click(
            adjust_tone,
            # BUG FIX: adjust_tone(text, concise, casual, api_name, api_key)
            # takes five arguments, but the API dropdown and key field were
            # missing from `inputs`, so every click raised a TypeError.
            inputs=[input_text, concise_slider, casual_slider, api_name_input, api_key_input],
            outputs=output_text
        )
|
132 |
+
|
133 |
+
|
134 |
+
# Built-in reviewer personas mapped to the feedback instruction used as the
# prompt prefix. generate_writing_feedback() falls back to a generic
# "As {persona}, ..." prompt for names not listed here, and custom personas
# are inserted into this dict at runtime by the Writing Feedback tab.
persona_prompts = {
    "Hemingway": "As Ernest Hemingway, known for concise and straightforward prose, provide feedback on the following text:",
    "Shakespeare": "Channel William Shakespeare's poetic style and provide feedback on the following text:",
    "Jane Austen": "Embodying Jane Austen's wit and social commentary, critique the following text:",
    "Stephen King": "With Stephen King's flair for suspense and horror, analyze the following text:",
    "J.K. Rowling": "As J.K. Rowling, creator of the magical world of Harry Potter, review the following text:"
}
|
141 |
+
|
142 |
+
def generate_writing_feedback(text, persona, aspect, api_name, api_key):
    """Request persona-styled feedback on *text* from the configured LLM.

    *persona* may be a character-card dict (roleplay prompt is built from its
    fields) or a plain persona name (looked up in persona_prompts). *aspect*
    narrows the focus unless it is "Overall".
    """
    if isinstance(persona, dict):
        # Character card: build an in-character roleplay instruction.
        base_prompt = (
            f"You are {persona['name']}. {persona['personality']}\n\n"
            f"Scenario: {persona['scenario']}\n\n"
            "Respond to the following message in character:"
        )
    else:
        # Regular persona name, with a generic fallback for unknown names.
        base_prompt = persona_prompts.get(
            persona, f"As {persona}, provide feedback on the following text:"
        )

    if aspect == "Overall":
        prompt = f"{base_prompt}\n\n{text}"
    else:
        prompt = f"{base_prompt}\n\nFocus specifically on the {aspect.lower()} in the following text:\n\n{text}"

    system_message = "You are a helpful AI assistant. You will respond to the user as if you were the persona declared in the user prompt."
    return perform_summarization(api_name, text, prompt, api_key, system_message=system_message)
|
154 |
+
|
155 |
+
def generate_writing_prompt(persona, api_name, api_key):
    """Generate a short-story writing prompt in the style of *persona*."""
    # FIXME (carried over from original): an empty string is passed as the
    # custom-prompt argument — verify perform_summarization's parameter order.
    request = f"Generate a writing prompt in the style of {persona}. The prompt should inspire a short story or scene that reflects {persona}'s typical themes and writing style."
    system_message = "You are a helpful AI assistant. You will respond to the user as if you were the persona declared in the user prompt."
    return perform_summarization(api_name, request, "", api_key, system_message=system_message)
|
159 |
+
|
160 |
+
def calculate_readability(text):
    """Return a one-line summary of two Flesch readability scores for *text*."""
    scores = {
        "ease": textstat.flesch_reading_ease(text),
        "grade": textstat.flesch_kincaid_grade(text),
    }
    return (
        f"Readability: Flesch Reading Ease: {scores['ease']:.2f}, "
        f"Flesch-Kincaid Grade Level: {scores['grade']:.2f}"
    )
|
164 |
+
|
165 |
+
|
166 |
+
def generate_feedback_history_html(history):
    """Render the feedback history as collapsible HTML, newest entry first.

    Each entry is a dict with 'persona', 'text', and optionally 'feedback';
    text is truncated to 100 chars and feedback to 200 chars for display.
    """
    fragments = ["<h3>Recent Feedback History</h3>"]
    for entry in reversed(history):
        fragments.append(f"<details><summary>{entry['persona']} Feedback</summary>")
        fragments.append(f"<p><strong>Original Text:</strong> {entry['text'][:100]}...</p>")

        feedback = entry.get('feedback')
        if feedback:
            fragments.append(f"<p><strong>Feedback:</strong> {feedback[:200]}...</p>")
        else:
            fragments.append("<p><strong>Feedback:</strong> No feedback provided.</p>")

        fragments.append("</details>")
    return "".join(fragments)
|
180 |
+
|
181 |
+
|
182 |
+
# FIXME
|
183 |
+
def create_document_feedback_tab():
    """Build the "Writing Feedback" tab.

    Lets the user request persona-styled feedback on a piece of writing, add
    custom personas, compare feedback from several personas, generate a
    writing prompt, and view readability metrics plus a rolling feedback
    history.

    Returns:
        (input_text, feedback_output, readability_output,
        feedback_history_display) components for callers.
    """
    with gr.TabItem("Writing Feedback"):
        # Single source of truth for the author personas. The original code
        # duplicated this list for the dropdown and the checkbox group and
        # accidentally listed "Virginia Woolf" twice in each copy (fixed).
        author_personas = [
            "Agatha Christie",
            "Arthur Conan Doyle",
            "Charles Bukowski",
            "Charles Dickens",
            "Chinua Achebe",
            "Cormac McCarthy",
            "David Foster Wallace",
            "Edgar Allan Poe",
            "F. Scott Fitzgerald",
            "Flannery O'Connor",
            "Franz Kafka",
            "Fyodor Dostoevsky",
            "Gabriel Garcia Marquez",
            "George R.R. Martin",
            "George Orwell",
            "Haruki Murakami",
            "Hemingway",
            "Herman Melville",
            "Isabel Allende",
            "James Joyce",
            "Jane Austen",
            "J.K. Rowling",
            "J.R.R. Tolkien",
            "Jorge Luis Borges",
            "Kurt Vonnegut",
            "Leo Tolstoy",
            "Margaret Atwood",
            "Mark Twain",
            "Mary Shelley",
            "Milan Kundera",
            "Naguib Mahfouz",
            "Neil Gaiman",
            "Octavia Butler",
            "Philip K Dick",
            "Ray Bradbury",
            "Salman Rushdie",
            "Shakespeare",
            "Stephen King",
            "Toni Morrison",
            "T.S. Eliot",
            "Ursula K. Le Guin",
            "Virginia Woolf",
            "Zadie Smith",
        ]
        with gr.Row():
            with gr.Column(scale=2):
                input_text = gr.Textbox(label="Your Writing", lines=10)
                persona_dropdown = gr.Dropdown(
                    label="Select Persona",
                    choices=author_personas,
                    value="Hemingway"
                )
                custom_persona_name = gr.Textbox(label="Custom Persona Name")
                custom_persona_description = gr.Textbox(label="Custom Persona Description", lines=3)
                add_custom_persona_button = gr.Button("Add Custom Persona")
                aspect_dropdown = gr.Dropdown(
                    label="Focus Feedback On",
                    choices=["Overall", "Grammar", "Word choice", "Structure of delivery", "Character Development", "Character Dialogue", "Descriptive Language", "Plot Structure"],
                    value="Overall"
                )
                api_name_input = gr.Dropdown(
                    choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
                             "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "ollama", "HuggingFace", "Custom-OpenAI-API"],
                    value=None,
                    label="API for Feedback"
                )
                api_key_input = gr.Textbox(label="API Key (if not set in Config_Files/config.txt)", type="password")
                get_feedback_button = gr.Button("Get Feedback")
                generate_prompt_button = gr.Button("Generate Writing Prompt")

            with gr.Column(scale=2):
                feedback_output = gr.Textbox(label="Feedback", lines=15)
                readability_output = gr.Textbox(label="Readability Metrics")
                feedback_history_display = gr.HTML(label="Feedback History")

        with gr.Row():
            compare_personas = gr.CheckboxGroup(
                choices=author_personas,
                label="Compare Multiple Persona's Feedback at Once(Compares existing feedback, doesn't create new ones)"
            )
        with gr.Row():
            compare_button = gr.Button("Compare Feedback")

        # Rolling per-session history of feedback entries (list of dicts).
        feedback_history = gr.State([])

        def add_custom_persona(name, description):
            """Register a new persona prompt and extend the dropdown choices."""
            # NOTE(review): mutating the module-level persona_prompts persists
            # for the process lifetime; also, in newer Gradio versions
            # Dropdown.choices may hold (label, value) tuples — confirm that
            # `+ [name]` still behaves as intended.
            updated_choices = persona_dropdown.choices + [name]
            persona_prompts[name] = f"As {name}, {description}, provide feedback on the following text:"
            return gr.update(choices=updated_choices)

        def update_feedback_history(current_text, persona, feedback):
            """Append one entry, cap the history, and return refreshed HTML."""
            # Ensure feedback_history.value is initialized and is a list.
            if feedback_history.value is None:
                feedback_history.value = []

            history = feedback_history.value
            history.append({"text": current_text, "persona": persona, "feedback": feedback})

            # Keep only the 10 most recent entries (the old comment said 5,
            # but the slice has always kept 10).
            feedback_history.value = history[-10:]

            return generate_feedback_history_html(feedback_history.value)

        def run_feedback(text, persona, aspect, api_name, api_key):
            """Click handler: one LLM call plus readability and history HTML."""
            # BUG FIX: the previous click lambda called
            # generate_writing_feedback() twice per click — once for display
            # and once for the history — doubling latency and API cost.
            # Call it once and reuse the result.
            feedback = generate_writing_feedback(text, persona, aspect, api_name, api_key)
            readability = calculate_readability(text)
            history_html = update_feedback_history(text, persona, feedback)
            return feedback, readability, history_html

        def compare_feedback(text, selected_personas, api_name, api_key):
            """Collect 'Overall' feedback from each selected persona."""
            results = []
            for persona in selected_personas:
                feedback = generate_writing_feedback(text, persona, "Overall", api_name, api_key)
                results.append(f"### {persona}'s Feedback:\n{feedback}\n\n")
            return "\n".join(results)

        add_custom_persona_button.click(
            fn=add_custom_persona,
            inputs=[custom_persona_name, custom_persona_description],
            outputs=persona_dropdown
        )

        get_feedback_button.click(
            fn=run_feedback,
            inputs=[input_text, persona_dropdown, aspect_dropdown, api_name_input, api_key_input],
            outputs=[feedback_output, readability_output, feedback_history_display]
        )

        compare_button.click(
            fn=compare_feedback,
            inputs=[input_text, compare_personas, api_name_input, api_key_input],
            outputs=feedback_output
        )

        generate_prompt_button.click(
            fn=generate_writing_prompt,
            inputs=[persona_dropdown, api_name_input, api_key_input],
            outputs=input_text
        )

    return input_text, feedback_output, readability_output, feedback_history_display
|
371 |
+
|
372 |
+
|
373 |
+
def create_creative_writing_tab():
    # Placeholder tab: no functionality implemented yet.
    with gr.TabItem("Creative Writing Assistant"):
        gr.Markdown("# Utility to be added...")
|
376 |
+
|
377 |
+
|
378 |
+
def chat_with_character(user_message, history, char_data, api_name_input, api_key):
    """Append one in-character exchange to *history*.

    Returns a (history, status) tuple; status is an error hint when no
    character card has been imported yet, otherwise an empty string.
    """
    if char_data is None:
        # No card loaded yet: leave the history untouched and surface a hint.
        return history, "Please import a character card first."

    reply = generate_writing_feedback(
        user_message, char_data['name'], "Overall", api_name_input, api_key
    )
    history.append((user_message, reply))
    return history, ""
|
386 |
+
|
387 |
+
def import_character_card(file):
    """Import a character card from an uploaded file.

    PNG/WebP files are scanned for embedded JSON; any other file is treated
    as a UTF-8 JSON card. Returns the parsed card dict, or None on failure.
    """
    if file is None:
        logging.warning("No file provided for character card import")
        return None
    try:
        if file.name.lower().endswith(('.png', '.webp')):
            logging.info(f"Attempting to import character card from image: {file.name}")
            json_data = extract_json_from_image(file)
            if json_data:
                logging.info("JSON data extracted from image, attempting to parse")
                return import_character_card_json(json_data)
            # Image carried no embedded card — fall through and return None.
            logging.warning("No JSON data found in the image")
        else:
            logging.info(f"Attempting to import character card from JSON file: {file.name}")
            return import_character_card_json(file.read().decode('utf-8'))
    except Exception as e:
        logging.error(f"Error importing character card: {e}")
        return None
|
407 |
+
|
408 |
+
|
409 |
+
def import_character_card_json(json_content):
    """Parse a character-card JSON string (V1 or V2 format).

    V2 cards ('spec' == 'chara_card_v2') yield their nested 'data' dict;
    anything else is assumed to be a V1 card and returned whole. Returns
    None when the content cannot be parsed.
    """
    try:
        json_content = json_content.strip()
        logging.debug(f"JSON content (first 100 chars): {json_content[:100]}...")

        parsed = json.loads(json_content)
        logging.debug(f"Parsed JSON data keys: {list(parsed.keys())}")

        if parsed.get('spec') == 'chara_card_v2':
            logging.info("Detected V2 character card")
            return parsed['data']

        logging.info("Assuming V1 character card")
        return parsed
    except json.JSONDecodeError as e:
        logging.error(f"JSON decode error: {e}")
        logging.error(f"Problematic JSON content: {json_content[:500]}...")
    except Exception as e:
        logging.error(f"Unexpected error parsing JSON: {e}")
    return None
|
431 |
+
|
432 |
+
|
433 |
+
def extract_json_from_image(image_file):
    """Extract character-card JSON embedded in an image file.

    Two strategies, in order:
      1. Read the 'chara' key from the image metadata (img.info) and
         base64-decode it — the common character-card convention.
      2. Fall back to scanning the raw pixel bytes for a base64 blob
         between the first '{' and the last '}'.

    Returns the decoded JSON string, or None if nothing usable is found.
    """
    logging.debug(f"Attempting to extract JSON from image: {image_file.name}")
    try:
        with Image.open(image_file) as img:
            logging.debug("Image opened successfully")
            metadata = img.info
            if 'chara' in metadata:
                logging.debug("Found 'chara' in image metadata")
                chara_content = metadata['chara']
                logging.debug(f"Content of 'chara' metadata (first 100 chars): {chara_content[:100]}...")
                try:
                    decoded_content = base64.b64decode(chara_content).decode('utf-8')
                    logging.debug(f"Decoded content (first 100 chars): {decoded_content[:100]}...")
                    return decoded_content
                except Exception as e:
                    # Fall through to the raw-bytes scan below.
                    logging.error(f"Error decoding base64 content: {e}")

            logging.debug("'chara' not found in metadata, checking for base64 encoded data")
            raw_data = img.tobytes()
            possible_json = raw_data.split(b'{', 1)[-1].rsplit(b'}', 1)[0]
            if possible_json:
                try:
                    decoded = base64.b64decode(possible_json).decode('utf-8')
                    if decoded.startswith('{') and decoded.endswith('}'):
                        logging.debug("Found and decoded base64 JSON data")
                        # NOTE(review): the decoded blob already starts with '{'
                        # and ends with '}', yet braces are wrapped around it
                        # again here — confirm this double-wrapping is intended.
                        return '{' + decoded + '}'
                except Exception as e:
                    logging.error(f"Error decoding base64 data: {e}")

            logging.warning("No JSON data found in the image")
    except Exception as e:
        logging.error(f"Error extracting JSON from image: {e}")
    return None
|
466 |
+
|
467 |
+
def load_chat_history(file):
    """Read a saved chat JSON file.

    Returns (history, character_name), or (None, None) when the file cannot
    be read or parsed.
    """
    try:
        payload = json.loads(file.read().decode('utf-8'))
        return payload['history'], payload['character']
    except Exception as e:
        logging.error(f"Error loading chat history: {e}")
        return None, None
|
475 |
+
|
476 |
+
|
477 |
+
# FIXME This should be in the chat tab....
|
478 |
+
# FIXME This should be in the chat tab....
def create_character_card_interaction_tab():
    """Build the "Chat with a Character Card" tab.

    Supports importing a character card (PNG/WebP with embedded JSON, or a
    plain JSON file), selecting a saved character, chatting in-character via
    the configured API, regenerating the last reply, and saving/importing
    chat histories as JSON.

    Returns:
        (character_data, chat_history, user_input) Gradio components so
        callers can wire additional behavior.
    """
    with gr.TabItem("Chat with a Character Card"):
        gr.Markdown("# Chat with a Character Card")
        with gr.Row():
            with gr.Column(scale=1):
                character_card_upload = gr.File(label="Upload Character Card")
                import_card_button = gr.Button("Import Character Card")
                load_characters_button = gr.Button("Load Existing Characters")
                from App_Function_Libraries.Chat import get_character_names
                character_dropdown = gr.Dropdown(label="Select Character", choices=get_character_names())
                api_name_input = gr.Dropdown(
                    choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral",
                             "OpenRouter", "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "ollama", "HuggingFace", "Custom-OpenAI-API"],
                    value=None,
                    # FIXME - make it so the user cant' click `Send Message` without first setting an API + Chatbot
                    label="API for Interaction(Mandatory)"
                )
                api_key_input = gr.Textbox(label="API Key (if not set in Config_Files/config.txt)",
                                           placeholder="Enter your API key here", type="password")
                temperature_slider = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, step=0.05, label="Temperature")
                import_chat_button = gr.Button("Import Chat History")
                chat_file_upload = gr.File(label="Upload Chat History JSON", visible=False)

            with gr.Column(scale=2):
                chat_history = gr.Chatbot(label="Conversation")
                user_input = gr.Textbox(label="Your message")
                send_message_button = gr.Button("Send Message")
                regenerate_button = gr.Button("Regenerate Last Message")
                save_chat_button = gr.Button("Save This Chat")
                save_status = gr.Textbox(label="Save Status", interactive=False)

        # Currently-selected character card (dict) or None.
        character_data = gr.State(None)

        def import_chat_history(file, current_history, char_data):
            """Load a saved chat JSON, guarding against character mismatches."""
            loaded_history, char_name = load_chat_history(file)
            if loaded_history is None:
                return current_history, char_data, "Failed to load chat history."

            # Refuse to splice in a chat that belongs to a different character.
            if char_data and char_data.get('name') != char_name:
                return current_history, char_data, f"Warning: Loaded chat is for character '{char_name}', but current character is '{char_data.get('name')}'. Chat not imported."

            # If no character is selected, try to load the one named in the chat.
            if not char_data:
                new_char_data = load_character(char_name)[0]
                if new_char_data:
                    char_data = new_char_data
                else:
                    return current_history, char_data, f"Warning: Character '{char_name}' not found. Please select the character manually."

            return loaded_history, char_data, f"Chat history for '{char_name}' imported successfully."

        def import_character(file):
            """Parse an uploaded card, persist it, and refresh the dropdown."""
            card_data = import_character_card(file)
            if card_data:
                from App_Function_Libraries.Chat import save_character
                save_character(card_data)
                return card_data, gr.update(choices=get_character_names())
            else:
                return None, gr.update()

        def load_character(name):
            """Return (card dict, initial chat history) for a saved character."""
            from App_Function_Libraries.Chat import load_characters
            characters = load_characters()
            char_data = characters.get(name)
            if char_data:
                first_message = char_data.get('first_mes', "Hello! I'm ready to chat.")
                return char_data, [(None, first_message)] if first_message else []
            return None, []

        def character_chat_wrapper(message, history, char_data, api_endpoint, api_key, temperature):
            """Send one user message to the LLM in character; return updated history."""
            logging.debug("Entered character_chat_wrapper")
            if char_data is None:
                # BUG FIX: this used to return a (str, history) tuple even
                # though the click handler declares a single `chat_history`
                # output, which mangled the rendered result. Surface the hint
                # inside the chat history instead.
                history.append((message, "Please select a character first."))
                return history

            # Character background injected into the system prompt.
            char_background = f"""
            Name: {char_data.get('name', 'Unknown')}
            Description: {char_data.get('description', 'N/A')}
            Personality: {char_data.get('personality', 'N/A')}
            Scenario: {char_data.get('scenario', 'N/A')}
            """

            system_message = f"""You are roleplaying as the character described below. Respond to the user's messages in character, maintaining the personality and background provided. Do not break character or refer to yourself as an AI.

{char_background}

Additional instructions: {char_data.get('post_history_instructions', '')}
"""

            # Media payload consumed by chat(); mirrors the card fields.
            media_content = {
                'id': char_data.get('name'),
                'title': char_data.get('name', 'Unknown Character'),
                'content': char_background,
                'description': char_data.get('description', ''),
                'personality': char_data.get('personality', ''),
                'scenario': char_data.get('scenario', '')
            }
            selected_parts = ['description', 'personality', 'scenario']

            prompt = char_data.get('post_history_instructions', '')

            # On the first turn, prepend the post-history instructions.
            # NOTE(review): full_message is computed but chat() below is still
            # called with the raw `message` — likely intended to receive
            # full_message; confirm against chat()'s contract before changing.
            if not history:
                full_message = f"{prompt}\n\n{message}" if prompt else message
            else:
                full_message = message

            bot_message = chat(
                message,
                history,
                media_content,
                selected_parts,
                api_endpoint,
                api_key,
                prompt,
                temperature,
                system_message
            )

            history.append((message, bot_message))
            return history

        def save_chat_history(history, character_name):
            """Write the chat to Saved_Chats/; return the file path or an error string."""
            # Create the Saved_Chats folder if it doesn't exist.
            save_directory = "Saved_Chats"
            os.makedirs(save_directory, exist_ok=True)

            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"chat_history_{character_name}_{timestamp}.json"
            filepath = os.path.join(save_directory, filename)

            chat_data = {
                "character": character_name,
                "timestamp": timestamp,
                "history": history
            }

            try:
                with open(filepath, 'w', encoding='utf-8') as f:
                    json.dump(chat_data, f, ensure_ascii=False, indent=2)
                return filepath
            except Exception as e:
                return f"Error saving chat: {str(e)}"

        def save_current_chat(history, char_data):
            """Persist the current conversation; return a user-facing status."""
            if not char_data or not history:
                return "No chat to save or character not selected."

            character_name = char_data.get('name', 'Unknown')
            result = save_chat_history(history, character_name)
            if result.startswith("Error"):
                return result
            return f"Chat saved successfully as {result}"

        def regenerate_last_message(history, char_data, api_name, api_key, temperature):
            """Drop the last exchange and re-ask the same user message."""
            if not history:
                return history

            last_user_message = history[-1][0]
            if last_user_message is None:
                # ROBUSTNESS FIX: the last entry is the character's greeting,
                # stored as (None, first_mes) — there is no user message to
                # regenerate, so leave the history as-is.
                return history

            new_history = history[:-1]
            return character_chat_wrapper(last_user_message, new_history, char_data, api_name, api_key, temperature)

        import_chat_button.click(
            fn=lambda: gr.update(visible=True),
            outputs=chat_file_upload
        )

        chat_file_upload.change(
            fn=import_chat_history,
            inputs=[chat_file_upload, chat_history, character_data],
            outputs=[chat_history, character_data, save_status]
        )

        import_card_button.click(
            fn=import_character,
            inputs=[character_card_upload],
            outputs=[character_data, character_dropdown]
        )

        load_characters_button.click(
            fn=lambda: gr.update(choices=get_character_names()),
            outputs=character_dropdown
        )

        character_dropdown.change(
            fn=load_character,
            inputs=[character_dropdown],
            outputs=[character_data, chat_history]
        )

        # Clear the input box after the message is sent.
        send_message_button.click(
            fn=character_chat_wrapper,
            inputs=[user_input, chat_history, character_data, api_name_input, api_key_input, temperature_slider],
            outputs=[chat_history]
        ).then(lambda: "", outputs=user_input)

        regenerate_button.click(
            fn=regenerate_last_message,
            inputs=[chat_history, character_data, api_name_input, api_key_input, temperature_slider],
            outputs=[chat_history]
        )

        save_chat_button.click(
            fn=save_current_chat,
            inputs=[chat_history, character_data],
            outputs=[save_status]
        )

    return character_data, chat_history, user_input
|
694 |
+
|
695 |
+
|
696 |
+
def create_mikupad_tab():
    # Placeholder: embedding Mikupad inside Gradio has not worked so far.
    with gr.TabItem("Mikupad"):
        gr.Markdown("I Wish. Gradio won't embed it successfully...")
|
699 |
+
|
700 |
+
#
|
701 |
+
# End of Writing_tab.py
|
702 |
+
########################################################################################################################
|