oceansweep committed on
Commit cb782bd
1 Parent(s): 28d9ecb

Upload 13 files

App_Function_Libraries/Gradio_UI/Arxiv_tab.py ADDED
@@ -0,0 +1,230 @@
+ # Arxiv_tab.py
+ # Description: This file contains the Gradio UI for searching, browsing, and ingesting arXiv papers.
+ #
+ # Imports
+ import tempfile
+ from datetime import datetime
+ import requests
+
+ from App_Function_Libraries.PDF.PDF_Ingestion_Lib import extract_text_and_format_from_pdf
+ #
+ # Local Imports
+ from App_Function_Libraries.Third_Party.Arxiv import convert_xml_to_markdown, fetch_arxiv_xml, parse_arxiv_feed, \
+     build_query_url, ARXIV_PAGE_SIZE, fetch_arxiv_pdf_url
+ from App_Function_Libraries.DB.DB_Manager import add_media_with_keywords
+ #
+ import gradio as gr
+ #
+ #####################################################################################################
+ #
+ # Functions:
+
+ def create_arxiv_tab():
+     with gr.TabItem("Arxiv Search & Ingest"):
+         gr.Markdown("# arXiv Search, Browse, Download, and Ingest")
+         gr.Markdown("#### Thank you to arXiv for use of its open access interoperability.")
+         with gr.Row():
+             with gr.Column(scale=1):
+                 # Search Inputs
+                 with gr.Row():
+                     with gr.Column():
+                         search_query = gr.Textbox(label="Search Query", placeholder="e.g., machine learning")
+                         author_filter = gr.Textbox(label="Author", placeholder="e.g., John Doe")
+                         year_filter = gr.Number(label="Year", precision=0)
+                         search_button = gr.Button("Search")
+
+             with gr.Column(scale=2):
+                 # Pagination Controls
+                 paper_selector = gr.Radio(label="Select a Paper", choices=[], interactive=True)
+                 prev_button = gr.Button("Previous Page")
+                 next_button = gr.Button("Next Page")
+                 page_info = gr.Textbox(label="Page", value="1", interactive=False)
+
+         # Ingestion Section
+         with gr.Row():
+             with gr.Column():
+                 # Paper Details View
+                 paper_view = gr.Markdown(label="Paper Details")
+                 arxiv_keywords = gr.Textbox(label="Additional Keywords (comma-separated)",
+                                             placeholder="e.g., AI, Deep Learning")
+                 ingest_button = gr.Button("Ingest Selected Paper")
+                 ingest_result = gr.Textbox(label="Ingestion Result", interactive=False)
+
+         # Define States for Pagination and Selection
+         state = gr.State(value={"start": 0, "current_page": 1, "last_query": None, "entries": []})
+         selected_paper_id = gr.State(value=None)
+
+         def search_arxiv(query, author, year):
+             start = 0
+             url = build_query_url(query, author, year, start)
+             try:
+                 response = requests.get(url)
+                 response.raise_for_status()
+             except requests.exceptions.RequestException as e:
+                 return gr.update(value=[]), gr.update(value=f"**Error:** {str(e)}"), state.value
+
+             entries = parse_arxiv_feed(response.text)
+             state.value = {"start": start, "current_page": 1, "last_query": (query, author, year), "entries": entries}
+             if not entries:
+                 return gr.update(value=[]), "No results found.", state.value
+
+             # Update the dropdown with paper titles for selection
+             titles = [entry['title'] for entry in entries]
+             return gr.update(choices=titles), "1", state.value
+
+         # Dead code? FIXME
+         def handle_pagination(direction):
+             current_state = state.value
+             query, author, year = current_state["last_query"]
+             new_page = current_state["current_page"] + direction
+             if new_page < 1:
+                 new_page = 1
+             start = (new_page - 1) * ARXIV_PAGE_SIZE
+             url = build_query_url(query, author, year, start)
+             try:
+                 response = requests.get(url)
+                 response.raise_for_status()
+             except requests.exceptions.RequestException as e:
+                 return gr.update(), gr.update()
+
+             entries = parse_arxiv_feed(response.text)
+             if entries:
+                 current_state["start"] = start
+                 current_state["current_page"] = new_page
+                 current_state["entries"] = entries
+                 state.value = current_state
+
+                 # Update the dropdown with paper titles for the new page
+                 titles = [entry['title'] for entry in entries]
+                 return gr.update(choices=titles), str(new_page)
+             else:
+                 # If no entries, do not change the page
+                 return gr.update(), gr.update()
+
+         def load_selected_paper(selected_title):
+             if not selected_title:
+                 return "Please select a paper to view."
+
+             # Find the selected paper from state
+             for entry in state.value["entries"]:
+                 if entry['title'] == selected_title:
+                     paper_id = entry['id']
+                     break
+             else:
+                 return "Paper not found."
+
+             try:
+                 # Fetch the PDF URL and download the full-text
+                 pdf_url = fetch_arxiv_pdf_url(paper_id)
+                 response = requests.get(pdf_url)
+                 response.raise_for_status()
+
+                 # Save the PDF temporarily
+                 with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_pdf:
+                     temp_pdf.write(response.content)
+                     temp_pdf_path = temp_pdf.name
+
+                 # Convert PDF to markdown using your PDF ingestion function
+                 full_text_markdown = extract_text_and_format_from_pdf(temp_pdf_path)
+
+                 selected_paper_id.value = paper_id
+                 return full_text_markdown
+             except Exception as e:
+                 return f"Error loading full paper: {str(e)}"
+
+         def process_and_ingest_arxiv_paper(paper_id, additional_keywords):
+             try:
+                 if not paper_id:
+                     return "Please select a paper to ingest."
+
+                 # Fetch the PDF URL
+                 pdf_url = fetch_arxiv_pdf_url(paper_id)
+
+                 # Download the PDF
+                 response = requests.get(pdf_url)
+                 response.raise_for_status()
+
+                 # Save the PDF temporarily
+                 with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_pdf:
+                     temp_pdf.write(response.content)
+                     temp_pdf_path = temp_pdf.name
+
+                 # Convert PDF to markdown using your PDF ingestion function
+                 markdown_text = extract_text_and_format_from_pdf(temp_pdf_path)
+
+                 # Fetch metadata from arXiv to get title, authors, and categories
+                 xml_content = fetch_arxiv_xml(paper_id)
+                 _, title, authors, categories = convert_xml_to_markdown(xml_content)
+
+                 # Prepare the arXiv paper URL for access/download
+                 paper_url = f"https://arxiv.org/abs/{paper_id}"
+
+                 # Prepare the keywords for ingestion
+                 keywords = f"arxiv,{','.join(categories)}"
+                 if additional_keywords:
+                     keywords += f",{additional_keywords}"
+
+                 # Ingest full paper markdown content
+                 add_media_with_keywords(
+                     url=paper_url,
+                     title=title,
+                     media_type='document',
+                     content=markdown_text,  # Full paper content in markdown
+                     keywords=keywords,
+                     prompt='No prompt for arXiv papers',
+                     summary='Full arXiv paper ingested from PDF',
+                     transcription_model='None',
+                     author=', '.join(authors),
+                     ingestion_date=datetime.now().strftime('%Y-%m-%d')
+                 )
+
+                 # Return success message with paper title and authors
+                 return f"arXiv paper '{title}' by {', '.join(authors)} ingested successfully."
+             except Exception as e:
+                 # Return error message if anything goes wrong
+                 return f"Error processing arXiv paper: {str(e)}"
+
+         # Event Handlers
+         # Connect Search Button
+         search_button.click(
+             fn=search_arxiv,
+             inputs=[search_query, author_filter, year_filter],
+             outputs=[paper_selector, page_info, state],
+             queue=True
+         )
+
+         # Connect Next Button
+         next_button.click(
+             fn=lambda: handle_pagination(1),
+             inputs=None,
+             outputs=[paper_selector, page_info],
+             queue=True
+         )
+
+         # Connect Previous Button
+         prev_button.click(
+             fn=lambda: handle_pagination(-1),
+             inputs=None,
+             outputs=[paper_selector, page_info],
+             queue=True
+         )
+
+         # When the user selects a paper in the Dropdown
+         paper_selector.change(
+             fn=load_selected_paper,
+             inputs=paper_selector,
+             outputs=paper_view,
+             queue=True
+         )
+
+         # Connect Ingest Button
+         ingest_button.click(
+             fn=process_and_ingest_arxiv_paper,
+             inputs=[selected_paper_id, arxiv_keywords],
+             outputs=ingest_result,
+             queue=True
+         )
+
+ #
+ # End of File
+ #####################################################################################################
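
A minimal usage sketch (not part of this commit; it assumes the host application composes its UI with gr.Blocks and gr.Tabs, and that the import path matches the repository layout shown above):

    import gradio as gr
    from App_Function_Libraries.Gradio_UI.Arxiv_tab import create_arxiv_tab

    # Hypothetical host app: a gr.TabItem must be created inside a gr.Tabs/gr.Blocks context.
    with gr.Blocks() as demo:
        with gr.Tabs():
            create_arxiv_tab()  # registers the "Arxiv Search & Ingest" tab defined above

    demo.launch()
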
App_Function_Libraries/Gradio_UI/Character_Chat_tab.py ADDED
@@ -0,0 +1,1318 @@
1
+ # Character_Chat_tab.py
2
+ # Description: Library for character card import functions
3
+ #
4
+ # Imports
5
+ import re
6
+ import uuid
7
+ from datetime import datetime
8
+ import json
9
+ import logging
10
+ import io
11
+ import base64
12
+ from typing import Dict, Any, Optional, List, Tuple, Union, cast
13
+ #
14
+ # External Imports
15
+ from PIL import Image
16
+ import gradio as gr
17
+ #
18
+ # Local Imports
19
+ from App_Function_Libraries.Chat import chat
20
+ from App_Function_Libraries.DB.Character_Chat_DB import (
21
+ add_character_card,
22
+ get_character_cards,
23
+ get_character_card_by_id,
24
+ add_character_chat,
25
+ get_character_chats,
26
+ get_character_chat_by_id,
27
+ update_character_chat,
28
+ delete_character_chat,
29
+ delete_character_card,
30
+ update_character_card, search_character_chats,
31
+ )
32
+ from App_Function_Libraries.Utils.Utils import sanitize_user_input
33
+ #
34
+ ############################################################################################################
35
+ #
36
+ # Functions:
37
+
38
+
39
+ #################################################################################
40
+ #
41
+ # Placeholder functions:
42
+
43
+ def replace_placeholders(text: str, char_name: str, user_name: str) -> str:
44
+ """
45
+ Replace placeholders in the given text with appropriate values.
46
+
47
+ Args:
48
+ text (str): The text containing placeholders.
49
+ char_name (str): The name of the character.
50
+ user_name (str): The name of the user.
51
+
52
+ Returns:
53
+ str: The text with placeholders replaced.
54
+ """
55
+ replacements = {
56
+ '{{char}}': char_name,
57
+ '{{user}}': user_name,
58
+ '{{random_user}}': user_name # Assuming random_user is the same as user for simplicity
59
+ }
60
+
61
+ for placeholder, value in replacements.items():
62
+ text = text.replace(placeholder, value)
63
+
64
+ return text
65
+
66
+ def replace_user_placeholder(history, user_name):
67
+ """
68
+ Replaces all instances of '{{user}}' in the chat history with the actual user name.
69
+
70
+ Args:
71
+ history (list): The current chat history as a list of tuples (user_message, bot_message).
72
+ user_name (str): The name entered by the user.
73
+
74
+ Returns:
75
+ list: Updated chat history with placeholders replaced.
76
+ """
77
+ if not user_name:
78
+ user_name = "User" # Default name if none provided
79
+
80
+ updated_history = []
81
+ for user_msg, bot_msg in history:
82
+ # Replace in user message
83
+ if user_msg:
84
+ user_msg = user_msg.replace("{{user}}", user_name)
85
+ # Replace in bot message
86
+ if bot_msg:
87
+ bot_msg = bot_msg.replace("{{user}}", user_name)
88
+ updated_history.append((user_msg, bot_msg))
89
+ return updated_history
90
+
91
+ #
92
+ # End of Placeholder functions
93
+ #################################################################################
94
+
95
+ def import_character_card(file):
96
+ if file is None:
97
+ return None, gr.update(), "No file provided for character card import"
98
+
99
+ try:
100
+ if file.name.lower().endswith(('.png', '.webp')):
101
+ json_data = extract_json_from_image(file)
102
+ if not json_data:
103
+ return None, gr.update(), "No JSON data found in the image."
104
+ elif file.name.lower().endswith('.json'):
105
+ with open(file.name, 'r', encoding='utf-8') as f:
106
+ json_data = f.read()
107
+ else:
108
+ return None, gr.update(), "Unsupported file type. Please upload a PNG/WebP image or a JSON file."
109
+
110
+ card_data = import_character_card_json(json_data)
111
+ if not card_data:
112
+ return None, gr.update(), "Failed to parse character card JSON."
113
+
114
+ # Save image data for PNG/WebP files
115
+ if file.name.lower().endswith(('.png', '.webp')):
116
+ with Image.open(file) as img:
117
+ img_byte_arr = io.BytesIO()
118
+ img.save(img_byte_arr, format='PNG')
119
+ card_data['image'] = base64.b64encode(img_byte_arr.getvalue()).decode('utf-8')
120
+
121
+ # Save character card to database
122
+ character_id = add_character_card(card_data)
123
+ if character_id:
124
+ characters = get_character_cards()
125
+ character_names = [char['name'] for char in characters]
126
+ return card_data, gr.update(
127
+ choices=character_names), f"Character card '{card_data['name']}' imported successfully."
128
+ else:
129
+ return None, gr.update(), f"Failed to save character card '{card_data.get('name', 'Unknown')}'. It may already exist."
130
+ except Exception as e:
131
+ logging.error(f"Error importing character card: {e}")
132
+ return None, gr.update(), f"Error importing character card: {e}"
133
+
134
+ def import_character_card_json(json_content: str) -> Optional[Dict[str, Any]]:
135
+ try:
136
+ json_content = json_content.strip()
137
+ card_data = json.loads(json_content)
138
+
139
+ if 'spec' in card_data and card_data['spec'] == 'chara_card_v2':
140
+ logging.info("Detected V2 character card")
141
+ return parse_v2_card(card_data)
142
+ else:
143
+ logging.info("Assuming V1 character card")
144
+ return parse_v1_card(card_data)
145
+ except json.JSONDecodeError as e:
146
+ logging.error(f"JSON decode error: {e}")
147
+ except Exception as e:
148
+ logging.error(f"Unexpected error parsing JSON: {e}")
149
+ return None
150
+
151
+ def extract_json_from_image(image_file):
152
+ logging.debug(f"Attempting to extract JSON from image: {image_file.name}")
153
+ try:
154
+ with Image.open(image_file) as img:
155
+ logging.debug("Image opened successfully")
156
+ metadata = img.info
157
+ if 'chara' in metadata:
158
+ logging.debug("Found 'chara' in image metadata")
159
+ chara_content = metadata['chara']
160
+ logging.debug(f"Content of 'chara' metadata (first 100 chars): {chara_content[:100]}...")
161
+ try:
162
+ decoded_content = base64.b64decode(chara_content).decode('utf-8')
163
+ logging.debug(f"Decoded content (first 100 chars): {decoded_content[:100]}...")
164
+ return decoded_content
165
+ except Exception as e:
166
+ logging.error(f"Error decoding base64 content: {e}")
167
+
168
+ logging.warning("'chara' not found in metadata, attempting to find JSON data in image bytes")
169
+ # Alternative method to extract embedded JSON from image bytes if metadata is not available
170
+ img_byte_arr = io.BytesIO()
171
+ img.save(img_byte_arr, format='PNG')
172
+ img_bytes = img_byte_arr.getvalue()
173
+ img_str = img_bytes.decode('latin1') # Use 'latin1' to preserve byte values
174
+
175
+ # Search for JSON-like structures in the image bytes
176
+ json_start = img_str.find('{')
177
+ json_end = img_str.rfind('}')
178
+ if json_start != -1 and json_end != -1 and json_end > json_start:
179
+ possible_json = img_str[json_start:json_end+1]
180
+ try:
181
+ json.loads(possible_json)
182
+ logging.debug("Found JSON data in image bytes")
183
+ return possible_json
184
+ except json.JSONDecodeError:
185
+ logging.debug("No valid JSON found in image bytes")
186
+
187
+ logging.warning("No JSON data found in the image")
188
+ except Exception as e:
189
+ logging.error(f"Error extracting JSON from image: {e}")
190
+ return None
191
+
192
+
193
+ def process_chat_history(chat_history: List[Tuple[str, str]], char_name: str, user_name: str) -> List[Tuple[str, str]]:
194
+ """
195
+ Process the chat history to replace placeholders in both user and character messages.
196
+
197
+ Args:
198
+ chat_history (List[Tuple[str, str]]): The chat history.
199
+ char_name (str): The name of the character.
200
+ user_name (str): The name of the user.
201
+
202
+ Returns:
203
+ List[Tuple[str, str]]: The processed chat history.
204
+ """
205
+ processed_history = []
206
+ for user_msg, char_msg in chat_history:
207
+ if user_msg:
208
+ user_msg = replace_placeholders(user_msg, char_name, user_name)
209
+ if char_msg:
210
+ char_msg = replace_placeholders(char_msg, char_name, user_name)
211
+ processed_history.append((user_msg, char_msg))
212
+ return processed_history
213
+
214
+ def parse_v2_card(card_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
215
+ try:
216
+ # Validate spec_version
217
+ if card_data.get('spec_version') != '2.0':
218
+ logging.warning(f"Unsupported V2 spec version: {card_data.get('spec_version')}")
219
+ return None
220
+
221
+ data = card_data['data']
222
+
223
+ # Ensure all required fields are present
224
+ required_fields = ['name', 'description', 'personality', 'scenario', 'first_mes', 'mes_example']
225
+ for field in required_fields:
226
+ if field not in data:
227
+ logging.error(f"Missing required field in V2 card: {field}")
228
+ return None
229
+
230
+ # Handle new V2 fields
231
+ parsed_data = {
232
+ 'name': data['name'],
233
+ 'description': data['description'],
234
+ 'personality': data['personality'],
235
+ 'scenario': data['scenario'],
236
+ 'first_mes': data['first_mes'],
237
+ 'mes_example': data['mes_example'],
238
+ 'creator_notes': data.get('creator_notes', ''),
239
+ 'system_prompt': data.get('system_prompt', ''),
240
+ 'post_history_instructions': data.get('post_history_instructions', ''),
241
+ 'alternate_greetings': data.get('alternate_greetings', []),
242
+ 'tags': data.get('tags', []),
243
+ 'creator': data.get('creator', ''),
244
+ 'character_version': data.get('character_version', ''),
245
+ 'extensions': data.get('extensions', {})
246
+ }
247
+
248
+ # Handle character_book if present
249
+ if 'character_book' in data:
250
+ parsed_data['character_book'] = parse_character_book(data['character_book'])
251
+
252
+ return parsed_data
253
+ except KeyError as e:
254
+ logging.error(f"Missing key in V2 card structure: {e}")
255
+ except Exception as e:
256
+ logging.error(f"Error parsing V2 card: {e}")
257
+ return None
258
+
259
+ def parse_v1_card(card_data: Dict[str, Any]) -> Dict[str, Any]:
260
+ # Ensure all required V1 fields are present
261
+ required_fields = ['name', 'description', 'personality', 'scenario', 'first_mes', 'mes_example']
262
+ for field in required_fields:
263
+ if field not in card_data:
264
+ logging.error(f"Missing required field in V1 card: {field}")
265
+ raise ValueError(f"Missing required field in V1 card: {field}")
266
+
267
+ # Convert V1 to V2 format
268
+ v2_data: Dict[str, Union[str, List[str], Dict[str, Any]]] = {
269
+ 'name': card_data['name'],
270
+ 'description': card_data['description'],
271
+ 'personality': card_data['personality'],
272
+ 'scenario': card_data['scenario'],
273
+ 'first_mes': card_data['first_mes'],
274
+ 'mes_example': card_data['mes_example'],
275
+ 'creator_notes': cast(str, card_data.get('creator_notes', '')),
276
+ 'system_prompt': cast(str, card_data.get('system_prompt', '')),
277
+ 'post_history_instructions': cast(str, card_data.get('post_history_instructions', '')),
278
+ 'alternate_greetings': cast(List[str], card_data.get('alternate_greetings', [])),
279
+ 'tags': cast(List[str], card_data.get('tags', [])),
280
+ 'creator': cast(str, card_data.get('creator', '')),
281
+ 'character_version': cast(str, card_data.get('character_version', '')),
282
+ 'extensions': {}
283
+ }
284
+
285
+ # Move any non-standard V1 fields to extensions
286
+ for key, value in card_data.items():
287
+ if key not in v2_data:
288
+ v2_data['extensions'][key] = value
289
+
290
+ return v2_data
291
+
292
+ def extract_character_id(choice: str) -> int:
293
+ """Extract the character ID from the dropdown selection string."""
294
+ return int(choice.split('(ID: ')[1].rstrip(')'))
295
+
296
+ def load_character_wrapper(character_id: int, user_name: str) -> Tuple[Dict[str, Any], List[Tuple[Optional[str], str]], Optional[Image.Image]]:
297
+ """Wrapper function to load character and image using the extracted ID."""
298
+ char_data, chat_history, img = load_character_and_image(character_id, user_name)
299
+ return char_data, chat_history, img
300
+
301
+ def parse_character_book(book_data: Dict[str, Any]) -> Dict[str, Any]:
302
+ """
303
+ Parse the character book data from a V2 character card.
304
+
305
+ Args:
306
+ book_data (Dict[str, Any]): The raw character book data from the character card.
307
+
308
+ Returns:
309
+ Dict[str, Any]: The parsed and structured character book data.
310
+ """
311
+ parsed_book = {
312
+ 'name': book_data.get('name', ''),
313
+ 'description': book_data.get('description', ''),
314
+ 'scan_depth': book_data.get('scan_depth'),
315
+ 'token_budget': book_data.get('token_budget'),
316
+ 'recursive_scanning': book_data.get('recursive_scanning', False),
317
+ 'extensions': book_data.get('extensions', {}),
318
+ 'entries': []
319
+ }
320
+
321
+ for entry in book_data.get('entries', []):
322
+ parsed_entry = {
323
+ 'keys': entry['keys'],
324
+ 'content': entry['content'],
325
+ 'extensions': entry.get('extensions', {}),
326
+ 'enabled': entry['enabled'],
327
+ 'insertion_order': entry['insertion_order'],
328
+ 'case_sensitive': entry.get('case_sensitive', False),
329
+ 'name': entry.get('name', ''),
330
+ 'priority': entry.get('priority'),
331
+ 'id': entry.get('id'),
332
+ 'comment': entry.get('comment', ''),
333
+ 'selective': entry.get('selective', False),
334
+ 'secondary_keys': entry.get('secondary_keys', []),
335
+ 'constant': entry.get('constant', False),
336
+ 'position': entry.get('position')
337
+ }
338
+ parsed_book['entries'].append(parsed_entry)
339
+
340
+ return parsed_book
341
+
342
+ def load_character_and_image(character_id: int, user_name: str) -> Tuple[Optional[Dict[str, Any]], List[Tuple[Optional[str], str]], Optional[Image.Image]]:
343
+ """
344
+ Load a character and its associated image based on the character ID.
345
+
346
+ Args:
347
+ character_id (int): The ID of the character to load.
348
+ user_name (str): The name of the user, used for placeholder replacement.
349
+
350
+ Returns:
351
+ Tuple[Optional[Dict[str, Any]], List[Tuple[Optional[str], str]], Optional[Image.Image]]:
352
+ A tuple containing the character data, chat history, and character image (if available).
353
+ """
354
+ try:
355
+ char_data = get_character_card_by_id(character_id)
356
+ if not char_data:
357
+ logging.warning(f"No character data found for ID: {character_id}")
358
+ return None, [], None
359
+
360
+ # Replace placeholders in character data
361
+ for field in ['first_mes', 'mes_example', 'scenario', 'description', 'personality']:
362
+ if field in char_data:
363
+ char_data[field] = replace_placeholders(char_data[field], char_data['name'], user_name)
364
+
365
+ # Replace placeholders in first_mes
366
+ first_mes = char_data.get('first_mes', "Hello! I'm ready to chat.")
367
+ first_mes = replace_placeholders(first_mes, char_data['name'], user_name)
368
+
369
+ chat_history = [(None, first_mes)] if first_mes else []
370
+
371
+ img = None
372
+ if char_data.get('image'):
373
+ try:
374
+ image_data = base64.b64decode(char_data['image'])
375
+ img = Image.open(io.BytesIO(image_data)).convert("RGBA")
376
+ except Exception as e:
377
+ logging.error(f"Error processing image for character '{char_data['name']}': {e}")
378
+
379
+ return char_data, chat_history, img
380
+
381
+ except Exception as e:
382
+ logging.error(f"Error in load_character_and_image: {e}")
383
+ return None, [], None
384
+
385
+ def load_chat_and_character(chat_id: int, user_name: str) -> Tuple[Optional[Dict[str, Any]], List[Tuple[str, str]], Optional[Image.Image]]:
386
+ """
387
+ Load a chat and its associated character, including the character image and process templates.
388
+
389
+ Args:
390
+ chat_id (int): The ID of the chat to load.
391
+ user_name (str): The name of the user.
392
+
393
+ Returns:
394
+ Tuple[Optional[Dict[str, Any]], List[Tuple[str, str]], Optional[Image.Image]]:
395
+ A tuple containing the character data, processed chat history, and character image (if available).
396
+ """
397
+ try:
398
+ # Load the chat
399
+ chat = get_character_chat_by_id(chat_id)
400
+ if not chat:
401
+ logging.warning(f"No chat found with ID: {chat_id}")
402
+ return None, [], None
403
+
404
+ # Load the associated character
405
+ character_id = chat['character_id']
406
+ char_data = get_character_card_by_id(character_id)
407
+ if not char_data:
408
+ logging.warning(f"No character found for chat ID: {chat_id}")
409
+ return None, chat['chat_history'], None
410
+
411
+ # Process the chat history
412
+ processed_history = process_chat_history(chat['chat_history'], char_data['name'], user_name)
413
+
414
+ # Load the character image
415
+ img = None
416
+ if char_data.get('image'):
417
+ try:
418
+ image_data = base64.b64decode(char_data['image'])
419
+ img = Image.open(io.BytesIO(image_data)).convert("RGBA")
420
+ except Exception as e:
421
+ logging.error(f"Error processing image for character '{char_data['name']}': {e}")
422
+
423
+ # Process character data templates
424
+ for field in ['first_mes', 'mes_example', 'scenario', 'description', 'personality']:
425
+ if field in char_data:
426
+ char_data[field] = replace_placeholders(char_data[field], char_data['name'], user_name)
427
+
428
+ return char_data, processed_history, img
429
+
430
+ except Exception as e:
431
+ logging.error(f"Error in load_chat_and_character: {e}")
432
+ return None, [], None
433
+
434
+
435
+ def load_chat_history(file):
436
+ try:
437
+ content = file.read().decode('utf-8')
438
+ chat_data = json.loads(content)
439
+
440
+ # Extract history and character name from the loaded data
441
+ history = chat_data.get('history') or chat_data.get('messages')
442
+ character_name = chat_data.get('character') or chat_data.get('character_name')
443
+
444
+ if not history or not character_name:
445
+ logging.error("Chat history or character name missing in the imported file.")
446
+ return None, None
447
+
448
+ return history, character_name
449
+ except Exception as e:
450
+ logging.error(f"Error loading chat history: {e}")
451
+ return None, None
452
+
453
+ ####################################################
454
+ #
455
+ # Gradio tabs
456
+
457
+ def create_character_card_interaction_tab():
458
+ with gr.TabItem("Chat with a Character Card"):
459
+ gr.Markdown("# Chat with a Character Card")
460
+ with gr.Row():
461
+ with gr.Column(scale=1):
462
+ character_image = gr.Image(label="Character Image", type="pil")
463
+ character_card_upload = gr.File(
464
+ label="Upload Character Card (PNG, WEBP, JSON)",
465
+ file_types=[".png", ".webp", ".json"]
466
+ )
467
+ import_card_button = gr.Button("Import Character Card")
468
+ load_characters_button = gr.Button("Load Existing Characters")
469
+ character_dropdown = gr.Dropdown(label="Select Character", choices=[])
470
+ user_name_input = gr.Textbox(label="Your Name", placeholder="Enter your name here")
471
+ api_name_input = gr.Dropdown(
472
+ choices=[
473
+ "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral",
474
+ "OpenRouter", "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "ollama", "HuggingFace",
475
+ "Custom-OpenAI-API"
476
+ ],
477
+ value="HuggingFace",
478
+ label="API for Interaction (Mandatory)"
479
+ )
480
+ api_key_input = gr.Textbox(
481
+ label="API Key (if not set in Config_Files/config.txt)",
482
+ placeholder="Enter your API key here", type="password"
483
+ )
484
+ temperature_slider = gr.Slider(
485
+ minimum=0.0, maximum=2.0, value=0.7, step=0.05, label="Temperature"
486
+ )
487
+ import_chat_button = gr.Button("Import Chat History")
488
+ chat_file_upload = gr.File(label="Upload Chat History JSON", visible=True)
489
+
490
+ # Chat History Import and Search
491
+ gr.Markdown("## Search and Load Existing Chats")
492
+ chat_search_query = gr.Textbox(
493
+ label="Search Chats",
494
+ placeholder="Enter chat name or keywords to search"
495
+ )
496
+ chat_search_button = gr.Button("Search Chats")
497
+ chat_search_dropdown = gr.Dropdown(label="Search Results", choices=[], visible=False)
498
+ load_chat_button = gr.Button("Load Selected Chat", visible=False)
499
+
500
+ # Checkbox to Decide Whether to Save Chats by Default
501
+ auto_save_checkbox = gr.Checkbox(label="Save chats automatically", value=True)
502
+ chat_media_name = gr.Textbox(label="Custom Chat Name (optional)", visible=True)
503
+ save_chat_history_to_db = gr.Button("Save Chat History to Database")
504
+ save_status = gr.Textbox(label="Save Status", interactive=False)
505
+
506
+ with gr.Column(scale=2):
507
+ chat_history = gr.Chatbot(label="Conversation", height=800)
508
+ user_input = gr.Textbox(label="Your message")
509
+ send_message_button = gr.Button("Send Message")
510
+ regenerate_button = gr.Button("Regenerate Last Message")
511
+ clear_chat_button = gr.Button("Clear Chat")
512
+ save_snapshot_button = gr.Button("Save Chat Snapshot")
513
+ update_chat_dropdown = gr.Dropdown(label="Select Chat to Update", choices=[], visible=False)
514
+ load_selected_chat_button = gr.Button("Load Selected Chat", visible=False)
515
+ update_chat_button = gr.Button("Update Selected Chat", visible=False)
516
+
517
+ # States
518
+ character_data = gr.State(None)
519
+ user_name = gr.State("")
520
+ selected_chat_id = gr.State(None) # To track the selected chat for updates
521
+
522
+ # Callback Functions
523
+
524
+ def search_existing_chats(query):
525
+ results, message = search_character_chats(query)
526
+ if results:
527
+ # Format search results for dropdown
528
+ formatted_results = [
529
+ f"{chat['conversation_name']} (ID: {chat['id']})" for chat in results
530
+ ]
531
+ else:
532
+ formatted_results = []
533
+ return formatted_results, message
534
+
535
+ def load_selected_chat_from_search(selected_chat, user_name):
536
+ if not selected_chat:
537
+ return None, [], None, "No chat selected."
538
+
539
+ try:
540
+ chat_id_match = re.search(r'\(ID:\s*(\d+)\)', selected_chat)
541
+ if not chat_id_match:
542
+ return None, [], None, "Invalid chat selection format."
543
+
544
+ chat_id = int(chat_id_match.group(1))
545
+
546
+ # Use the new function to load chat and character data
547
+ char_data, chat_history, img = load_chat_and_character(chat_id, user_name)
548
+
549
+ if not char_data:
550
+ return None, [], None, "Failed to load character data for the selected chat."
551
+
552
+ return char_data, chat_history, img, f"Chat '{selected_chat}' loaded successfully."
553
+ except Exception as e:
554
+ logging.error(f"Error loading selected chat: {e}")
555
+ return None, [], None, f"Error loading chat: {e}"
556
+
557
+
558
+ def import_chat_history(file, current_history, char_data, user_name_val):
559
+ """
560
+ Imports chat history from a file, replacing '{{user}}' with the actual user name.
561
+
562
+ Args:
563
+ file (file): The uploaded chat history file.
564
+ current_history (list): The current chat history.
565
+ char_data (dict): The current character data.
566
+ user_name_val (str): The user's name.
567
+
568
+ Returns:
569
+ tuple: Updated chat history, updated character data, and a status message.
570
+ """
571
+ loaded_history, char_name = load_chat_history(file)
572
+ if loaded_history is None:
573
+ return current_history, char_data, "Failed to load chat history."
574
+
575
+ # Replace '{{user}}' in the loaded chat history
576
+ loaded_history = replace_user_placeholder(loaded_history, user_name_val)
577
+
578
+ # Check if the loaded chat is for the current character
579
+ if char_data and char_data.get('name') != char_name:
580
+ return current_history, char_data, (
581
+ f"Warning: Loaded chat is for character '{char_name}', "
582
+ f"but current character is '{char_data.get('name')}'. Chat not imported."
583
+ )
584
+
585
+ # If no character is selected, try to load the character from the chat
586
+ if not char_data:
587
+ characters = get_character_cards()
588
+ character = next((char for char in characters if char['name'] == char_name), None)
589
+ if character:
590
+ char_data = character
591
+ # Replace '{{user}}' in the first_message if necessary
592
+ if character.get('first_message'):
593
+ character['first_message'] = character['first_message'].replace("{{user}}",
594
+ user_name_val if user_name_val else "User")
595
+ else:
596
+ return current_history, char_data, (
597
+ f"Warning: Character '{char_name}' not found. Please select the character manually."
598
+ )
599
+
600
+ return loaded_history, char_data, f"Chat history for '{char_name}' imported successfully."
601
+
602
+ def load_character(name):
603
+ characters = get_character_cards()
604
+ character = next((char for char in characters if char['name'] == name), None)
605
+ if character:
606
+ first_message = character.get('first_message', "Hello! I'm ready to chat.")
607
+ return character, [(None, first_message)] if first_message else [], None
608
+ return None, [], None
609
+
610
+ def load_character_image(name):
611
+ character = next((char for char in get_character_cards() if char['name'] == name), None)
612
+ if character and 'image' in character and character['image']:
613
+ try:
614
+ # Decode the base64 image
615
+ image_data = base64.b64decode(character['image'])
616
+ # Load as PIL Image
617
+ img = Image.open(io.BytesIO(image_data)).convert("RGBA")
618
+ return img
619
+ except Exception as e:
620
+ logging.error(f"Error loading image for character '{name}': {e}")
621
+ return None
622
+ return None
623
+
624
+ def character_chat_wrapper(
625
+ message, history, char_data, api_endpoint, api_key,
626
+ temperature, user_name_val, auto_save
627
+ ):
628
+ if not char_data:
629
+ return history, "Please select a character first."
630
+
631
+ user_name_val = user_name_val or "User"
632
+ char_name = char_data.get('name', 'AI Assistant')
633
+
634
+ # Prepare the character's background information
635
+ char_background = f"""
636
+ Name: {char_name}
637
+ Description: {char_data.get('description', 'N/A')}
638
+ Personality: {char_data.get('personality', 'N/A')}
639
+ Scenario: {char_data.get('scenario', 'N/A')}
640
+ """
641
+
642
+ # Prepare the system prompt
643
+ system_message = f"""You are roleplaying as {char_name}. {char_data.get('system_prompt', '')}"""
644
+
645
+ # Prepare chat context
646
+ media_content = {
647
+ 'id': char_name,
648
+ 'title': char_name,
649
+ 'content': char_background,
650
+ 'description': char_data.get('description', ''),
651
+ 'personality': char_data.get('personality', ''),
652
+ 'scenario': char_data.get('scenario', '')
653
+ }
654
+ selected_parts = ['description', 'personality', 'scenario']
655
+
656
+ prompt = char_data.get('post_history_instructions', '')
657
+
658
+ # Sanitize and format user message
659
+ user_message = sanitize_user_input(message)
660
+ user_message = replace_placeholders(user_message, char_name, user_name_val)
661
+ full_message = f"{user_name_val}: {user_message}"
662
+
663
+ # Generate bot response
664
+ bot_message = chat(
665
+ full_message,
666
+ history,
667
+ media_content,
668
+ selected_parts,
669
+ api_endpoint,
670
+ api_key,
671
+ prompt,
672
+ temperature,
673
+ system_message
674
+ )
675
+
676
+ # Replace placeholders in bot message
677
+ bot_message = replace_placeholders(bot_message, char_name, user_name_val)
678
+
679
+ # Update history
680
+ history.append((user_message, bot_message))
681
+
682
+ # Auto-save if enabled
683
+ save_status = ""
684
+ if auto_save:
685
+ character_id = char_data.get('id')
686
+ if character_id:
687
+ conversation_name = f"Auto-saved chat {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
688
+ add_character_chat(character_id, conversation_name, history)
689
+ save_status = "Chat auto-saved."
690
+ else:
691
+ save_status = "Character ID not found; chat not saved."
692
+
693
+ return history, save_status
694
+
695
+ def save_chat_history_to_db_wrapper(
696
+ chat_history, conversation_id, media_content,
697
+ chat_media_name, char_data, auto_save
698
+ ):
699
+ if not char_data or not chat_history:
700
+ return "No character or chat history available.", ""
701
+
702
+ character_id = char_data.get('id')
703
+ if not character_id:
704
+ return "Character ID not found.", ""
705
+
706
+ conversation_name = chat_media_name or f"Chat {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
707
+ chat_id = add_character_chat(character_id, conversation_name, chat_history)
708
+ if chat_id:
709
+ return f"Chat saved successfully with ID {chat_id}.", ""
710
+ else:
711
+ return "Failed to save chat.", ""
712
+
713
+ def update_character_info(name):
714
+ return load_character_and_image(name, user_name.value)
715
+
716
+ def on_character_select(name, user_name_val):
717
+ logging.debug(f"Character selected: {name}")
718
+ char_data, chat_history, img = load_character_and_image(name, user_name_val)
719
+ return char_data, chat_history, img
720
+
721
+ def clear_chat_history(char_data, user_name_val):
722
+ """
723
+ Clears the chat history and initializes it with the character's first message,
724
+ replacing the '{{user}}' placeholder with the actual user name.
725
+
726
+ Args:
727
+ char_data (dict): The current character data.
728
+ user_name_val (str): The user's name.
729
+
730
+ Returns:
731
+ tuple: Updated chat history and the unchanged char_data.
732
+ """
733
+ if char_data and 'first_message' in char_data and char_data['first_message']:
734
+ # Replace '{{user}}' in the first_message
735
+ first_message = char_data['first_message'].replace("{{user}}",
736
+ user_name_val if user_name_val else "User")
737
+ # Initialize chat history with the updated first_message
738
+ return [(None, first_message)], char_data
739
+ else:
740
+ # If no first_message is defined, simply clear the chat
741
+ return [], char_data
742
+
743
+ def regenerate_last_message(
744
+ history, char_data, api_endpoint, api_key,
745
+ temperature, user_name_val, auto_save
746
+ ):
747
+ """
748
+ Regenerates the last bot message by removing it and resending the corresponding user message.
749
+
750
+ Args:
751
+ history (list): The current chat history as a list of tuples (user_message, bot_message).
752
+ char_data (dict): The current character data.
753
+ api_endpoint (str): The API endpoint to use for the LLM.
754
+ api_key (str): The API key for authentication.
755
+ temperature (float): The temperature setting for the LLM.
756
+ user_name_val (str): The user's name.
757
+ auto_save (bool): Flag indicating whether to auto-save the chat.
758
+
759
+ Returns:
760
+ tuple: Updated chat history and a save status message.
761
+ """
762
+ if not history:
763
+ return history, "No messages to regenerate."
764
+
765
+ last_entry = history[-1]
766
+ last_user_message, last_bot_message = last_entry
767
+
768
+ # Check if the last bot message exists
769
+ if last_bot_message is None:
770
+ return history, "The last message is not from the bot."
771
+
772
+ # Remove the last bot message
773
+ new_history = history[:-1]
774
+
775
+ # Resend the last user message to generate a new bot response
776
+ if not last_user_message:
777
+ return new_history, "No user message to regenerate the bot response."
778
+
779
+ # Prepare the character's background information
780
+ char_name = char_data.get('name', 'AI Assistant')
781
+ char_background = f"""
782
+ Name: {char_name}
783
+ Description: {char_data.get('description', 'N/A')}
784
+ Personality: {char_data.get('personality', 'N/A')}
785
+ Scenario: {char_data.get('scenario', 'N/A')}
786
+ """
787
+
788
+ # Prepare the system prompt for character impersonation
789
+ system_message = f"""You are roleplaying as {char_name}, the character described below. Respond to the user's messages in character, maintaining the personality and background provided. Do not break character or refer to yourself as an AI. Always refer to yourself as "{char_name}" and refer to the user as "{user_name_val}".
790
+
791
+ {char_background}
792
+
793
+ Additional instructions: {char_data.get('post_history_instructions', '')}
794
+ """
795
+
796
+ # Prepare media_content and selected_parts
797
+ media_content = {
798
+ 'id': char_name,
799
+ 'title': char_name,
800
+ 'content': char_background,
801
+ 'description': char_data.get('description', ''),
802
+ 'personality': char_data.get('personality', ''),
803
+ 'scenario': char_data.get('scenario', '')
804
+ }
805
+ selected_parts = ['description', 'personality', 'scenario']
806
+
807
+ prompt = char_data.get('post_history_instructions', '')
808
+
809
+ # Prepare the input for the chat function
810
+ full_message = f"{user_name_val}: {last_user_message}" if last_user_message else f"{user_name_val}: "
811
+
812
+ # Call the chat function to get a new bot message
813
+ bot_message = chat(
814
+ full_message,
815
+ new_history,
816
+ media_content,
817
+ selected_parts,
818
+ api_endpoint,
819
+ api_key,
820
+ prompt,
821
+ temperature,
822
+ system_message
823
+ )
824
+
825
+ # Append the new bot message to the history
826
+ new_history.append((last_user_message, bot_message))
827
+
828
+ # Auto-save if enabled
829
+ if auto_save:
830
+ character_id = char_data.get('id')
831
+ if character_id:
832
+ conversation_name = f"Auto-saved chat {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
833
+ add_character_chat(character_id, conversation_name, new_history)
834
+ save_status = "Chat auto-saved."
835
+ else:
836
+ save_status = "Character ID not found; chat not saved."
837
+ else:
838
+ save_status = ""
839
+
840
+ return new_history, save_status
841
+
842
+ def toggle_chat_file_upload():
843
+ return gr.update(visible=True)
844
+
845
+ def save_untracked_chat_action(history, char_data):
846
+ if not char_data or not history:
847
+ return "No chat to save or character not selected."
848
+
849
+ character_id = char_data.get('id')
850
+ if not character_id:
851
+ return "Character ID not found."
852
+
853
+ conversation_name = f"Snapshot {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
854
+ chat_id = add_character_chat(character_id, conversation_name, history, is_snapshot=True)
855
+ if chat_id:
856
+ return f"Chat snapshot saved successfully with ID {chat_id}."
857
+ else:
858
+ return "Failed to save chat snapshot."
859
+
860
+ def select_chat_for_update():
861
+ # Fetch all chats for the selected character
862
+ if character_data.value:
863
+ character_id = character_data.value.get('id')
864
+ if character_id:
865
+ chats = get_character_chats(character_id)
866
+ chat_choices = [
867
+ f"{chat['conversation_name']} (ID: {chat['id']})" for chat in chats
868
+ ]
869
+ return gr.update(choices=chat_choices), None
870
+ return gr.update(choices=[]), "No character selected."
871
+
872
+ def load_selected_chat(chat_selection):
873
+ if not chat_selection:
874
+ return [], "No chat selected."
875
+
876
+ try:
877
+ chat_id = int(chat_selection.split('(ID: ')[1].rstrip(')'))
878
+ chat = get_character_chat_by_id(chat_id)
879
+ if chat:
880
+ history = chat['chat_history']
881
+ selected_chat_id.value = chat_id # Update the selected_chat_id state
882
+ return history, f"Loaded chat '{chat['conversation_name']}' successfully."
883
+ else:
884
+ return [], "Chat not found."
885
+ except Exception as e:
886
+ logging.error(f"Error loading selected chat: {e}")
887
+ return [], f"Error loading chat: {e}"
888
+
889
+ def update_chat(chat_id, updated_history):
890
+ success = update_character_chat(chat_id, updated_history)
891
+ if success:
892
+ return "Chat updated successfully."
893
+ else:
894
+ return "Failed to update chat."
895
+
896
+ # Define States for conversation_id and media_content, which are required for saving chat history
897
+ conversation_id = gr.State(str(uuid.uuid4()))
898
+ media_content = gr.State({})
899
+
900
+ # Button Callbacks
901
+
902
+ import_card_button.click(
903
+ fn=import_character_card,
904
+ inputs=[character_card_upload],
905
+ outputs=[character_data, character_dropdown, save_status]
906
+ )
907
+
908
+ load_characters_button.click(
909
+ fn=lambda: gr.update(choices=[f"{char['name']} (ID: {char['id']})" for char in get_character_cards()]),
910
+ outputs=character_dropdown
911
+ )
912
+
913
+ # FIXME user_name_val = validate_user_name(user_name_val)
914
+ clear_chat_button.click(
915
+ fn=clear_chat_history,
916
+ inputs=[character_data, user_name_input],
917
+ outputs=[chat_history, character_data]
918
+ )
919
+
920
+ character_dropdown.change(
921
+ fn=extract_character_id,
922
+ inputs=[character_dropdown],
923
+ outputs=character_data
924
+ ).then(
925
+ fn=load_character_wrapper,
926
+ inputs=[character_data, user_name_input],
927
+ outputs=[character_data, chat_history, character_image]
928
+ )
929
+
930
+ send_message_button.click(
931
+ fn=character_chat_wrapper,
932
+ inputs=[
933
+ user_input,
934
+ chat_history,
935
+ character_data,
936
+ api_name_input,
937
+ api_key_input,
938
+ temperature_slider,
939
+ user_name_input,
940
+ auto_save_checkbox
941
+ ],
942
+ outputs=[chat_history, save_status]
943
+ ).then(lambda: "", outputs=user_input)
944
+
945
+ regenerate_button.click(
946
+ fn=regenerate_last_message,
947
+ inputs=[
948
+ chat_history,
949
+ character_data,
950
+ api_name_input,
951
+ api_key_input,
952
+ temperature_slider,
953
+ user_name_input,
954
+ auto_save_checkbox
955
+ ],
956
+ outputs=[chat_history, save_status]
957
+ )
958
+
959
+ import_chat_button.click(
960
+ fn=lambda: gr.update(visible=True),
961
+ outputs=chat_file_upload
962
+ )
963
+
964
+ chat_file_upload.change(
965
+ fn=import_chat_history,
966
+ inputs=[chat_file_upload, chat_history, character_data, user_name_input],
967
+ outputs=[chat_history, character_data, save_status]
968
+ )
969
+
970
+ save_chat_history_to_db.click(
971
+ fn=save_chat_history_to_db_wrapper,
972
+ inputs=[
973
+ chat_history,
974
+ conversation_id,
975
+ media_content,
976
+ chat_media_name,
977
+ character_data,
978
+ auto_save_checkbox # Pass the auto_save state
979
+ ],
980
+ outputs=[conversation_id, save_status]
981
+ )
982
+
983
+ # Populate the update_chat_dropdown based on selected character
984
+ character_dropdown.change(
985
+ fn=select_chat_for_update,
986
+ inputs=[],
987
+ outputs=[update_chat_dropdown, save_status]
988
+ )
989
+
990
+ load_selected_chat_button.click(
991
+ fn=load_selected_chat,
992
+ inputs=[update_chat_dropdown],
993
+ outputs=[chat_history, save_status]
994
+ )
995
+
996
+ save_snapshot_button.click(
997
+ fn=save_untracked_chat_action,
998
+ inputs=[chat_history, character_data],
999
+ outputs=save_status
1000
+ )
1001
+
1002
+ update_chat_button.click(
1003
+ fn=update_chat,
1004
+ inputs=[selected_chat_id, chat_history],
1005
+ outputs=save_status
1006
+ )
1007
+
1008
+ # Search Chats
1009
+ chat_search_button.click(
1010
+ fn=search_existing_chats,
1011
+ inputs=[chat_search_query],
1012
+ outputs=[chat_search_dropdown, save_status]
1013
+ ).then(
1014
+ fn=lambda choices, msg: gr.update(choices=choices, visible=True) if choices else gr.update(visible=False),
1015
+ inputs=[chat_search_dropdown, save_status],
1016
+ outputs=[chat_search_dropdown]
1017
+ )
1018
+
1019
+ # Load Selected Chat from Search
1020
+ load_chat_button.click(
1021
+ fn=load_selected_chat_from_search,
1022
+ inputs=[chat_search_dropdown, user_name_input],
1023
+ outputs=[character_data, chat_history, character_image, save_status]
1024
+ )
1025
+
1026
+ # Show Load Chat Button when a chat is selected
1027
+ chat_search_dropdown.change(
1028
+ fn=lambda selected: gr.update(visible=True) if selected else gr.update(visible=False),
1029
+ inputs=[chat_search_dropdown],
1030
+ outputs=[load_chat_button]
1031
+ )
1032
+
1033
+
1034
+ return character_data, chat_history, user_input, user_name, character_image
1035
+
1036
+
1037
+ def create_character_chat_mgmt_tab():
1038
+ with gr.TabItem("Chat Management"):
1039
+ gr.Markdown("# Chat Management")
1040
+
1041
+ with gr.Row():
1042
+ # Search Section
1043
+ with gr.Column(scale=1):
1044
+ gr.Markdown("## Search Conversations or Characters")
1045
+ search_query = gr.Textbox(label="Search Conversations or Characters", placeholder="Enter search keywords")
1046
+ search_button = gr.Button("Search")
1047
+ search_results = gr.Dropdown(label="Search Results", choices=[], visible=False)
1048
+ search_status = gr.Markdown("", visible=True)
1049
+
1050
+ # Select Character and Chat Section
1051
+ with gr.Column(scale=1):
1052
+ gr.Markdown("## Select Character and Associated Chats")
1053
+ characters = get_character_cards()
1054
+ character_choices = [f"{char['name']} (ID: {char['id']})" for char in characters]
1055
+ select_character = gr.Dropdown(label="Select Character", choices=character_choices, interactive=True)
1056
+ select_chat = gr.Dropdown(label="Select Chat", choices=[], visible=False, interactive=True)
1057
+ load_chat_button = gr.Button("Load Selected Chat", visible=False)
1058
+
1059
+ with gr.Row():
1060
+ conversation_list = gr.Dropdown(label="Select Conversation or Character", choices=[])
1061
+ conversation_mapping = gr.State({})
1062
+
1063
+ with gr.Tabs():
1064
+ with gr.TabItem("Edit"):
1065
+ chat_content = gr.TextArea(label="Chat/Character Content (JSON)", lines=20, max_lines=50)
1066
+ save_button = gr.Button("Save Changes")
1067
+ delete_button = gr.Button("Delete Conversation/Character", variant="stop")
1068
+
1069
+ with gr.TabItem("Preview"):
1070
+ chat_preview = gr.HTML(label="Chat/Character Preview")
1071
+ result_message = gr.Markdown("")
1072
+
1073
+ # Callback Functions
1074
+
1075
+ def search_conversations_or_characters(query):
1076
+ if not query.strip():
1077
+ return gr.update(choices=[], visible=False), "Please enter a search query."
1078
+
1079
+ try:
1080
+ # Search Chats using FTS5
1081
+ chat_results, chat_message = search_character_chats(query)
1082
+
1083
+ # Format chat results
1084
+ formatted_chat_results = [
1085
+ f"Chat: {chat['conversation_name']} (ID: {chat['id']})" for chat in chat_results
1086
+ ]
1087
+
1088
+ # Search Characters using substring match
1089
+ characters = get_character_cards()
1090
+ filtered_characters = [
1091
+ char for char in characters
1092
+ if query.lower() in char['name'].lower()
1093
+ ]
1094
+ formatted_character_results = [
1095
+ f"Character: {char['name']} (ID: {char['id']})" for char in filtered_characters
1096
+ ]
1097
+
1098
+ # Combine results
1099
+ all_choices = formatted_chat_results + formatted_character_results
1100
+ mapping = {choice: conv['id'] for choice, conv in zip(formatted_chat_results, chat_results)}
1101
+ mapping.update({choice: char['id'] for choice, char in zip(formatted_character_results, filtered_characters)})
1102
+
1103
+ if all_choices:
1104
+ return gr.update(choices=all_choices, visible=True), f"Found {len(all_choices)} result(s) matching '{query}'."
1105
+ else:
1106
+ return gr.update(choices=[], visible=False), f"No results found for '{query}'."
1107
+
1108
+ except Exception as e:
1109
+ logging.error(f"Error during search: {e}")
1110
+ return gr.update(choices=[], visible=False), f"Error occurred during search: {e}"
1111
+
1112
+ def load_conversation_or_character(selected, conversation_mapping):
1113
+ if not selected or selected not in conversation_mapping:
1114
+ return "", "<p>No selection made.</p>"
1115
+
1116
+ selected_id = conversation_mapping[selected]
1117
+ if selected.startswith("Chat:"):
1118
+ chat = get_character_chat_by_id(selected_id)
1119
+ if chat:
1120
+ json_content = json.dumps({
1121
+ "conversation_id": chat['id'],
1122
+ "conversation_name": chat['conversation_name'],
1123
+ "messages": chat['chat_history']
1124
+ }, indent=2)
1125
+
1126
+ html_preview = create_chat_preview_html(chat['chat_history'])
1127
+ return json_content, html_preview
1128
+ elif selected.startswith("Character:"):
1129
+ character = get_character_card_by_id(selected_id)
1130
+ if character:
1131
+ json_content = json.dumps({
1132
+ "id": character['id'],
1133
+ "name": character['name'],
1134
+ "description": character['description'],
1135
+ "personality": character['personality'],
1136
+ "scenario": character['scenario'],
1137
+ "post_history_instructions": character['post_history_instructions'],
1138
+ "first_mes": character['first_mes'],
1139
+ "mes_example": character['mes_example'],
1140
+ "creator_notes": character.get('creator_notes', ''),
1141
+ "system_prompt": character.get('system_prompt', ''),
1142
+ "tags": character.get('tags', []),
1143
+ "creator": character.get('creator', ''),
1144
+ "character_version": character.get('character_version', ''),
1145
+ "extensions": character.get('extensions', {})
1146
+ }, indent=2)
1147
+
1148
+ html_preview = create_character_preview_html(character)
1149
+ return json_content, html_preview
1150
+
1151
+ return "", "<p>Unable to load the selected item.</p>"
1152
+
1153
+ def validate_content(selected, content):
1154
+ try:
1155
+ data = json.loads(content)
1156
+ if selected.startswith("Chat:"):
1157
+ assert "conversation_id" in data and "messages" in data
1158
+ elif selected.startswith("Character:"):
1159
+ assert "id" in data and "name" in data
1160
+ return True, data
1161
+ except Exception as e:
1162
+ return False, f"Invalid JSON: {e}"
1163
+
1164
+ def save_conversation_or_character(selected, conversation_mapping, content):
1165
+ if not selected or selected not in conversation_mapping:
1166
+ return "Please select an item to save.", "<p>No changes made.</p>"
1167
+
1168
+ is_valid, result = validate_content(selected, content)
1169
+ if not is_valid:
1170
+ return f"Error: {result}", "<p>No changes made due to validation error.</p>"
1171
+
1172
+ selected_id = conversation_mapping[selected]
1173
+
1174
+ if selected.startswith("Chat:"):
1175
+ success = update_character_chat(selected_id, result['messages'])
1176
+ return ("Chat updated successfully." if success else "Failed to update chat."), ("<p>Chat updated.</p>" if success else "<p>Failed to update chat.</p>")
1177
+ elif selected.startswith("Character:"):
1178
+ success = update_character_card(selected_id, result)
1179
+ return ("Character updated successfully." if success else "Failed to update character."), ("<p>Character updated.</p>" if success else "<p>Failed to update character.</p>")
1180
+
1181
+ return "Unknown item type.", "<p>No changes made.</p>"
1182
+
1183
+ def delete_conversation_or_character(selected, conversation_mapping):
1184
+ if not selected or selected not in conversation_mapping:
1185
+ return "Please select an item to delete.", "<p>No changes made.</p>", gr.update(choices=[])
1186
+
1187
+ selected_id = conversation_mapping[selected]
1188
+
1189
+ if selected.startswith("Chat:"):
1190
+ success = delete_character_chat(selected_id)
1191
+ elif selected.startswith("Character:"):
1192
+ success = delete_character_card(selected_id)
1193
+ else:
1194
+ return "Unknown item type.", "<p>No changes made.</p>", gr.update()
1195
+
1196
+ if success:
1197
+ updated_choices = [choice for choice in conversation_mapping.keys() if choice != selected]
1198
+ conversation_mapping.value.pop(selected, None)
1199
+ return f"{selected.split(':')[0]} deleted successfully.", f"<p>{selected.split(':')[0]} deleted.</p>", gr.update(choices=updated_choices)
1200
+ else:
1201
+ return f"Failed to delete {selected.split(':')[0].lower()}.", f"<p>Failed to delete {selected.split(':')[0].lower()}.</p>", gr.update()
1202
+
1203
+ def populate_chats(character_selection):
1204
+ if not character_selection:
1205
+ return gr.update(choices=[], visible=False), "Please select a character first."
1206
+
1207
+ try:
1208
+ character_id = int(character_selection.split('(ID: ')[1].rstrip(')'))
1209
+ chats = get_character_chats(character_id=character_id)
1210
+
1211
+ if not chats:
1212
+ return gr.update(choices=[], visible=False), f"No chats found for the selected character."
1213
+
1214
+ formatted_chats = [f"{chat['conversation_name']} (ID: {chat['id']})" for chat in chats]
1215
+ return gr.update(choices=formatted_chats, visible=True), f"Found {len(formatted_chats)} chat(s)."
1216
+ except Exception as e:
1217
+ logging.error(f"Error populating chats: {e}")
1218
+ return gr.update(choices=[], visible=False), f"Error occurred: {e}"
1219
+
1220
+ def load_chat_from_character(selected_chat):
1221
+ if not selected_chat:
1222
+ return "", "<p>No chat selected.</p>"
1223
+
1224
+ try:
1225
+ chat_id = int(selected_chat.split('(ID: ')[1].rstrip(')'))
1226
+ chat = get_character_chat_by_id(chat_id)
1227
+ if not chat:
1228
+ return "", "<p>Selected chat not found.</p>"
1229
+
1230
+ json_content = json.dumps({
1231
+ "conversation_id": chat['id'],
1232
+ "conversation_name": chat['conversation_name'],
1233
+ "messages": chat['chat_history']
1234
+ }, indent=2)
1235
+
1236
+ html_preview = create_chat_preview_html(chat['chat_history'])
1237
+ return json_content, html_preview
1238
+ except Exception as e:
1239
+ logging.error(f"Error loading chat: {e}")
1240
+ return "", f"<p>Error loading chat: {e}</p>"
1241
+
1242
+ def create_chat_preview_html(chat_history):
1243
+ html_preview = "<div style='max-height: 500px; overflow-y: auto;'>"
1244
+ for user_msg, bot_msg in chat_history:
1245
+ user_style = "background-color: #e6f3ff; padding: 10px; border-radius: 5px; margin-bottom: 5px;"
1246
+ bot_style = "background-color: #f0f0f0; padding: 10px; border-radius: 5px; margin-bottom: 10px;"
1247
+ html_preview += f"<div style='{user_style}'><strong>User:</strong> {user_msg}</div>"
1248
+ html_preview += f"<div style='{bot_style}'><strong>Bot:</strong> {bot_msg}</div>"
1249
+ html_preview += "</div>"
1250
+ return html_preview
1251
+
1252
+ def create_character_preview_html(character):
1253
+ return f"""
1254
+ <div>
1255
+ <h2>{character['name']}</h2>
1256
+ <p><strong>Description:</strong> {character['description']}</p>
1257
+ <p><strong>Personality:</strong> {character['personality']}</p>
1258
+ <p><strong>Scenario:</strong> {character['scenario']}</p>
1259
+ <p><strong>First Message:</strong> {character['first_mes']}</p>
1260
+ <p><strong>Example Message:</strong> {character['mes_example']}</p>
1261
+ <p><strong>Post History Instructions:</strong> {character['post_history_instructions']}</p>
1262
+ <p><strong>System Prompt:</strong> {character.get('system_prompt', 'N/A')}</p>
1263
+ <p><strong>Tags:</strong> {', '.join(character.get('tags', []))}</p>
1264
+ <p><strong>Creator:</strong> {character.get('creator', 'N/A')}</p>
1265
+ <p><strong>Version:</strong> {character.get('character_version', 'N/A')}</p>
1266
+ </div>
1267
+ """
1268
+
1269
+ # Register Callback Functions with Gradio Components
1270
+ search_button.click(
1271
+ fn=search_conversations_or_characters,
1272
+ inputs=[search_query],
1273
+ outputs=[search_results, search_status]
1274
+ )
1275
+
1276
+ search_results.change(
1277
+ fn=load_conversation_or_character,
1278
+ inputs=[search_results, conversation_mapping],
1279
+ outputs=[chat_content, chat_preview]
1280
+ )
1281
+
1282
+ save_button.click(
1283
+ fn=save_conversation_or_character,
1284
+ inputs=[conversation_list, conversation_mapping, chat_content],
1285
+ outputs=[result_message, chat_preview]
1286
+ )
1287
+
1288
+ delete_button.click(
1289
+ fn=delete_conversation_or_character,
1290
+ inputs=[conversation_list, conversation_mapping],
1291
+ outputs=[result_message, chat_preview, conversation_list]
1292
+ )
1293
+
1294
+ select_character.change(
1295
+ fn=populate_chats,
1296
+ inputs=[select_character],
1297
+ outputs=[select_chat, search_status]
1298
+ )
1299
+
1300
+ select_chat.change(
1301
+ fn=load_chat_from_character,
1302
+ inputs=[select_chat],
1303
+ outputs=[chat_content, chat_preview]
1304
+ )
1305
+
1306
+ load_chat_button.click(
1307
+ fn=load_chat_from_character,
1308
+ inputs=[select_chat],
1309
+ outputs=[chat_content, chat_preview]
1310
+ )
1311
+
1312
+ return (
1313
+ search_query, search_button, search_results, search_status,
1314
+ select_character, select_chat, load_chat_button,
1315
+ conversation_list, conversation_mapping,
1316
+ chat_content, save_button, delete_button,
1317
+ chat_preview, result_message
1318
+ )
App_Function_Libraries/Gradio_UI/Character_interaction_tab.py ADDED
@@ -0,0 +1,838 @@
1
+ # Character_Interaction_tab.py
2
+ # Description: This file contains the functions that are used for Character Interactions in the Gradio UI.
3
+ #
4
+ # Imports
5
+ import base64
6
+ import io
7
+ import uuid
8
+ from datetime import datetime as datetime
9
+ import logging
10
+ import json
11
+ import os
12
+ from typing import List, Dict, Tuple, Union
13
+
14
+ #
15
+ # External Imports
16
+ import gradio as gr
17
+ from PIL import Image
18
+ #
19
+ # Local Imports
20
+ from App_Function_Libraries.Chat import chat, load_characters, save_chat_history_to_db_wrapper
21
+ from App_Function_Libraries.Gradio_UI.Chat_ui import chat_wrapper
22
+ from App_Function_Libraries.Gradio_UI.Writing_tab import generate_writing_feedback
23
+ #
24
+ ########################################################################################################################
25
+ #
26
+ # Single-Character chat Functions:
27
+
28
+
29
+ def chat_with_character(user_message, history, char_data, api_name_input, api_key):
30
+ if char_data is None:
31
+ return history, "Please import a character card first."
32
+
33
+ bot_message = generate_writing_feedback(user_message, char_data['name'], "Overall", api_name_input,
34
+ api_key)
35
+ history.append((user_message, bot_message))
36
+ return history, ""
37
+
38
+
39
+ def import_character_card(file):
40
+ if file is None:
41
+ logging.warning("No file provided for character card import")
42
+ return None
43
+ try:
44
+ if file.name.lower().endswith(('.png', '.webp')):
45
+ logging.info(f"Attempting to import character card from image: {file.name}")
46
+ json_data = extract_json_from_image(file)
47
+ if json_data:
48
+ logging.info("JSON data extracted from image, attempting to parse")
49
+ card_data = import_character_card_json(json_data)
50
+ if card_data:
51
+ # Save the image data
52
+ with Image.open(file) as img:
53
+ img_byte_arr = io.BytesIO()
54
+ img.save(img_byte_arr, format='PNG')
55
+ card_data['image'] = base64.b64encode(img_byte_arr.getvalue()).decode('utf-8')
56
+ return card_data
57
+ else:
58
+ logging.warning("No JSON data found in the image")
59
+ else:
60
+ logging.info(f"Attempting to import character card from JSON file: {file.name}")
61
+ content = file.read().decode('utf-8')
62
+ return import_character_card_json(content)
63
+ except Exception as e:
64
+ logging.error(f"Error importing character card: {e}")
65
+ return None
66
+
67
+
68
+ def import_character_card_json(json_content):
69
+ try:
70
+ # Remove any leading/trailing whitespace
71
+ json_content = json_content.strip()
72
+
73
+ # Log the first 100 characters of the content
74
+ logging.debug(f"JSON content (first 100 chars): {json_content[:100]}...")
75
+
76
+ card_data = json.loads(json_content)
77
+ logging.debug(f"Parsed JSON data keys: {list(card_data.keys())}")
78
+ if 'spec' in card_data and card_data['spec'] == 'chara_card_v2':
79
+ logging.info("Detected V2 character card")
80
+ return card_data['data']
81
+ else:
82
+ logging.info("Assuming V1 character card")
83
+ return card_data
84
+ except json.JSONDecodeError as e:
85
+ logging.error(f"JSON decode error: {e}")
86
+ logging.error(f"Problematic JSON content: {json_content[:500]}...")
87
+ except Exception as e:
88
+ logging.error(f"Unexpected error parsing JSON: {e}")
89
+ return None
90
+
91
+
92
+ def extract_json_from_image(image_file):
93
+ logging.debug(f"Attempting to extract JSON from image: {image_file.name}")
94
+ try:
95
+ with Image.open(image_file) as img:
96
+ logging.debug("Image opened successfully")
97
+ metadata = img.info
98
+ if 'chara' in metadata:
99
+ logging.debug("Found 'chara' in image metadata")
100
+ chara_content = metadata['chara']
101
+ logging.debug(f"Content of 'chara' metadata (first 100 chars): {chara_content[:100]}...")
102
+ try:
103
+ decoded_content = base64.b64decode(chara_content).decode('utf-8')
104
+ logging.debug(f"Decoded content (first 100 chars): {decoded_content[:100]}...")
105
+ return decoded_content
106
+ except Exception as e:
107
+ logging.error(f"Error decoding base64 content: {e}")
108
+
109
+ logging.debug("'chara' not found in metadata, checking for base64 encoded data")
110
+ raw_data = img.tobytes()
111
+ possible_json = raw_data.split(b'{', 1)[-1].rsplit(b'}', 1)[0]
112
+ if possible_json:
113
+ try:
114
+ decoded = base64.b64decode(possible_json).decode('utf-8')
115
+ if decoded.startswith('{') and decoded.endswith('}'):
116
+ logging.debug("Found and decoded base64 JSON data")
117
+ return '{' + decoded + '}'
118
+ except Exception as e:
119
+ logging.error(f"Error decoding base64 data: {e}")
120
+
121
+ logging.warning("No JSON data found in the image")
122
+ except Exception as e:
123
+ logging.error(f"Error extracting JSON from image: {e}")
124
+ return None
125
+
126
+
127
+ def load_chat_history(file):
128
+ try:
129
+ content = file.read().decode('utf-8')
130
+ chat_data = json.loads(content)
131
+ return chat_data['history'], chat_data['character']
132
+ except Exception as e:
133
+ logging.error(f"Error loading chat history: {e}")
134
+ return None, None
135
+
136
+
137
+ # FIXME - deprecated keeping until sure no longer needed
138
+ # def create_character_card_interaction_tab():
139
+ # with gr.TabItem("Chat with a Character Card"):
140
+ # gr.Markdown("# Chat with a Character Card")
141
+ # with gr.Row():
142
+ # with gr.Column(scale=1):
143
+ # character_image = gr.Image(label="Character Image", type="filepath")
144
+ # character_card_upload = gr.File(label="Upload Character Card")
145
+ # import_card_button = gr.Button("Import Character Card")
146
+ # load_characters_button = gr.Button("Load Existing Characters")
147
+ # from App_Function_Libraries.Chat import get_character_names
148
+ # character_dropdown = gr.Dropdown(label="Select Character", choices=get_character_names())
149
+ # user_name_input = gr.Textbox(label="Your Name", placeholder="Enter your name here")
150
+ # api_name_input = gr.Dropdown(
151
+ # choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral",
152
+ # "OpenRouter", "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "ollama", "HuggingFace",
153
+ # "Custom-OpenAI-API"],
154
+ # value="HuggingFace",
155
+ # # FIXME - make it so the user cant' click `Send Message` without first setting an API + Chatbot
156
+ # label="API for Interaction(Mandatory)"
157
+ # )
158
+ # api_key_input = gr.Textbox(label="API Key (if not set in Config_Files/config.txt)",
159
+ # placeholder="Enter your API key here", type="password")
160
+ # temperature_slider = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, step=0.05, label="Temperature")
161
+ # import_chat_button = gr.Button("Import Chat History")
162
+ # chat_file_upload = gr.File(label="Upload Chat History JSON", visible=False)
163
+ #
164
+ # with gr.Column(scale=2):
165
+ # chat_history = gr.Chatbot(label="Conversation", height=800)
166
+ # user_input = gr.Textbox(label="Your message")
167
+ # send_message_button = gr.Button("Send Message")
168
+ # regenerate_button = gr.Button("Regenerate Last Message")
169
+ # clear_chat_button = gr.Button("Clear Chat")
170
+ # chat_media_name = gr.Textbox(label="Custom Chat Name(optional)", visible=True)
171
+ # save_chat_history_to_db = gr.Button("Save Chat History to DataBase")
172
+ # save_status = gr.Textbox(label="Save Status", interactive=False)
173
+ #
174
+ # character_data = gr.State(None)
175
+ # user_name = gr.State("")
176
+ #
177
+ # def import_chat_history(file, current_history, char_data):
178
+ # loaded_history, char_name = load_chat_history(file)
179
+ # if loaded_history is None:
180
+ # return current_history, char_data, "Failed to load chat history."
181
+ #
182
+ # # Check if the loaded chat is for the current character
183
+ # if char_data and char_data.get('name') != char_name:
184
+ # return current_history, char_data, f"Warning: Loaded chat is for character '{char_name}', but current character is '{char_data.get('name')}'. Chat not imported."
185
+ #
186
+ # # If no character is selected, try to load the character from the chat
187
+ # if not char_data:
188
+ # new_char_data = load_character(char_name)[0]
189
+ # if new_char_data:
190
+ # char_data = new_char_data
191
+ # else:
192
+ # return current_history, char_data, f"Warning: Character '{char_name}' not found. Please select the character manually."
193
+ #
194
+ # return loaded_history, char_data, f"Chat history for '{char_name}' imported successfully."
195
+ #
196
+ # def import_character(file):
197
+ # card_data = import_character_card(file)
198
+ # if card_data:
199
+ # from App_Function_Libraries.Chat import save_character
200
+ # save_character(card_data)
201
+ # return card_data, gr.update(choices=get_character_names())
202
+ # else:
203
+ # return None, gr.update()
204
+ #
205
+ # def load_character(name):
206
+ # from App_Function_Libraries.Chat import load_characters
207
+ # characters = load_characters()
208
+ # char_data = characters.get(name)
209
+ # if char_data:
210
+ # first_message = char_data.get('first_mes', "Hello! I'm ready to chat.")
211
+ # return char_data, [(None, first_message)] if first_message else [], None
212
+ # return None, [], None
213
+ #
214
+ # def load_character_image(name):
215
+ # from App_Function_Libraries.Chat import load_characters
216
+ # characters = load_characters()
217
+ # char_data = characters.get(name)
218
+ # if char_data and 'image_path' in char_data:
219
+ # image_path = char_data['image_path']
220
+ # if os.path.exists(image_path):
221
+ # return image_path
222
+ # else:
223
+ # logging.warning(f"Image file not found: {image_path}")
224
+ # return None
225
+ #
226
+ # def load_character_and_image(name):
227
+ # char_data, chat_history, _ = load_character(name)
228
+ # image_path = load_character_image(name)
229
+ # logging.debug(f"Character: {name}")
230
+ # logging.debug(f"Character data: {char_data}")
231
+ # logging.debug(f"Image path: {image_path}")
232
+ # return char_data, chat_history, image_path
233
+ #
234
+ # def character_chat_wrapper(message, history, char_data, api_endpoint, api_key, temperature, user_name):
235
+ # logging.debug("Entered character_chat_wrapper")
236
+ # if char_data is None:
237
+ # return "Please select a character first.", history
238
+ #
239
+ # if not user_name:
240
+ # user_name = "User"
241
+ #
242
+ # char_name = char_data.get('name', 'AI Assistant')
243
+ #
244
+ # # Prepare the character's background information
245
+ # char_background = f"""
246
+ # Name: {char_name}
247
+ # Description: {char_data.get('description', 'N/A')}
248
+ # Personality: {char_data.get('personality', 'N/A')}
249
+ # Scenario: {char_data.get('scenario', 'N/A')}
250
+ # """
251
+ #
252
+ # # Prepare the system prompt for character impersonation
253
+ # system_message = f"""You are roleplaying as {char_name}, the character described below. Respond to the user's messages in character, maintaining the personality and background provided. Do not break character or refer to yourself as an AI. Always refer to yourself as "{char_name}" and refer to the user as "{user_name}".
254
+ #
255
+ # {char_background}
256
+ #
257
+ # Additional instructions: {char_data.get('post_history_instructions', '')}
258
+ # """
259
+ #
260
+ # # Prepare media_content and selected_parts
261
+ # media_content = {
262
+ # 'id': char_name,
263
+ # 'title': char_name,
264
+ # 'content': char_background,
265
+ # 'description': char_data.get('description', ''),
266
+ # 'personality': char_data.get('personality', ''),
267
+ # 'scenario': char_data.get('scenario', '')
268
+ # }
269
+ # selected_parts = ['description', 'personality', 'scenario']
270
+ #
271
+ # prompt = char_data.get('post_history_instructions', '')
272
+ #
273
+ # # Prepare the input for the chat function
274
+ # if not history:
275
+ # full_message = f"{prompt}\n\n{user_name}: {message}" if prompt else f"{user_name}: {message}"
276
+ # else:
277
+ # full_message = f"{user_name}: {message}"
278
+ #
279
+ # # Call the chat function
280
+ # bot_message = chat(
281
+ # full_message,
282
+ # history,
283
+ # media_content,
284
+ # selected_parts,
285
+ # api_endpoint,
286
+ # api_key,
287
+ # prompt,
288
+ # temperature,
289
+ # system_message
290
+ # )
291
+ #
292
+ # # Update history
293
+ # history.append((message, bot_message))
294
+ # return history
295
+ #
296
+ # def save_chat_history(history, character_name):
297
+ # # Create the Saved_Chats folder if it doesn't exist
298
+ # save_directory = "Saved_Chats"
299
+ # os.makedirs(save_directory, exist_ok=True)
300
+ #
301
+ # timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
302
+ # filename = f"chat_history_{character_name}_{timestamp}.json"
303
+ # filepath = os.path.join(save_directory, filename)
304
+ #
305
+ # chat_data = {
306
+ # "character": character_name,
307
+ # "timestamp": timestamp,
308
+ # "history": history
309
+ # }
310
+ #
311
+ # try:
312
+ # with open(filepath, 'w', encoding='utf-8') as f:
313
+ # json.dump(chat_data, f, ensure_ascii=False, indent=2)
314
+ # return filepath
315
+ # except Exception as e:
316
+ # return f"Error saving chat: {str(e)}"
317
+ #
318
+ # def save_current_chat(history, char_data):
319
+ # if not char_data or not history:
320
+ # return "No chat to save or character not selected."
321
+ #
322
+ # character_name = char_data.get('name', 'Unknown')
323
+ # result = save_chat_history(history, character_name)
324
+ # if result.startswith("Error"):
325
+ # return result
326
+ # return f"Chat saved successfully as {result}"
327
+ #
328
+ # def regenerate_last_message(history, char_data, api_name, api_key, temperature, user_name):
329
+ # if not history:
330
+ # return history
331
+ #
332
+ # last_user_message = history[-1][0]
333
+ # new_history = history[:-1]
334
+ #
335
+ # return character_chat_wrapper(last_user_message, new_history, char_data, api_name, api_key, temperature,
336
+ # user_name)
337
+ #
338
+ # import_chat_button.click(
339
+ # fn=lambda: gr.update(visible=True),
340
+ # outputs=chat_file_upload
341
+ # )
342
+ #
343
+ # chat_file_upload.change(
344
+ # fn=import_chat_history,
345
+ # inputs=[chat_file_upload, chat_history, character_data],
346
+ # outputs=[chat_history, character_data, save_status]
347
+ # )
348
+ #
349
+ # def update_character_info(name):
350
+ # from App_Function_Libraries.Chat import load_characters
351
+ # characters = load_characters()
352
+ # char_data = characters.get(name)
353
+ #
354
+ # image_path = char_data.get('image_path') if char_data else None
355
+ #
356
+ # logging.debug(f"Character: {name}")
357
+ # logging.debug(f"Character data: {char_data}")
358
+ # logging.debug(f"Image path: {image_path}")
359
+ #
360
+ # if image_path:
361
+ # if os.path.exists(image_path):
362
+ # logging.debug(f"Image file exists at {image_path}")
363
+ # if os.access(image_path, os.R_OK):
364
+ # logging.debug(f"Image file is readable")
365
+ # else:
366
+ # logging.warning(f"Image file is not readable: {image_path}")
367
+ # image_path = None
368
+ # else:
369
+ # logging.warning(f"Image file does not exist: {image_path}")
370
+ # image_path = None
371
+ # else:
372
+ # logging.warning("No image path provided for the character")
373
+ #
374
+ # return char_data, None, image_path # Return None for chat_history
375
+ #
376
+ # def on_character_select(name):
377
+ # logging.debug(f"Character selected: {name}")
378
+ # return update_character_info_with_error_handling(name)
379
+ #
380
+ # def clear_chat_history():
381
+ # return [], None # Return empty list for chat_history and None for character_data
382
+ #
383
+ # def update_character_info_with_error_handling(name):
384
+ # logging.debug(f"Entering update_character_info_with_error_handling for character: {name}")
385
+ # try:
386
+ # char_data, _, image_path = update_character_info(name)
387
+ # logging.debug(f"Retrieved data: char_data={bool(char_data)}, image_path={image_path}")
388
+ #
389
+ # if char_data:
390
+ # first_message = char_data.get('first_mes', "Hello! I'm ready to chat.")
391
+ # chat_history = [(None, first_message)] if first_message else []
392
+ # else:
393
+ # chat_history = []
394
+ #
395
+ # logging.debug(f"Created chat_history with length: {len(chat_history)}")
396
+ #
397
+ # if image_path and os.path.exists(image_path):
398
+ # logging.debug(f"Image file exists at {image_path}")
399
+ # return char_data, chat_history, image_path
400
+ # else:
401
+ # logging.warning(f"Image not found or invalid path: {image_path}")
402
+ # return char_data, chat_history, None
403
+ # except Exception as e:
404
+ # logging.error(f"Error updating character info: {str(e)}", exc_info=True)
405
+ # return None, [], None
406
+ # finally:
407
+ # logging.debug("Exiting update_character_info_with_error_handling")
408
+ #
409
+ # # Define States for conversation_id and media_content, which are required for saving chat history
410
+ # conversation_id = gr.State(str(uuid.uuid4()))
411
+ # media_content = gr.State({})
412
+ #
413
+ # import_card_button.click(
414
+ # fn=import_character,
415
+ # inputs=[character_card_upload],
416
+ # outputs=[character_data, character_dropdown]
417
+ # )
418
+ #
419
+ # load_characters_button.click(
420
+ # fn=lambda: gr.update(choices=get_character_names()),
421
+ # outputs=character_dropdown
422
+ # )
423
+ #
424
+ # clear_chat_button.click(
425
+ # fn=clear_chat_history,
426
+ # inputs=[],
427
+ # outputs=[chat_history, character_data]
428
+ # )
429
+ #
430
+ # character_dropdown.change(
431
+ # fn=on_character_select,
432
+ # inputs=[character_dropdown],
433
+ # outputs=[character_data, chat_history, character_image]
434
+ # )
435
+ #
436
+ # send_message_button.click(
437
+ # fn=character_chat_wrapper,
438
+ # inputs=[user_input, chat_history, character_data, api_name_input, api_key_input, temperature_slider,
439
+ # user_name_input],
440
+ # outputs=[chat_history]
441
+ # ).then(lambda: "", outputs=user_input)
442
+ #
443
+ # regenerate_button.click(
444
+ # fn=regenerate_last_message,
445
+ # inputs=[chat_history, character_data, api_name_input, api_key_input, temperature_slider, user_name_input],
446
+ # outputs=[chat_history]
447
+ # )
448
+ #
449
+ # user_name_input.change(
450
+ # fn=lambda name: name,
451
+ # inputs=[user_name_input],
452
+ # outputs=[user_name]
453
+ # )
454
+ #
455
+ # # FIXME - Implement saving chat history to database; look at Chat_UI.py for reference
456
+ # save_chat_history_to_db.click(
457
+ # save_chat_history_to_db_wrapper,
458
+ # inputs=[chat_history, conversation_id, media_content, chat_media_name],
459
+ # outputs=[conversation_id, gr.Textbox(label="Save Status")]
460
+ # )
461
+ #
462
+ # return character_data, chat_history, user_input, user_name, character_image
463
+
464
+
465
+ #
466
+ # End of Character chat tab
467
+ ######################################################################################################################
468
+ #
469
+ # Multi-Character Chat Interface
470
+
471
+ def character_interaction_setup():
472
+ characters = load_characters()
473
+ return characters, [], None, None
474
+
475
+
476
+ def extract_character_response(response: Union[str, Tuple]) -> str:
477
+ if isinstance(response, tuple):
478
+ # If it's a tuple, try to extract the first string element
479
+ for item in response:
480
+ if isinstance(item, str):
481
+ return item.strip()
482
+ # If no string found, return a default message
483
+ return "I'm not sure how to respond."
484
+ elif isinstance(response, str):
485
+ # If it's already a string, just return it
486
+ return response.strip()
487
+ else:
488
+ # For any other type, return a default message
489
+ return "I'm having trouble forming a response."
490
+
491
+ # def process_character_response(response: str) -> str:
492
+ # # Remove any leading explanatory text before the first '---'
493
+ # parts = response.split('---')
494
+ # if len(parts) > 1:
495
+ # return '---' + '---'.join(parts[1:])
496
+ # return response.strip()
497
+ def process_character_response(response: Union[str, Tuple]) -> str:
498
+ if isinstance(response, tuple):
499
+ response = ' '.join(str(item) for item in response if isinstance(item, str))
500
+
501
+ if isinstance(response, str):
502
+ # Remove any leading explanatory text before the first '---'
503
+ parts = response.split('---')
504
+ if len(parts) > 1:
505
+ return '---' + '---'.join(parts[1:])
506
+ return response.strip()
507
+ else:
508
+ return "I'm having trouble forming a response."
509
+
510
+ def character_turn(characters: Dict, conversation: List[Tuple[str, str]],
511
+ current_character: str, other_characters: List[str],
512
+ api_endpoint: str, api_key: str, temperature: float,
513
+ scenario: str = "") -> Tuple[List[Tuple[str, str]], str]:
514
+ if not current_character or current_character not in characters:
515
+ return conversation, current_character
516
+
517
+ if not conversation and scenario:
518
+ conversation.append(("Scenario", scenario))
519
+
520
+ current_char = characters[current_character]
521
+ other_chars = [characters[char] for char in other_characters if char in characters and char != current_character]
522
+
523
+ prompt = f"{current_char['name']}'s personality: {current_char['personality']}\n"
524
+ for char in other_chars:
525
+ prompt += f"{char['name']}'s personality: {char['personality']}\n"
526
+ prompt += "Conversation so far:\n" + "\n".join([f"{sender}: {message}" for sender, message in conversation])
527
+ prompt += f"\n\nHow would {current_char['name']} respond?"
528
+
529
+ try:
530
+ response = chat_wrapper(prompt, conversation, {}, [], api_endpoint, api_key, "", None, False, temperature, "")
531
+ processed_response = process_character_response(response)
532
+ conversation.append((current_char['name'], processed_response))
533
+ except Exception as e:
534
+ error_message = f"Error generating response: {str(e)}"
535
+ conversation.append((current_char['name'], error_message))
536
+
537
+ return conversation, current_character
538
+
539
+
540
+ def character_interaction(character1: str, character2: str, api_endpoint: str, api_key: str,
541
+ num_turns: int, scenario: str, temperature: float,
542
+ user_interjection: str = "") -> List[str]:
543
+ characters = load_characters()
544
+ char1 = characters[character1]
545
+ char2 = characters[character2]
546
+ conversation = []
547
+ current_speaker = char1
548
+ other_speaker = char2
549
+
550
+ # Add scenario to the conversation start
551
+ if scenario:
552
+ conversation.append(f"Scenario: {scenario}")
553
+
554
+ for turn in range(num_turns):
555
+ # Construct the prompt for the current speaker
556
+ prompt = f"{current_speaker['name']}'s personality: {current_speaker['personality']}\n"
557
+ prompt += f"{other_speaker['name']}'s personality: {other_speaker['personality']}\n"
558
+ prompt += f"Conversation so far:\n" + "\n".join(
559
+ [msg if isinstance(msg, str) else f"{msg[0]}: {msg[1]}" for msg in conversation])
560
+
561
+ # Add user interjection if provided
562
+ if user_interjection and turn == num_turns // 2:
563
+ prompt += f"\n\nUser interjection: {user_interjection}\n"
564
+ conversation.append(f"User: {user_interjection}")
565
+
566
+ prompt += f"\n\nHow would {current_speaker['name']} respond?"
567
+
568
+ # FIXME - figure out why the double print is happening
569
+ # Get response from the LLM
570
+ response = chat_wrapper(prompt, conversation, {}, [], api_endpoint, api_key, "", None, False, temperature, "")
571
+
572
+ # Add the response to the conversation
573
+ conversation.append((current_speaker['name'], response))
574
+
575
+ # Switch speakers
576
+ current_speaker, other_speaker = other_speaker, current_speaker
577
+
578
+ # Convert the conversation to a list of strings for output
579
+ return [f"{msg[0]}: {msg[1]}" if isinstance(msg, tuple) else msg for msg in conversation]
580
+
581
+
582
+ def create_multiple_character_chat_tab():
583
+ with gr.TabItem("Multi-Character Chat"):
584
+ characters, conversation, current_character, other_character = character_interaction_setup()
585
+
586
+ with gr.Blocks() as character_interaction:
587
+ gr.Markdown("# Multi-Character Chat")
588
+
589
+ with gr.Row():
590
+ num_characters = gr.Dropdown(label="Number of Characters", choices=["2", "3", "4"], value="2")
591
+ character_selectors = [gr.Dropdown(label=f"Character {i + 1}", choices=list(characters.keys())) for i in
592
+ range(4)]
593
+
594
+ api_endpoint = gr.Dropdown(label="API Endpoint",
595
+ choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek",
596
+ "Mistral",
597
+ "OpenRouter", "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM",
598
+ "ollama", "HuggingFace",
599
+ "Custom-OpenAI-API"],
600
+ value="HuggingFace")
601
+ api_key = gr.Textbox(label="API Key (if required)", type="password")
602
+ temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, step=0.1, value=0.7)
603
+ scenario = gr.Textbox(label="Scenario (optional)", lines=3)
604
+
605
+ chat_display = gr.Chatbot(label="Character Interaction")
606
+ current_index = gr.State(0)
607
+
608
+ next_turn_btn = gr.Button("Next Turn")
609
+ narrator_input = gr.Textbox(label="Narrator Input", placeholder="Add a narration or description...")
610
+ add_narration_btn = gr.Button("Add Narration")
611
+ error_box = gr.Textbox(label="Error Messages", visible=False)
612
+ reset_btn = gr.Button("Reset Conversation")
613
+ chat_media_name = gr.Textbox(label="Custom Chat Name(optional)", visible=True)
614
+ save_chat_history_to_db = gr.Button("Save Chat History to DataBase")
615
+
616
+ def update_character_selectors(num):
617
+ return [gr.update(visible=True) if i < int(num) else gr.update(visible=False) for i in range(4)]
618
+
619
+ num_characters.change(
620
+ update_character_selectors,
621
+ inputs=[num_characters],
622
+ outputs=character_selectors
623
+ )
624
+
625
+ def reset_conversation():
626
+ return [], 0, gr.update(value=""), gr.update(value="")
627
+
628
+ def take_turn(conversation, current_index, char1, char2, char3, char4, api_endpoint, api_key, temperature,
629
+ scenario):
630
+ char_selectors = [char for char in [char1, char2, char3, char4] if char] # Remove None values
631
+ num_chars = len(char_selectors)
632
+
633
+ if num_chars == 0:
634
+ return conversation, current_index # No characters selected, return without changes
635
+
636
+ if not conversation:
637
+ conversation = []
638
+ if scenario:
639
+ conversation.append(("Scenario", scenario))
640
+
641
+ current_character = char_selectors[current_index % num_chars]
642
+ next_index = (current_index + 1) % num_chars
643
+
644
+ prompt = f"Character speaking: {current_character}\nOther characters: {', '.join(char for char in char_selectors if char != current_character)}\n"
645
+ prompt += "Generate the next part of the conversation, including character dialogues and actions. Characters should speak in first person."
646
+
647
+ response, new_conversation, _ = chat_wrapper(prompt, conversation, {}, [], api_endpoint, api_key, "",
648
+ None, False, temperature, "")
649
+
650
+ # Format the response
651
+ formatted_lines = []
652
+ for line in response.split('\n'):
653
+ if ':' in line:
654
+ speaker, text = line.split(':', 1)
655
+ formatted_lines.append(f"**{speaker.strip()}**: {text.strip()}")
656
+ else:
657
+ formatted_lines.append(line)
658
+
659
+ formatted_response = '\n'.join(formatted_lines)
660
+
661
+ # Update the last message in the conversation with the formatted response
662
+ if new_conversation:
663
+ new_conversation[-1] = (new_conversation[-1][0], formatted_response)
664
+ else:
665
+ new_conversation.append((current_character, formatted_response))
666
+
667
+ return new_conversation, next_index
668
+
669
+ def add_narration(narration, conversation):
670
+ if narration:
671
+ conversation.append(("Narrator", narration))
672
+ return conversation, ""
673
+
674
+ def take_turn_with_error_handling(conversation, current_index, char1, char2, char3, char4, api_endpoint,
675
+ api_key, temperature, scenario):
676
+ try:
677
+ new_conversation, next_index = take_turn(conversation, current_index, char1, char2, char3, char4,
678
+ api_endpoint, api_key, temperature, scenario)
679
+ return new_conversation, next_index, gr.update(visible=False, value="")
680
+ except Exception as e:
681
+ error_message = f"An error occurred: {str(e)}"
682
+ return conversation, current_index, gr.update(visible=True, value=error_message)
683
+
684
+ # Define States for conversation_id and media_content, which are required for saving chat history
685
+ media_content = gr.State({})
686
+ conversation_id = gr.State(str(uuid.uuid4()))
687
+
688
+ next_turn_btn.click(
689
+ take_turn_with_error_handling,
690
+ inputs=[chat_display, current_index] + character_selectors + [api_endpoint, api_key, temperature,
691
+ scenario],
692
+ outputs=[chat_display, current_index, error_box]
693
+ )
694
+
695
+ add_narration_btn.click(
696
+ add_narration,
697
+ inputs=[narrator_input, chat_display],
698
+ outputs=[chat_display, narrator_input]
699
+ )
700
+
701
+ reset_btn.click(
702
+ reset_conversation,
703
+ outputs=[chat_display, current_index, scenario, narrator_input]
704
+ )
705
+
706
+ # FIXME - Implement saving chat history to database; look at Chat_UI.py for reference
707
+ save_chat_history_to_db.click(
708
+ save_chat_history_to_db_wrapper,
709
+ inputs=[chat_display, conversation_id, media_content, chat_media_name],
710
+ outputs=[conversation_id, gr.Textbox(label="Save Status")]
711
+ )
712
+
713
+ return character_interaction
714
+
715
+ #
716
+ # End of Multi-Character chat tab
717
+ ########################################################################################################################
718
+ #
719
+ # Narrator-Controlled Conversation Tab
720
+
721
+ # From `Fuzzlewumper` on Reddit.
722
+ def create_narrator_controlled_conversation_tab():
723
+ with gr.TabItem("Narrator-Controlled Conversation"):
724
+ gr.Markdown("# Narrator-Controlled Conversation")
725
+
726
+ with gr.Row():
727
+ with gr.Column(scale=1):
728
+ api_endpoint = gr.Dropdown(
729
+ label="API Endpoint",
730
+ choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral",
731
+ "OpenRouter", "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "ollama", "HuggingFace",
732
+ "Custom-OpenAI-API"],
733
+ value="HuggingFace"
734
+ )
735
+ api_key = gr.Textbox(label="API Key (if required)", type="password")
736
+ temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, step=0.1, value=0.7)
737
+
738
+ with gr.Column(scale=2):
739
+ narrator_input = gr.Textbox(
740
+ label="Narrator Input",
741
+ placeholder="Set the scene or provide context...",
742
+ lines=3
743
+ )
744
+
745
+ character_inputs = []
746
+ for i in range(4): # Allow up to 4 characters
747
+ with gr.Row():
748
+ name = gr.Textbox(label=f"Character {i + 1} Name")
749
+ description = gr.Textbox(label=f"Character {i + 1} Description", lines=3)
750
+ character_inputs.append((name, description))
751
+
752
+ conversation_display = gr.Chatbot(label="Conversation", height=400)
753
+ user_input = gr.Textbox(label="Your Input (optional)", placeholder="Add your own dialogue or action...")
754
+
755
+ with gr.Row():
756
+ generate_btn = gr.Button("Generate Next Interaction")
757
+ reset_btn = gr.Button("Reset Conversation")
758
+ chat_media_name = gr.Textbox(label="Custom Chat Name(optional)", visible=True)
759
+ save_chat_history_to_db = gr.Button("Save Chat History to DataBase")
760
+
761
+ error_box = gr.Textbox(label="Error Messages", visible=False)
762
+
763
+ # Define States for conversation_id and media_content, which are required for saving chat history
764
+ conversation_id = gr.State(str(uuid.uuid4()))
765
+ media_content = gr.State({})
766
+
767
+ def generate_interaction(conversation, narrator_text, user_text, api_endpoint, api_key, temperature,
768
+ *character_data):
769
+ try:
770
+ characters = [{"name": name.strip(), "description": desc.strip()}
771
+ for name, desc in zip(character_data[::2], character_data[1::2])
772
+ if name.strip() and desc.strip()]
773
+
774
+ if not characters:
775
+ raise ValueError("At least one character must be defined.")
776
+
777
+ prompt = f"Narrator: {narrator_text}\n\n"
778
+ for char in characters:
779
+ prompt += f"Character '{char['name']}': {char['description']}\n"
780
+ prompt += "\nGenerate the next part of the conversation, including character dialogues and actions. "
781
+ prompt += "Characters should speak in first person. "
782
+ if user_text:
783
+ prompt += f"\nIncorporate this user input: {user_text}"
784
+ prompt += "\nResponse:"
785
+
786
+ response, conversation, _ = chat_wrapper(prompt, conversation, {}, [], api_endpoint, api_key, "", None,
787
+ False, temperature, "")
788
+
789
+ # Format the response
790
+ formatted_lines = []
791
+ for line in response.split('\n'):
792
+ if ':' in line:
793
+ speaker, text = line.split(':', 1)
794
+ formatted_lines.append(f"**{speaker.strip()}**: {text.strip()}")
795
+ else:
796
+ formatted_lines.append(line)
797
+
798
+ formatted_response = '\n'.join(formatted_lines)
799
+
800
+ # Update the last message in the conversation with the formatted response
801
+ if conversation:
802
+ conversation[-1] = (conversation[-1][0], formatted_response)
803
+ else:
804
+ conversation.append((None, formatted_response))
805
+
806
+ return conversation, gr.update(value=""), gr.update(value=""), gr.update(visible=False, value="")
807
+ except Exception as e:
808
+ error_message = f"An error occurred: {str(e)}"
809
+ return conversation, gr.update(), gr.update(), gr.update(visible=True, value=error_message)
810
+
811
+ def reset_conversation():
812
+ return [], gr.update(value=""), gr.update(value=""), gr.update(visible=False, value="")
813
+
814
+ generate_btn.click(
815
+ generate_interaction,
816
+ inputs=[conversation_display, narrator_input, user_input, api_endpoint, api_key, temperature] +
817
+ [input for char_input in character_inputs for input in char_input],
818
+ outputs=[conversation_display, narrator_input, user_input, error_box]
819
+ )
820
+
821
+ reset_btn.click(
822
+ reset_conversation,
823
+ outputs=[conversation_display, narrator_input, user_input, error_box]
824
+ )
825
+
826
+ # FIXME - Implement saving chat history to database; look at Chat_UI.py for reference
827
+ save_chat_history_to_db.click(
828
+ save_chat_history_to_db_wrapper,
829
+ inputs=[conversation_display, conversation_id, media_content, chat_media_name],
830
+ outputs=[conversation_id, gr.Textbox(label="Save Status")]
831
+ )
832
+
833
+
834
+ return api_endpoint, api_key, temperature, narrator_input, conversation_display, user_input, generate_btn, reset_btn, error_box
835
+
836
+ #
837
+ # End of Narrator-Controlled Conversation tab
838
+ ########################################################################################################################
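The tab builders above (create_multiple_character_chat_tab and create_narrator_controlled_conversation_tab) only construct their own gr.TabItem blocks and register their callbacks; nothing in this file mounts them into an application. A minimal sketch of how they could be wired into a top-level interface, in the same style as the other Gradio_UI tab modules (the surrounding Blocks/Tabs layout here is an assumption for illustration, not code from this commit):

    # Hypothetical mounting code, not part of this commit.
    import gradio as gr

    from App_Function_Libraries.Gradio_UI.Character_interaction_tab import (
        create_multiple_character_chat_tab,
        create_narrator_controlled_conversation_tab,
    )

    with gr.Blocks() as demo:
        with gr.Tabs():
            # Each helper opens its own gr.TabItem and wires its buttons internally.
            create_multiple_character_chat_tab()
            create_narrator_controlled_conversation_tab()

    demo.launch()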
App_Function_Libraries/Gradio_UI/Chat_Workflows.py CHANGED
@@ -23,7 +23,6 @@ with json_path.open('r') as f:
23
  workflows = json.load(f)
24
 
25
 
26
- # FIXME - broken Completely. Doesn't work.
27
  def chat_workflows_tab():
28
  with gr.TabItem("Chat Workflows"):
29
  gr.Markdown("# Workflows using LLMs")
 
23
  workflows = json.load(f)
24
 
25
 
 
26
  def chat_workflows_tab():
27
  with gr.TabItem("Chat Workflows"):
28
  gr.Markdown("# Workflows using LLMs")
App_Function_Libraries/Gradio_UI/Chat_ui.py CHANGED
@@ -64,10 +64,23 @@ def get_system_prompt(preset_name):
64
  return prompts["system_prompt"]
65
 
66
  def clear_chat():
67
- # Return empty list for chatbot and None for conversation_id
 
 
 
68
  return gr.update(value=[]), None
69
 
70
 
 
 
 
 
 
 
 
 
 
 
71
  # FIXME - add additional features....
72
  def chat_wrapper(message, history, media_content, selected_parts, api_endpoint, api_key, custom_prompt, conversation_id,
73
  save_conversation, temperature, system_prompt, max_tokens=None, top_p=None, frequency_penalty=None,
@@ -214,7 +227,7 @@ def create_chat_interface():
214
  value="You are a helpful AI assitant",
215
  lines=3,
216
  visible=False)
217
- with gr.Column():
218
  chatbot = gr.Chatbot(height=600, elem_classes="chatbot-container")
219
  msg = gr.Textbox(label="Enter your message")
220
  submit = gr.Button("Submit")
@@ -653,57 +666,128 @@ def create_chat_interface_four():
653
  overflow-y: auto;
654
  }
655
  """
 
656
  with gr.TabItem("Four Independent API Chats"):
657
  gr.Markdown("# Four Independent API Chat Interfaces")
658
 
659
  with gr.Row():
660
  with gr.Column():
661
- preset_prompt = gr.Dropdown(label="Select Preset Prompt", choices=load_preset_prompts(), visible=True)
662
- user_prompt = gr.Textbox(label="Modify Prompt", lines=3, value=".")
 
 
 
 
 
 
 
663
  with gr.Column():
664
  gr.Markdown("Scroll down for the chat windows...")
 
665
  chat_interfaces = []
666
- for row in range(2):
667
- with gr.Row():
668
- for col in range(2):
669
- i = row * 2 + col
670
- with gr.Column():
671
- gr.Markdown(f"### Chat Window {i + 1}")
672
- api_endpoint = gr.Dropdown(label=f"API Endpoint {i + 1}",
673
- choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq",
674
- "DeepSeek", "Mistral", "OpenRouter", "Llama.cpp", "Kobold",
675
- "Ooba",
676
- "Tabbyapi", "VLLM", "ollama", "HuggingFace"])
677
- api_key = gr.Textbox(label=f"API Key {i + 1} (if required)", type="password")
678
- temperature = gr.Slider(label=f"Temperature {i + 1}", minimum=0.0, maximum=1.0, step=0.05,
679
- value=0.7)
680
- chatbot = gr.Chatbot(height=400, elem_classes="chat-window")
681
- msg = gr.Textbox(label=f"Enter your message for Chat {i + 1}")
682
- submit = gr.Button(f"Submit to Chat {i + 1}")
683
-
684
- chat_interfaces.append({
685
- 'api_endpoint': api_endpoint,
686
- 'api_key': api_key,
687
- 'temperature': temperature,
688
- 'chatbot': chatbot,
689
- 'msg': msg,
690
- 'submit': submit,
691
- 'chat_history': gr.State([])
692
- })
693
 
694
- preset_prompt.change(update_user_prompt, inputs=preset_prompt, outputs=user_prompt)
 
695
 
696
  def chat_wrapper_single(message, chat_history, api_endpoint, api_key, temperature, user_prompt):
697
  logging.debug(f"Chat Wrapper Single - Message: {message}, Chat History: {chat_history}")
 
 
698
  new_msg, new_history, _ = chat_wrapper(
699
- message, chat_history, {}, [], # Empty media_content and selected_parts
700
- api_endpoint, api_key, user_prompt, None, # No conversation_id
701
- False, # Not saving conversation
702
- temperature=temperature, system_prompt=""
 
 
703
  )
704
- chat_history.append((message, new_msg))
 
705
  return "", chat_history, chat_history
706
 
 
707
  for interface in chat_interfaces:
708
  logging.debug(f"Chat Interface - Clicked Submit for Chat {interface['chatbot']}"),
709
  interface['submit'].click(
@@ -723,6 +807,13 @@ def create_chat_interface_four():
723
  ]
724
  )
725
 
 
 
 
 
 
 
 
726
 
727
  def chat_wrapper_single(message, chat_history, chatbot, api_endpoint, api_key, temperature, media_content,
728
  selected_parts, conversation_id, save_conversation, user_prompt):
 
64
  return prompts["system_prompt"]
65
 
66
  def clear_chat():
67
+ """
68
+ Return empty list for chatbot and None for conversation_id
69
+ @return:
70
+ """
71
  return gr.update(value=[]), None
72
 
73
 
74
+ def clear_chat_single():
75
+ """
76
+ Clears the chatbot and chat history.
77
+
78
+ Returns:
79
+ list: Empty list for chatbot messages.
80
+ list: Empty list for chat history.
81
+ """
82
+ return [], []
83
+
84
  # FIXME - add additional features....
85
  def chat_wrapper(message, history, media_content, selected_parts, api_endpoint, api_key, custom_prompt, conversation_id,
86
  save_conversation, temperature, system_prompt, max_tokens=None, top_p=None, frequency_penalty=None,
 
227
  value="You are a helpful AI assitant",
228
  lines=3,
229
  visible=False)
230
+ with gr.Column(scale=2):
231
  chatbot = gr.Chatbot(height=600, elem_classes="chatbot-container")
232
  msg = gr.Textbox(label="Enter your message")
233
  submit = gr.Button("Submit")
 
666
  overflow-y: auto;
667
  }
668
  """
669
+
670
  with gr.TabItem("Four Independent API Chats"):
671
  gr.Markdown("# Four Independent API Chat Interfaces")
672
 
673
  with gr.Row():
674
  with gr.Column():
675
+ preset_prompt = gr.Dropdown(
676
+ label="Select Preset Prompt",
677
+ choices=load_preset_prompts(),
678
+ visible=True
679
+ )
680
+ user_prompt = gr.Textbox(
681
+ label="Modify Prompt",
682
+ lines=3
683
+ )
684
  with gr.Column():
685
  gr.Markdown("Scroll down for the chat windows...")
686
+
687
  chat_interfaces = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
688
 
689
+ def create_single_chat_interface(index, user_prompt_component):
690
+ """
691
+ Creates a single chat interface with its own set of components and event bindings.
692
+
693
+ Parameters:
694
+ index (int): The index of the chat interface.
695
+ user_prompt_component (gr.Textbox): The user prompt textbox component.
696
+
697
+ Returns:
698
+ dict: A dictionary containing all components of the chat interface.
699
+ """
700
+ with gr.Column():
701
+ gr.Markdown(f"### Chat Window {index + 1}")
702
+ api_endpoint = gr.Dropdown(
703
+ label=f"API Endpoint {index + 1}",
704
+ choices=[
705
+ "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq",
706
+ "DeepSeek", "Mistral", "OpenRouter", "Llama.cpp", "Kobold",
707
+ "Ooba", "Tabbyapi", "VLLM", "ollama", "HuggingFace"
708
+ ]
709
+ )
710
+ api_key = gr.Textbox(
711
+ label=f"API Key {index + 1} (if required)",
712
+ type="password"
713
+ )
714
+ temperature = gr.Slider(
715
+ label=f"Temperature {index + 1}",
716
+ minimum=0.0,
717
+ maximum=1.0,
718
+ step=0.05,
719
+ value=0.7
720
+ )
721
+ chatbot = gr.Chatbot(height=400, elem_classes="chat-window")
722
+ msg = gr.Textbox(label=f"Enter your message for Chat {index + 1}")
723
+ submit = gr.Button(f"Submit to Chat {index + 1}")
724
+ clear_chat_button = gr.Button(f"Clear Chat {index + 1}")
725
+
726
+ # State to maintain chat history
727
+ chat_history = gr.State([])
728
+
729
+ # Append to chat_interfaces list
730
+ chat_interfaces.append({
731
+ 'api_endpoint': api_endpoint,
732
+ 'api_key': api_key,
733
+ 'temperature': temperature,
734
+ 'chatbot': chatbot,
735
+ 'msg': msg,
736
+ 'submit': submit,
737
+ 'clear_chat_button': clear_chat_button,
738
+ 'chat_history': chat_history
739
+ })
740
+
741
+ # # Create four chat interfaces
742
+ # for i in range(4):
743
+ # create_single_chat_interface(i, user_prompt)
744
+
745
+ # Create four chat interfaces arranged in a 2x2 grid
746
+ with gr.Row():
747
+ for i in range(2):
748
+ with gr.Column():
749
+ for j in range(2):
750
+ create_single_chat_interface(i * 2 + j, user_prompt)
751
+
752
+
753
+ # Update user_prompt based on preset_prompt selection
754
+ preset_prompt.change(
755
+ fn=update_user_prompt,
756
+ inputs=preset_prompt,
757
+ outputs=user_prompt
758
+ )
759
 
760
  def chat_wrapper_single(message, chat_history, api_endpoint, api_key, temperature, user_prompt):
761
  logging.debug(f"Chat Wrapper Single - Message: {message}, Chat History: {chat_history}")
762
+
763
+ # Call chat_wrapper with the new signature and the additional parameters
764
  new_msg, new_history, _ = chat_wrapper(
765
+ message,
766
+ chat_history,
767
+ {}, # Empty media_content
768
+ [], # Empty selected_parts
769
+ api_endpoint,
770
+ api_key,
771
+ user_prompt, # custom_prompt
772
+ None, # conversation_id
773
+ False, # save_conversation
774
+ temperature, # temperature
775
+ system_prompt="", # system_prompt
776
+ max_tokens=None, # Additional parameters with default None values
777
+ top_p=None,
778
+ frequency_penalty=None,
779
+ presence_penalty=None,
780
+ stop_sequence=None
781
  )
782
+ # Only append to history if the new message was successful (i.e., no error in API response)
783
+ if "API request failed" not in new_msg:
784
+ chat_history.append((message, new_msg))
785
+ else:
786
+ logging.error(f"API request failed: {new_msg}")
787
+
788
  return "", chat_history, chat_history
789
 
790
+ # Attach click events for each chat interface
791
  for interface in chat_interfaces:
792
  logging.debug(f"Chat Interface - Clicked Submit for Chat {interface['chatbot']}"),
793
  interface['submit'].click(
 
807
  ]
808
  )
809
 
810
+ # Bind the clear chat button
811
+ interface['clear_chat_button'].click(
812
+ clear_chat_single,
813
+ inputs=[],
814
+ outputs=[interface['chatbot'], interface['chat_history']]
815
+ )
816
+
817
 
818
  def chat_wrapper_single(message, chat_history, chatbot, api_endpoint, api_key, temperature, media_content,
819
  selected_parts, conversation_id, save_conversation, user_prompt):
App_Function_Libraries/Gradio_UI/Embeddings_tab.py CHANGED
@@ -193,6 +193,10 @@ def create_view_embeddings_tab():
193
  label="Select API for Contextualized Embeddings",
194
  value="OpenAI"
195
  )
 
 
 
 
196
  contextual_api_key = gr.Textbox(label="API Key", lines=1)
197
 
198
  def get_items_with_embedding_status():
@@ -251,7 +255,8 @@ def create_view_embeddings_tab():
251
  logging.error(f"Error in check_embedding_status: {str(e)}")
252
  return f"Error processing item: {selected_item}. Details: {str(e)}", "", ""
253
 
254
- def create_new_embedding_for_item(selected_item, provider, model, api_url, method, max_size, overlap, adaptive, item_mapping, contextual_api_choice=None):
 
255
  if not selected_item:
256
  return "Please select an item", "", ""
257
 
@@ -290,10 +295,14 @@ def create_view_embeddings_tab():
290
  chunk_metadata = chunk['metadata']
291
  if chunk_count == 0:
292
  chunk_count = 1
293
- # Generate contextual summary
294
- logging.debug(f"Generating contextual summary for chunk {chunk_count}")
295
- context = situate_context(contextual_api_choice, item['content'], chunk_text)
296
- contextualized_text = f"{chunk_text}\n\nContextual Summary: {context}"
 
 
 
 
297
 
298
  chunk_id = f"doc_{item_id}_chunk_{i}"
299
  metadata = {
@@ -307,6 +316,7 @@ def create_view_embeddings_tab():
307
  "embedding_model": model,
308
  "embedding_provider": provider,
309
  "original_text": chunk_text,
 
310
  "contextual_summary": context,
311
  **chunk_metadata
312
  }
@@ -323,8 +333,17 @@ def create_view_embeddings_tab():
323
  # Store in Chroma
324
  store_in_chroma(collection_name, texts, embeddings, ids, metadatas)
325
 
 
326
  embedding_preview = str(embeddings[0][:50]) if embeddings else "No embeddings created"
327
- status = f"New contextual embeddings created and stored for item: {item['title']} (ID: {item_id})"
 
 
 
 
 
 
 
 
328
  return status, f"First 50 elements of new embedding:\n{embedding_preview}", json.dumps(metadatas[0], indent=2)
329
  except Exception as e:
330
  logging.error(f"Error in create_new_embedding_for_item: {str(e)}")
@@ -342,7 +361,8 @@ def create_view_embeddings_tab():
342
  create_new_embedding_button.click(
343
  create_new_embedding_for_item,
344
  inputs=[item_dropdown, embedding_provider, embedding_model, embedding_api_url,
345
- chunking_method, max_chunk_size, chunk_overlap, adaptive_chunking, item_mapping, contextual_api_choice],
 
346
  outputs=[embedding_status, embedding_preview, embedding_metadata]
347
  )
348
  embedding_provider.change(
@@ -351,7 +371,10 @@ def create_view_embeddings_tab():
351
  outputs=[embedding_api_url]
352
  )
353
 
354
- return item_dropdown, refresh_button, embedding_status, embedding_preview, embedding_metadata, create_new_embedding_button, embedding_provider, embedding_model, embedding_api_url, chunking_method, max_chunk_size, chunk_overlap, adaptive_chunking
 
 
 
355
 
356
 
357
  def create_purge_embeddings_tab():
 
193
  label="Select API for Contextualized Embeddings",
194
  value="OpenAI"
195
  )
196
+ use_contextual_embeddings = gr.Checkbox(
197
+ label="Use Contextual Embeddings",
198
+ value=True
199
+ )
200
  contextual_api_key = gr.Textbox(label="API Key", lines=1)
201
 
202
  def get_items_with_embedding_status():
 
255
  logging.error(f"Error in check_embedding_status: {str(e)}")
256
  return f"Error processing item: {selected_item}. Details: {str(e)}", "", ""
257
 
258
+ def create_new_embedding_for_item(selected_item, provider, model, api_url, method, max_size, overlap, adaptive,
259
+ item_mapping, use_contextual, contextual_api_choice=None):
260
  if not selected_item:
261
  return "Please select an item", "", ""
262
 
 
295
  chunk_metadata = chunk['metadata']
296
  if chunk_count == 0:
297
  chunk_count = 1
298
+ if use_contextual:
299
+ # Generate contextual summary
300
+ logging.debug(f"Generating contextual summary for chunk {chunk_count}")
301
+ context = situate_context(contextual_api_choice, item['content'], chunk_text)
302
+ contextualized_text = f"{chunk_text}\n\nContextual Summary: {context}"
303
+ else:
304
+ contextualized_text = chunk_text
305
+ context = None
306
 
307
  chunk_id = f"doc_{item_id}_chunk_{i}"
308
  metadata = {
 
316
  "embedding_model": model,
317
  "embedding_provider": provider,
318
  "original_text": chunk_text,
319
+ "use_contextual_embeddings": use_contextual,
320
  "contextual_summary": context,
321
  **chunk_metadata
322
  }
 
333
  # Store in Chroma
334
  store_in_chroma(collection_name, texts, embeddings, ids, metadatas)
335
 
336
+ # Create a preview of the first embedding
337
  embedding_preview = str(embeddings[0][:50]) if embeddings else "No embeddings created"
338
+
339
+ # Return status message
340
+ status = f"New embeddings created and stored for item: {item['title']} (ID: {item_id})"
341
+
342
+ # Add contextual summaries to status message if enabled
343
+ if use_contextual:
344
+ status += " (with contextual summaries)"
345
+
346
+ # Return status message, embedding preview, and metadata
347
  return status, f"First 50 elements of new embedding:\n{embedding_preview}", json.dumps(metadatas[0], indent=2)
348
  except Exception as e:
349
  logging.error(f"Error in create_new_embedding_for_item: {str(e)}")
 
361
  create_new_embedding_button.click(
362
  create_new_embedding_for_item,
363
  inputs=[item_dropdown, embedding_provider, embedding_model, embedding_api_url,
364
+ chunking_method, max_chunk_size, chunk_overlap, adaptive_chunking, item_mapping,
365
+ use_contextual_embeddings, contextual_api_choice],
366
  outputs=[embedding_status, embedding_preview, embedding_metadata]
367
  )
368
  embedding_provider.change(
 
371
  outputs=[embedding_api_url]
372
  )
373
 
374
+ return (item_dropdown, refresh_button, embedding_status, embedding_preview, embedding_metadata,
375
+ create_new_embedding_button, embedding_provider, embedding_model, embedding_api_url,
376
+ chunking_method, max_chunk_size, chunk_overlap, adaptive_chunking,
377
+ use_contextual_embeddings, contextual_api_choice, contextual_api_key)
378
 
379
 
380
  def create_purge_embeddings_tab():
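
The net effect of this change is that contextual summarization becomes optional: the new "Use Contextual Embeddings" checkbox is threaded into create_new_embedding_for_item as use_contextual, and each chunk is only run through situate_context when the flag is set. A minimal sketch of the per-chunk branch in isolation (build_chunk_text is an illustrative name, and situate_context is passed in as a parameter since its real implementation lives elsewhere in the codebase):

    def build_chunk_text(chunk_text, item_content, use_contextual, api_choice, situate_context):
        # When contextual embeddings are enabled, append an LLM-generated summary
        # that situates this chunk within the full document.
        if use_contextual:
            context = situate_context(api_choice, item_content, chunk_text)
            return f"{chunk_text}\n\nContextual Summary: {context}", context
        # Otherwise embed the raw chunk text and record no summary.
        return chunk_text, None

The returned context (or None) is what ends up in the chunk's metadata under "contextual_summary", alongside the new "use_contextual_embeddings" flag.
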
App_Function_Libraries/Gradio_UI/Gradio_Shared.py CHANGED
@@ -153,11 +153,12 @@ def update_user_prompt(preset_name):
         # Return a dictionary with all details
         return {
             "title": details[0],
-            "details": details[1],
-            "system_prompt": details[2],
-            "user_prompt": details[3] if len(details) > 3 else ""
+            "author": details[1],
+            "details": details[2],
+            "system_prompt": details[3],
+            "user_prompt": details[4] if len(details) > 3 else "",
         }
-    return {"title": "", "details": "", "system_prompt": "", "user_prompt": ""}
+    return {"title": "", "details": "", "system_prompt": "", "user_prompt": "", "author": ""}

 def browse_items(search_query, search_type):
     if search_type == 'Keyword':
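
With this change, update_user_prompt expects the prompt-details tuple to carry the author in position 1, shifting the remaining fields by one. A small illustration of the assumed ordering (the values are made up; the tuple itself comes from fetch_prompt_details and the prompts database):

    # Illustrative only: assumed tuple layout after the author field was added.
    details = ("Summarize", "oceansweep", "Condensed summary prompt",
               "You are a summarizer.", "Summarize the following text: {TEXT}")
    title, author, description, system_prompt, user_prompt = details[:5]
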
App_Function_Libraries/Gradio_UI/Media_edit.py CHANGED
@@ -211,6 +211,7 @@ def create_prompt_edit_tab():

             with gr.Column():
                 title_input = gr.Textbox(label="Title", placeholder="Enter the prompt title")
+                author_input = gr.Textbox(label="Author", placeholder="Enter the prompt's author", lines=3)
                 description_input = gr.Textbox(label="Description", placeholder="Enter the prompt description", lines=3)
                 system_prompt_input = gr.Textbox(label="System Prompt", placeholder="Enter the system prompt", lines=3)
                 user_prompt_input = gr.Textbox(label="User Prompt", placeholder="Enter the user prompt", lines=3)
@@ -225,7 +226,7 @@ def create_prompt_edit_tab():

         add_prompt_button.click(
             fn=add_or_update_prompt,
-            inputs=[title_input, description_input, system_prompt_input, user_prompt_input],
+            inputs=[title_input, author_input, description_input, system_prompt_input, user_prompt_input],
             outputs=add_prompt_output
         )

@@ -233,7 +234,7 @@ def create_prompt_edit_tab():
         prompt_dropdown.change(
             fn=load_prompt_details,
             inputs=[prompt_dropdown],
-            outputs=[title_input, description_input, system_prompt_input, user_prompt_input]
+            outputs=[title_input, author_input, system_prompt_input, user_prompt_input]
         )


@@ -251,6 +252,7 @@ def create_prompt_clone_tab():

             with gr.Column():
                 title_input = gr.Textbox(label="Title", placeholder="Enter the prompt title")
+                author_input = gr.Textbox(label="Author", placeholder="Enter the prompt's author", lines=3)
                 description_input = gr.Textbox(label="Description", placeholder="Enter the prompt description", lines=3)
                 system_prompt_input = gr.Textbox(label="System Prompt", placeholder="Enter the system prompt", lines=3)
                 user_prompt_input = gr.Textbox(label="User Prompt", placeholder="Enter the user prompt", lines=3)
@@ -268,7 +270,7 @@ def create_prompt_clone_tab():
         prompt_dropdown.change(
             fn=load_prompt_details,
             inputs=[prompt_dropdown],
-            outputs=[title_input, description_input, system_prompt_input, user_prompt_input]
+            outputs=[title_input, author_input, description_input, system_prompt_input, user_prompt_input]
         )

         def prepare_for_cloning(selected_prompt):
App_Function_Libraries/Gradio_UI/PDF_ingestion_tab.py CHANGED
@@ -26,7 +26,7 @@ def create_pdf_ingestion_tab():
         gr.Markdown("# Ingest PDF Files and Extract Metadata")
         with gr.Row():
             with gr.Column():
-                pdf_file_input = gr.File(label="Uploaded PDF File", file_types=[".pdf"], visible=False)
+                pdf_file_input = gr.File(label="Uploaded PDF File", file_types=[".pdf"], visible=True)
                 pdf_upload_button = gr.UploadButton("Click to Upload PDF", file_types=[".pdf"])
                 pdf_title_input = gr.Textbox(label="Title (Optional)")
                 pdf_author_input = gr.Textbox(label="Author (Optional)")
App_Function_Libraries/Gradio_UI/Prompt_Suggestion_tab.py ADDED
@@ -0,0 +1,136 @@
+# Description: Gradio UI for Creating and Testing new Prompts
+#
+# Imports
+import gradio as gr
+
+from App_Function_Libraries.Chat import chat
+from App_Function_Libraries.DB.SQLite_DB import add_or_update_prompt
+from App_Function_Libraries.Prompt_Engineering.Prompt_Engineering import generate_prompt, test_generated_prompt
+
+
+#
+# Local Imports
+
+#
+########################################################################################################################
+#
+# Functions
+
+# Gradio tab for prompt suggestion and testing
+def create_prompt_suggestion_tab():
+    with gr.TabItem("Prompt Suggestion/Creation"):
+        gr.Markdown("# Generate and Test AI Prompts with the Metaprompt Approach")
+
+        with gr.Row():
+            with gr.Column():
+                # Task and variable inputs
+                task_input = gr.Textbox(label="Task Description",
+                                        placeholder="E.g., Draft an email responding to a customer complaint")
+                variables_input = gr.Textbox(label="Variables (comma-separated)",
+                                             placeholder="E.g., CUSTOMER_COMPLAINT, COMPANY_NAME")
+
+                # API-related inputs
+                api_name_input = gr.Dropdown(
+                    choices=["OpenAI", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter", "Llama.cpp",
+                             "Kobold", "Ooba", "Tabbyapi", "VLLM", "ollama", "HuggingFace", "Custom-OpenAI-API"],
+                    label="API Provider",
+                    value="OpenAI"  # Default selection
+                )
+
+                api_key_input = gr.Textbox(label="API Key", placeholder="Enter your API key (if required)",
+                                           type="password")
+
+                # Temperature slider for controlling randomness of generation
+                temperature_input = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.7, label="Temperature")
+
+                # Button to generate the prompt
+                generate_prompt_button = gr.Button("Generate Prompt")
+
+            with gr.Column():
+                # Output for the generated prompt
+                generated_prompt_output = gr.Textbox(label="Generated Prompt", interactive=False)
+                # FIXME - figure this out
+                # copy_button = gr.HTML("""
+                # <button onclick="copyToClipboard()">Copy</button>
+                # <script>
+                # function copyToClipboard() {
+                #     const textBox = document.querySelector('textarea'); // Select the textarea
+                #     textBox.select(); // Select the text
+                #     document.execCommand('copy'); // Copy it to clipboard
+                #     alert('Copied to clipboard!');
+                # }
+                # </script>
+                # """)
+        # Section to test the generated prompt
+        with gr.Row():
+            with gr.Column():
+                # Input to test the prompt with variable values
+                variable_values_input = gr.Textbox(label="Variable Values (comma-separated)",
+                                                   placeholder="Enter variable values in order, comma-separated")
+                test_prompt_button = gr.Button("Test Generated Prompt")
+            with gr.Column():
+                # Output for the test result
+                test_output = gr.Textbox(label="Test Output", interactive=False)
+
+        # Section to save the generated prompt to the database
+        with gr.Row():
+            with gr.Column():
+                prompt_title_input = gr.Textbox(label="Prompt Title", placeholder="Enter a title for this prompt")
+                prompt_author_input = gr.Textbox(label="Author",
+                                                 placeholder="Enter the author's name")  # New author field
+                prompt_description_input = gr.Textbox(label="Prompt Description", placeholder="Enter a description", lines=3)
+                save_prompt_button = gr.Button("Save Prompt to Database")
+                save_prompt_output = gr.Textbox(label="Save Prompt Output", interactive=False)
+
+        # Callback function to generate prompt
+        def on_generate_prompt(api_name, api_key, task, variables, temperature):
+            # Generate the prompt using the metaprompt approach and API
+            generated_prompt = generate_prompt(api_name, api_key, task, variables, temperature)
+            return generated_prompt
+
+        # Callback function to test the generated prompt
+        def on_test_prompt(api_name, api_key, generated_prompt, variable_values, temperature):
+            # Test the prompt by filling in variable values
+            test_result = test_generated_prompt(api_name, api_key, generated_prompt, variable_values, temperature)
+            return test_result
+
+        # Callback function to save the generated prompt to the database
+        def on_save_prompt(title, author, description, generated_prompt):
+            if not title or not generated_prompt:
+                return "Error: Title and generated prompt are required."
+
+            # Add the generated prompt to the database
+            result = add_or_update_prompt(title, author, description, system_prompt="", user_prompt=generated_prompt, keywords=None)
+            return result
+
+        # Connect the button to the function that generates the prompt
+        generate_prompt_button.click(
+            fn=on_generate_prompt,
+            inputs=[api_name_input, api_key_input, task_input, variables_input, temperature_input],
+            outputs=[generated_prompt_output]
+        )
+
+        # Connect the button to the function that tests the generated prompt
+        test_prompt_button.click(
+            fn=on_test_prompt,
+            inputs=[api_name_input, api_key_input, generated_prompt_output, variable_values_input, temperature_input],
+            outputs=[test_output]
+        )
+
+        # Connect the save button to the function that saves the prompt to the database
+        save_prompt_button.click(
+            fn=on_save_prompt,
+            inputs=[prompt_title_input, prompt_author_input, prompt_description_input, generated_prompt_output],
+            outputs=[save_prompt_output]
+        )
+
+# Example chat function based on your API structure
+def chat_api_call(api_endpoint, api_key, input_data, prompt, temp, system_message=None):
+    # Here you will call your chat function as defined previously
+    response = chat(message=input_data, history=[], media_content={}, selected_parts=[],
+                    api_endpoint=api_endpoint, api_key=api_key, prompt=prompt, temperature=temp,
+                    system_message=system_message)
+    return response
+#
+# End of Functions
+########################################################################################################################
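
Like the other create_*_tab() factories in this directory, the new tab only builds components and wires callbacks; it still has to be called from the main Gradio app to appear in the UI. A rough sketch of how it would be mounted, assuming the app assembles its tabs inside a gr.Blocks() context (the surrounding file and tab ordering are assumptions, not part of this commit):

    import gradio as gr
    from App_Function_Libraries.Gradio_UI.Prompt_Suggestion_tab import create_prompt_suggestion_tab

    with gr.Blocks() as demo:
        with gr.Tabs():
            # Each factory adds its own gr.TabItem when called inside the Tabs context.
            create_prompt_suggestion_tab()

    demo.launch()
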
App_Function_Libraries/Gradio_UI/RAG_QA_Chat_tab.py CHANGED
@@ -58,6 +58,7 @@ def create_rag_qa_chat_tab():
                     label="Select API for RAG",
                     value="OpenAI"
                 )
+                use_query_rewriting = gr.Checkbox(label="Use Query Rewriting", value=True)

             with gr.Column(scale=2):
                 chatbot = gr.Chatbot(height=500)
@@ -105,11 +106,12 @@ def create_rag_qa_chat_tab():
         loading_indicator = gr.HTML(visible=False)

         def rag_qa_chat_wrapper(message, history, context_source, existing_file, search_results, file_upload,
-                                convert_to_text, keywords, api_choice):
+                                convert_to_text, keywords, api_choice, use_query_rewriting):
             try:
                 logging.info(f"Starting rag_qa_chat_wrapper with message: {message}")
                 logging.info(f"Context source: {context_source}")
                 logging.info(f"API choice: {api_choice}")
+                logging.info(f"Query rewriting: {'enabled' if use_query_rewriting else 'disabled'}")

                 # Show loading indicator
                 yield history, "", gr.update(visible=True)
@@ -118,14 +120,14 @@ def create_rag_qa_chat_tab():
                 api_choice = api_choice.value if isinstance(api_choice, gr.components.Dropdown) else api_choice
                 logging.info(f"Resolved API choice: {api_choice}")

-                # Only rephrase the question if it's not the first query
-                if len(history) > 0:
+                # Only rephrase the question if it's not the first query and query rewriting is enabled
+                if len(history) > 0 and use_query_rewriting:
                     rephrased_question = rephrase_question(history, message, api_choice)
                     logging.info(f"Original question: {message}")
                     logging.info(f"Rephrased question: {rephrased_question}")
                 else:
                     rephrased_question = message
-                    logging.info(f"First question, no rephrasing: {message}")
+                    logging.info(f"Using original question: {message}")

                 if context_source == "All Files in the Database":
                     # Use the enhanced_rag_pipeline to search the entire database
@@ -189,8 +191,11 @@ def create_rag_qa_chat_tab():
                 logging.info(
                     f"Response received from rag_qa_chat: {response[:100]}...")

-                # Add the original question to the history
-                new_history[-1] = (message, new_history[-1][1])
+                # Safely update history
+                if new_history:
+                    new_history[-1] = (message, new_history[-1][1])
+                else:
+                    new_history = [(message, response)]

                 gr.Info("Response generated successfully")
                 logging.info("rag_qa_chat_wrapper completed successfully")
@@ -210,6 +215,7 @@ def create_rag_qa_chat_tab():

         def rephrase_question(history, latest_question, api_choice):
             # Thank you https://www.reddit.com/r/LocalLLaMA/comments/1fi1kex/multi_turn_conversation_and_rag/
+            logging.info("RAG QnA: Rephrasing question")
             conversation_history = "\n".join([f"User: {h[0]}\nAssistant: {h[1]}" for h in history[:-1]])
             prompt = f"""You are a helpful assistant. Given the conversation history and the latest question, resolve any ambiguous references in the latest question.

@@ -223,6 +229,7 @@ def create_rag_qa_chat_tab():

             # Use the selected API to generate the rephrased question
             rephrased_question = generate_answer(api_choice, prompt, "")
+            logging.info(f"Rephrased question: {rephrased_question}")
             return rephrased_question.strip()

         def perform_search(query):
@@ -241,14 +248,14 @@ def create_rag_qa_chat_tab():
         submit.click(
             rag_qa_chat_wrapper,
             inputs=[msg, chatbot, context_source, existing_file, search_results, file_upload,
-                    convert_to_text, keywords, api_choice],
+                    convert_to_text, keywords, api_choice, use_query_rewriting],
             outputs=[chatbot, msg, loading_indicator]
         )

         clear_chat.click(clear_chat_history, outputs=[chatbot, msg])

         return (context_source, existing_file, search_query, search_button, search_results, file_upload,
-                convert_to_text, keywords, api_choice, chatbot, msg, submit, clear_chat)
+                convert_to_text, keywords, api_choice, use_query_rewriting, chatbot, msg, submit, clear_chat)

     def convert_file_to_text(file_path):
         """Convert various file types to plain text."""
App_Function_Libraries/Gradio_UI/Video_transcription_tab.py CHANGED
@@ -112,14 +112,13 @@ def create_video_transcription_tab():
                                              "OpenRouter",
                                              "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "ollama", "HuggingFace", "Custom-OpenAI-API"],
                                     value=None, label="API Name (Mandatory)")
-                api_key_input = gr.Textbox(label="API Key (Mandatory)", placeholder="Enter your API key here",
+                api_key_input = gr.Textbox(label="API Key (Optional - Set in Config.txt)", placeholder="Enter your API key here",
                                            type="password")
                 keywords_input = gr.Textbox(label="Keywords", placeholder="Enter keywords here (comma-separated)",
                                             value="default,no_keyword_set")
                 batch_size_input = gr.Slider(minimum=1, maximum=10, value=1, step=1,
                                              label="Batch Size (Number of videos to process simultaneously)")
-                timestamp_option = gr.Radio(choices=["Include Timestamps", "Exclude Timestamps"],
-                                            value="Include Timestamps", label="Timestamp Option")
+                timestamp_option = gr.Checkbox(label="Include Timestamps", value=True)
                 keep_original_video = gr.Checkbox(label="Keep Original Video", value=False)
                 # First, create a checkbox to toggle the chunking options
                 chunking_options_checkbox = gr.Checkbox(label="Show Chunking Options", value=False)
@@ -304,7 +303,7 @@ def create_video_transcription_tab():
                             start_seconds, api_name, api_key,
                             False, False, False, False, 0.01, None, keywords, None, diarize,
                             end_time=end_seconds,
-                            include_timestamps=(timestamp_option == "Include Timestamps"),
+                            include_timestamps=timestamp_option,
                             metadata=video_metadata,
                             use_chunking=chunking_options_checkbox,
                             chunk_options=chunk_options,
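
Switching timestamp_option from a two-choice gr.Radio to a gr.Checkbox means the event handler now receives a plain boolean instead of a label string, so the string comparison at the call site can be dropped. Roughly, the value flows like this (a sketch of the two call-site styles, not additional code in the tab):

    # Before: the Radio passed the selected label, so the handler compared strings.
    include_timestamps = (timestamp_option == "Include Timestamps")

    # After: the Checkbox passes True/False, which is forwarded as-is.
    include_timestamps = timestamp_option
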
App_Function_Libraries/Gradio_UI/View_DB_Items_tab.py CHANGED
@@ -3,7 +3,6 @@
 #
 # Imports
 import html
-import sqlite3
 #
 # External Imports
 import gradio as gr
@@ -13,8 +12,6 @@ from App_Function_Libraries.DB.DB_Manager import view_database, get_all_document
     fetch_paginated_data, fetch_item_details, get_latest_transcription, list_prompts, fetch_prompt_details, \
     load_preset_prompts
 from App_Function_Libraries.DB.SQLite_DB import get_document_version
-from App_Function_Libraries.Utils.Utils import get_database_path, format_text_with_line_breaks
-#
 #
 ####################################################################################################
 #
@@ -46,7 +43,7 @@ def create_prompt_view_tab():
             for prompt_name in prompts:
                 details = fetch_prompt_details(prompt_name)
                 if details:
-                    title, _, _, _, _ = details
+                    title, _, _, _, _, _ = details
                     author = "Unknown"  # Assuming author is not stored in the current schema
                     table_html += f"<tr><td style='border: 1px solid black; padding: 8px;'>{html.escape(title)}</td><td style='border: 1px solid black; padding: 8px;'>{html.escape(author)}</td></tr>"
                     prompt_choices.append((title, title))  # Using title as both label and value
@@ -77,20 +74,27 @@ def create_prompt_view_tab():
         def display_selected_prompt(prompt_name):
             details = fetch_prompt_details(prompt_name)
             if details:
-                title, description, system_prompt, user_prompt, keywords = details
+                title, author, description, system_prompt, user_prompt, keywords = details
+                # Handle None values by converting them to empty strings
+                description = description or ""
+                system_prompt = system_prompt or ""
+                user_prompt = user_prompt or ""
+                author = author or "Unknown"
+                keywords = keywords or ""
+
                 html_content = f"""
                 <div style="border: 1px solid #ddd; padding: 10px; margin-bottom: 20px;">
-                    <h3>{html.escape(title)}</h3>
-                    <p><strong>Description:</strong> {html.escape(description or '')}</p>
+                    <h3>{html.escape(title)}</h3> <h4>by {html.escape(author)}</h4>
+                    <p><strong>Description:</strong> {html.escape(description)}</p>
                     <div style="margin-top: 10px;">
                         <strong>System Prompt:</strong>
-                        <pre style="white-space: pre-wrap; word-wrap: break-word;">{html.escape(system_prompt or '')}</pre>
+                        <pre style="white-space: pre-wrap; word-wrap: break-word;">{html.escape(system_prompt)}</pre>
                     </div>
                     <div style="margin-top: 10px;">
                         <strong>User Prompt:</strong>
-                        <pre style="white-space: pre-wrap; word-wrap: break-word;">{html.escape(user_prompt or '')}</pre>
+                        <pre style="white-space: pre-wrap; word-wrap: break-word;">{html.escape(user_prompt)}</pre>
                     </div>
-                    <p><strong>Keywords:</strong> {html.escape(keywords or '')}</p>
+                    <p><strong>Keywords:</strong> {html.escape(keywords)}</p>
                 </div>
                 """
                 return html_content
@@ -123,110 +127,6 @@ def create_prompt_view_tab():
             inputs=[prompt_selector],
             outputs=[selected_prompt_display]
         )
-    # def create_prompt_view_tab():
-    #     with gr.TabItem("View Prompt Database"):
-    #         gr.Markdown("# View Prompt Database Entries")
-    #         with gr.Row():
-    #             with gr.Column():
-    #                 entries_per_page = gr.Dropdown(choices=[10, 20, 50, 100], label="Entries per Page", value=10)
-    #                 page_number = gr.Number(value=1, label="Page Number", precision=0)
-    #                 view_button = gr.Button("View Page")
-    #                 next_page_button = gr.Button("Next Page")
-    #                 previous_page_button = gr.Button("Previous Page")
-    #                 pagination_info = gr.Textbox(label="Pagination Info", interactive=False)
-    #             with gr.Column():
-    #                 results_display = gr.HTML()
-    #
-    #     # FIXME - SQL functions to be moved to DB_Manager
-    #
-    #     def view_database(page, entries_per_page):
-    #         offset = (page - 1) * entries_per_page
-    #         try:
-    #             with sqlite3.connect(get_database_path('prompts.db')) as conn:
-    #                 cursor = conn.cursor()
-    #                 cursor.execute('''
-    #                     SELECT p.name, p.details, p.system, p.user, GROUP_CONCAT(k.keyword, ', ') as keywords
-    #                     FROM Prompts p
-    #                     LEFT JOIN PromptKeywords pk ON p.id = pk.prompt_id
-    #                     LEFT JOIN Keywords k ON pk.keyword_id = k.id
-    #                     GROUP BY p.id
-    #                     ORDER BY p.name
-    #                     LIMIT ? OFFSET ?
-    #                 ''', (entries_per_page, offset))
-    #                 prompts = cursor.fetchall()
-    #
-    #                 cursor.execute('SELECT COUNT(*) FROM Prompts')
-    #                 total_prompts = cursor.fetchone()[0]
-    #
-    #             results = ""
-    #             for prompt in prompts:
-    #                 # Escape HTML special characters and replace newlines with <br> tags
-    #                 title = html.escape(prompt[0]).replace('\n', '<br>')
-    #                 details = html.escape(prompt[1] or '').replace('\n', '<br>')
-    #                 system_prompt = html.escape(prompt[2] or '')
-    #                 user_prompt = html.escape(prompt[3] or '')
-    #                 keywords = html.escape(prompt[4] or '').replace('\n', '<br>')
-    #
-    #                 results += f"""
-    #                 <div style="border: 1px solid #ddd; padding: 10px; margin-bottom: 20px;">
-    #                     <div style="display: grid; grid-template-columns: 1fr 1fr; gap: 10px;">
-    #                         <div><strong>Title:</strong> {title}</div>
-    #                         <div><strong>Details:</strong> {details}</div>
-    #                     </div>
-    #                     <div style="margin-top: 10px;">
-    #                         <strong>User Prompt:</strong>
-    #                         <pre style="white-space: pre-wrap; word-wrap: break-word;">{user_prompt}</pre>
-    #                     </div>
-    #                     <div style="margin-top: 10px;">
-    #                         <strong>System Prompt:</strong>
-    #                         <pre style="white-space: pre-wrap; word-wrap: break-word;">{system_prompt}</pre>
-    #                     </div>
-    #                     <div style="margin-top: 10px;">
-    #                         <strong>Keywords:</strong> {keywords}
-    #                     </div>
-    #                 </div>
-    #                 """
-    #
-    #             total_pages = (total_prompts + entries_per_page - 1) // entries_per_page
-    #             pagination = f"Page {page} of {total_pages} (Total prompts: {total_prompts})"
-    #
-    #             return results, pagination, total_pages
-    #         except sqlite3.Error as e:
-    #             return f"<p>Error fetching prompts: {e}</p>", "Error", 0
-    #
-    #     def update_page(page, entries_per_page):
-    #         results, pagination, total_pages = view_database(page, entries_per_page)
-    #         next_disabled = page >= total_pages
-    #         prev_disabled = page <= 1
-    #         return results, pagination, page, gr.update(interactive=not next_disabled), gr.update(
-    #             interactive=not prev_disabled)
-    #
-    #     def go_to_next_page(current_page, entries_per_page):
-    #         next_page = current_page + 1
-    #         return update_page(next_page, entries_per_page)
-    #
-    #     def go_to_previous_page(current_page, entries_per_page):
-    #         previous_page = max(1, current_page - 1)
-    #         return update_page(previous_page, entries_per_page)
-    #
-    #     view_button.click(
-    #         fn=update_page,
-    #         inputs=[page_number, entries_per_page],
-    #         outputs=[results_display, pagination_info, page_number, next_page_button, previous_page_button]
-    #     )
-    #
-    #     next_page_button.click(
-    #         fn=go_to_next_page,
-    #         inputs=[page_number, entries_per_page],
-    #         outputs=[results_display, pagination_info, page_number, next_page_button, previous_page_button]
-    #     )
-    #
-    #     previous_page_button.click(
-    #         fn=go_to_previous_page,
-    #         inputs=[page_number, entries_per_page],
-    #         outputs=[results_display, pagination_info, page_number, next_page_button, previous_page_button]
-    #     )
-

 def format_as_html(content, title):
     escaped_content = html.escape(content)
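
The prompt-view tab now unpacks a six-element details tuple (title, author, description, system prompt, user prompt, keywords) and normalizes None fields before rendering the HTML card. A compact sketch of that normalization step on its own, assuming the tuple shape introduced in this commit (the helper name is illustrative):

    def normalize_prompt_details(details):
        # fetch_prompt_details may return None for unset fields; render them as safe defaults.
        title, author, description, system_prompt, user_prompt, keywords = details
        return (
            title,
            author or "Unknown",
            description or "",
            system_prompt or "",
            user_prompt or "",
            keywords or "",
        )
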