import os
import json
import logging
from typing import Optional

import gradio as gr

from utils.response_manager import ResponseManager


class ChatbotInterface:
    def __init__(self,
                 model: str = "gpt-4o-mini",
                 temperature: float = 0,
                 max_output_tokens: int = 600,
                 max_num_results: int = 5,
                 vector_store_id: Optional[str] = None,
                 api_key: Optional[str] = None,
                 meta_prompt_file: Optional[str] = None,
                 config_path: str = 'config/gradio_config.json'
                 ):
        """
        Initialize the ChatbotInterface with configuration and custom parameters for ResponseManager.

        :param model: The OpenAI model to use (default: 'gpt-4o-mini').
        :param temperature: The temperature for response generation (default: 0).
        :param max_output_tokens: The maximum number of output tokens (default: 600).
        :param max_num_results: The maximum number of search results to return (default: 5).
        :param vector_store_id: The ID of the vector store to use for file search.
        :param api_key: The OpenAI API key for authentication.
        :param meta_prompt_file: Path to the meta prompt file.
        :param config_path: Path to the configuration JSON file.
        """
        # Parameters for the UI
        self.config = self.load_config(config_path)
        self.title = self.config["chatbot_title"]
        self.description = self.config["chatbot_description"]
        self.input_placeholder = self.config["chatbot_input_placeholder"]
        self.output_label = self.config["chatbot_output_label"]

        # Parameters for the ResponseManager class
        self.model = model
        self.temperature = temperature
        self.max_output_tokens = max_output_tokens
        self.max_num_results = max_num_results
        self.vector_store_id = vector_store_id
        self.api_key = api_key
        self.meta_prompt_file = meta_prompt_file

    def load_config(self, config_path: str) -> dict:
        """
        Load the configuration for the Gradio GUI from a JSON file.

        :param config_path: Path to the configuration JSON file.
        :return: Configuration dictionary.
        """
        logging.info(f"Loading configuration from {config_path}...")
        if not os.path.exists(config_path):
            logging.error(f"Configuration file not found: {config_path}")
            raise FileNotFoundError(f"Configuration file not found: {config_path}")
        with open(config_path, 'r') as config_file:
            config = json.load(config_file)
        required_keys = [
            "chatbot_title",
            "chatbot_description",
            "chatbot_input_placeholder",
            "chatbot_output_label"
        ]
        for key in required_keys:
            if key not in config:
                logging.error(f"Missing required configuration key: {key}")
                raise ValueError(f"Missing required configuration key: {key}")
        return config
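
    # For reference, a minimal sketch of what config/gradio_config.json is expected to
    # contain; the values below are illustrative assumptions, not the deployed config:
    #
    # {
    #     "chatbot_title": "Document Q&A Chatbot",
    #     "chatbot_description": "Ask questions about the indexed documents.",
    #     "chatbot_input_placeholder": "Type your question and press Enter...",
    #     "chatbot_output_label": "Conversation"
    # }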

    def create_interface(self) -> gr.Blocks:
        """
        Create the Gradio Blocks interface: the chat history is shown above a single input
        container holding the text box and a clear-history button. The text input is
        cleared after each message is submitted.
        """
        logging.info("Creating Gradio interface...")
        with gr.Blocks() as demo:
            # Title and description area.
            gr.Markdown(f"## {self.title}\n{self.description}")

            # Chatbot output area.
            chatbot_output = gr.Chatbot(label=self.output_label, type="messages")

            # Session-specific states.
            conversation_state = gr.State([])
            response_manager_state = gr.State(None)

            # Input row.
            with gr.Row(elem_id="input-container", equal_height=True):
                reset = gr.ClearButton(
                    value="Clear history",
                    variant="secondary",
                    elem_id="reset-button",
                    size="lg"
                )
                user_input = gr.Textbox(
                    lines=1,
                    show_label=False,  # Hide label for a unified look.
                    elem_id="chat-input",
                    placeholder=self.input_placeholder,
                    scale=500,
                )

            # Initialization function for the session-specific ResponseManager.
            def init_response_manager():
                try:
                    rm = ResponseManager(
                        model=self.model,
                        temperature=self.temperature,
                        max_output_tokens=self.max_output_tokens,
                        max_num_results=self.max_num_results,
                        vector_store_id=self.vector_store_id,
                        api_key=self.api_key,
                        meta_prompt_file=self.meta_prompt_file
                    )
                    logging.info(
                        "ChatbotInterface initialized with the following parameters:\n"
                        f"  - Model: {self.model}\n"
                        f"  - Temperature: {self.temperature}\n"
                        f"  - Max Output Tokens: {self.max_output_tokens}\n"
                        f"  - Max Number of Results: {self.max_num_results}\n"
                    )
                    rm.reset_conversation()
                    return rm
                except Exception as e:
                    logging.error(f"Failed to initialize ResponseManager: {e}")
                    raise

            # Reset handler: clears the chat and re-creates the ResponseManager.
            def reset_output():
                response_manager = init_response_manager()
                # Returns [chatbot_output, conversation_state, response_manager_state, user_input]
                return [], [], response_manager, ""

            # Input handler: generates a response with the session-specific ResponseManager.
            async def process_input(user_message, chat_history, response_manager):
                updated_history = await response_manager.generate_response(user_message, chat_history)
                # Returns [chatbot_output, conversation_state, response_manager_state, user_input]
                return updated_history, updated_history, response_manager, ""

            # Initialize a ResponseManager instance for each session on load.
            demo.load(
                fn=init_response_manager,
                inputs=None,
                outputs=response_manager_state  # Each session state gets its own ResponseManager instance.
            )

            # ClearButton action.
            reset.click(
                fn=reset_output,
                inputs=None,
                outputs=[chatbot_output, conversation_state, response_manager_state, user_input]
            )

            # Pressing Enter triggers response generation.
            user_input.submit(
                fn=process_input,
                inputs=[user_input, conversation_state, response_manager_state],
                outputs=[chatbot_output, conversation_state, response_manager_state, user_input]
            )

        logging.info("Gradio interface created successfully.")
        return demo
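

# A minimal usage sketch, not part of the original module: the environment variable
# names and the __main__ launch pattern below are assumptions for illustration only.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    chatbot = ChatbotInterface(
        vector_store_id=os.getenv("VECTOR_STORE_ID"),    # hypothetical env var name
        api_key=os.getenv("OPENAI_API_KEY"),             # conventional OpenAI env var
        meta_prompt_file=os.getenv("META_PROMPT_FILE"),  # hypothetical env var name
    )
    demo = chatbot.create_interface()
    demo.launch()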