import gradio as gr
from src.utils import LLMHandler, initialize_newsletter, integrate_personalized_text, build_context, build_prompt
from src.utils_api import get_recommendations
import yaml
import logging
import argparse
import os
import tempfile
logging.basicConfig(level=logging.INFO)
def main():
    # Parse command-line arguments
parser = argparse.ArgumentParser(description='Newsletter Generator')
parser.add_argument('--config-file', type=str, default='./config/config.yaml', help='Path to the configuration file.')
args = parser.parse_args()
logging.info("Starting the Newsletter Generator app...")
# Load configuration from YAML file
logging.info("Loading configuration from config.yaml...")
with open(args.config_file, "r") as file:
config = yaml.safe_load(file)
    # Read the OpenAI API key from the environment rather than from the config file
    config['llm']['api_key'] = os.environ["OPENAI_KEY"]
    llm_settings = config['llm']
newsletter_meta_info = config['newsletter']
logging.debug(f"Configuration loaded: {config}")
# Initialize the LLM handler
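    # The handler is expected to expose model_name, default_temperature, default_max_tokens and
    # default_system_message (used to pre-fill the UI below) plus a generate() method that returns
    # the personalized newsletter sections.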
llm_handler = LLMHandler(**llm_settings)
logging.info(f"LLM handler initialized with the following settings: {config['llm']}")
# Define the function to generate the newsletter using the OpenAI API
def generate_newsletter(
customer_id,
model_name,
temperature,
max_tokens,
system_message,
textual_preferences,
progress=gr.Progress()
):
# get recommendations
progress(0.1, "Fetching Client History...")
logging.info("Getting recommendations...")
customer_info, recommendations, transactions = get_recommendations(
customer_id,
max_recs=newsletter_meta_info['max_recommendations'],
max_transactions=newsletter_meta_info['max_recents_items'])
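        # The recommender API returns the customer profile, recommended items and recent transactions;
        # these feed both the HTML template and the LLM context built below.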
logging.debug(f"Recommendations: {recommendations}")
logging.debug(f"Transactions: {transactions}")
print("cusomter info", customer_info)
# Load the html template and replace the placeholders for images with the actual content
logging.info("Initializing newsletter template...")
progress(0.5, "Initializing personalized content...")
newsletter_text = initialize_newsletter(newsletter_meta_info, transactions, recommendations)
# Build context from the user preferences, the recommendations and the transactions
context = build_context(
recommendations,
transactions,
textual_preferences,
customer_info)
logging.info(f"Context: {context}")
# Build the prompt for the LLM
progress(0.7, "Generating personalized content...")
prompt = build_prompt(context)
logging.info(f"Prompt: {prompt}")
# Generate the newsletter
sections = llm_handler.generate(
prompt,
model_name,
temperature,
max_tokens,
system_message)
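        # `sections` is expected to hold the generated text for each newsletter section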
logging.info(f"Sections: {sections}")
        # Integrate the personalized text into the newsletter template
logging.info("Integrating personalized text...")
newsletter_text = integrate_personalized_text(newsletter_text, customer_info, sections)
# Save HTML to a temporary file for download
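        # delete=False keeps the file on disk after this handler returns, so the DownloadButton can serve it from the returned path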
with tempfile.NamedTemporaryFile(delete=False, suffix=".html") as temp_file:
temp_file.write(newsletter_text.encode("utf-8"))
temp_file_path = temp_file.name
progress(1.0)
return newsletter_text, temp_file_path
logging.info("Creating interface...")
with gr.Blocks() as demo:
# Header Section
gr.Markdown("## AI-Powered Newsletter for Fashion Brands", elem_id="header")
# Input Section
with gr.Row():
customer_id = gr.Dropdown(
label="Customer ID",
#value="04a183a27a6877e560e1025216d0a3b40d88668c68366da17edfb18ed89c574c",
interactive=True,
choices=[
("customer 1", "04a183a27a6877e560e1025216d0a3b40d88668c68366da17edfb18ed89c574c"),
("customer 2", "1abaca5cd299000720538c70ba2ed246db6731bce924b5b4ca81770a47842656"),
("customer 3", "1741b0d1b2c29994084b7312001c1b11ab8b112b3fd05ac765f4d232afdc4eaf")
]
)
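            # Each choice is a (display label, customer ID) pair: the dropdown shows the friendly name
            # and passes the ID to generate_newsletter.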
with gr.Row():
textual_preferences = gr.Textbox(
label="Newsletter Preferences",
placeholder="Enter rich newsletter preferences."
)
# Advanced Settings
with gr.Accordion("⚙️ Advanced Settings", open=False):
with gr.Row():
model_name = gr.Dropdown(
label="LLM Model",
choices=["gpt-3.5-turbo", "gpt-4o"],
value=llm_handler.model_name
)
temperature = gr.Slider(
label="Temperature",
minimum=0.0,
maximum=1.0,
step=0.05,
value=llm_handler.default_temperature
)
with gr.Row():
max_tokens = gr.Number(
label="Max Tokens",
value=llm_handler.default_max_tokens,
precision=0
)
system_message = gr.Textbox(
label="System Message",
placeholder="Enter a custom system message (optional).",
value=llm_handler.default_system_message,
visible=False
)
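                # Hidden from the UI, but still passed to generate_newsletter so the configured default system message is used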
# User Context (Hidden by Default)
with gr.Accordion("🧑💻 User Context", open=False, visible=False):
pass # Placeholder for future user context integration.
# Output Section
with gr.Row():
generate_button = gr.Button("Generate Personalized Newsletter", variant="primary")
download = gr.DownloadButton("Download")
newsletter_output = gr.HTML(
label="Generated Newsletter",
value="
",
min_height=500,
render=True
)
# Event Binding
generate_button.click(
fn=generate_newsletter,
inputs=[
customer_id,
model_name,
temperature,
max_tokens,
system_message,
textual_preferences
],
outputs=[newsletter_output, download]
)
# Launch App
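    # queue() enables the gr.Progress updates emitted by generate_newsletter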
demo.queue().launch(
share=config['app']['share'],
server_port=config['app']['server_port']
)
if __name__ == "__main__":
main()