# nps-analyser / app.py
import subprocess
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import gradio as gr
import tempfile
import logging
from PIL import Image
import os
import random
import io
import numpy as np
from itertools import zip_longest
import openai
from dotenv import load_dotenv
from openai import OpenAI
from langchain_openai import ChatOpenAI
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.agents import tool, AgentExecutor
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
from langchain.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
)
from langchain_core.messages import AIMessage, HumanMessage
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
import serpapi
import requests
import mpld3
from langchain_community.tools import TavilySearchResults
import base64
import shutil
import matplotlib.patches as patches
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
# Initialize logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Load environment variables from .env file
load_dotenv()
# Define and validate API keys
openai_api_key = os.getenv("OPENAI_API_KEY")
serper_api_key = os.getenv("SERPER_API_KEY")
if not openai_api_key or not serper_api_key:
logger.error("API keys are not set properly.")
raise ValueError("API keys for OpenAI and SERPER must be set in the .env file.")
else:
logger.info("API keys loaded successfully.")
# Initialize OpenAI client
try:
openai.api_key = openai_api_key
logger.info("OpenAI client initialized successfully.")
except Exception as e:
logger.error(f"Error initializing OpenAI client: {e}")
raise e
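# Expected .env contents (placeholder values; the keys shown are the ones read above):
#   OPENAI_API_KEY=sk-...
#   SERPER_API_KEY=...
# The Tavily search tool configured further below also reads TAVILY_API_KEY from
# the environment.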
max_outputs = 10
outputs = []
# Global variable to store the selected dataset for AI computation
selected_dataset_ai = "Volkswagen Customers"
df_builder_pivot_str = ""
def plot_model_results(results_df, average_value, title, model_type):
"""
Plot model results with specific orders and colors for Trust and NPS models.
Args:
results_df (DataFrame): DataFrame containing predictor names and their importance.
average_value (float): Average importance value.
title (str): Title of the plot.
model_type (str): Type of model (either "Trust" or "NPS").
Returns:
Image: Image object containing the plot.
"""
logger.info(
"Plotting model results for %s model with title '%s'.", model_type, title
)
try:
# Define color scheme
color_map = {
"Stability": "#375570",
"Development": "#E3B05B",
"Relationship": "#C63F48",
"Benefit": "#418387",
"Vision": "#DF8859",
"Competence": "#6D93AB",
"Trust": "#f5918a",
}
# Define the order for each model
if model_type == "Trust":
order = [
"Stability",
"Development",
"Relationship",
"Benefit",
"Vision",
"Competence",
]
else: # "NPS"
order = [
"Trust",
"Stability",
"Development",
"Relationship",
"Benefit",
"Vision",
"Competence",
]
# Apply the categorical ordering to the 'Predictor' column
results_df["Predictor"] = pd.Categorical(
results_df["Predictor"], categories=order, ordered=True
)
results_df.sort_values("Predictor", ascending=False, inplace=True)
# Create the figure and axis
fig, ax = plt.subplots(figsize=(10, 8))
# Set the x-axis labels with "%" using FuncFormatter
formatter = FuncFormatter(lambda x, _: f"{x:.0f}%")
ax.xaxis.set_major_formatter(formatter)
        # Determine the x-axis limits: start at zero and extend a bit beyond the max
        actual_max = results_df["Importance_percent"].max()
        x_min = 0
        x_max = actual_max + 5
        plt.xlim(x_min, x_max)
# Set the x-axis ticks at every 5% interval and add dotted lines
x_ticks = np.arange(
np.floor(x_min), np.ceil(x_max) + 5, 5
) # Ensures complete coverage
ax.set_xticks(x_ticks) # Set the ticks on the axis
for tick in x_ticks:
ax.axvline(
x=tick, color="grey", linestyle="--", linewidth=0.5, zorder=2
) # Add dotted lines
# Create bars: all from 0 → value (left-to-right only)
for i, row in enumerate(results_df.itertuples(index=False)):
color = color_map[row.Predictor]
ax.barh(
row.Predictor,
row.Importance_percent,
left=0,
color=color,
edgecolor="white",
height=0.6,
zorder=3,
)
ax.text(
row.Importance_percent + 0.5,
i,
f"{row.Importance_percent:.1f}%",
va="center",
ha="left",
color="#8c8b8c",
)
# Draw the average line and set the title
ax.axvline(average_value, color="black", linewidth=1, linestyle="-", zorder=3)
plt.title(title, fontsize=14)
# Remove plot borders
ax.spines[["left", "top", "right"]].set_color("none")
# Change the colour of y-axis text
ax.tick_params(axis="y", colors="#8c8b8c", length=0)
# Send axes to background and tighten the layout
ax.set_axisbelow(True)
plt.tight_layout()
# Save the figure to a bytes buffer and then to an image
img_data = io.BytesIO()
plt.savefig(
img_data, format="png", facecolor=fig.get_facecolor(), edgecolor="none"
)
img_data.seek(0)
img = Image.open(img_data)
plt.close(fig)
return img
except Exception as e:
logger.error("Error plotting model results: %s", e)
raise
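# --- Illustrative sketch (not called anywhere in the app) ---
# Shows the DataFrame shape plot_model_results expects: a "Predictor" column and
# an "Importance_percent" column. The numbers below are invented for demonstration.
def _demo_plot_model_results():
    demo_df = pd.DataFrame(
        {
            "Predictor": [
                "Trust", "Stability", "Development", "Relationship",
                "Benefit", "Vision", "Competence",
            ],
            "Importance_percent": [22.0, 18.5, 15.0, 14.5, 12.0, 10.0, 8.0],
        }
    )
    return plot_model_results(
        demo_df,
        average_value=demo_df["Importance_percent"].mean(),
        title="Demo NPS Drivers",
        model_type="NPS",
    )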
def plot_model(results_df, average_value, title, model_type):
"""
Plot model results with specific orders and colors for Trust and NPS models.
Args:
results_df (DataFrame): DataFrame containing predictor names and their importance.
average_value (float): Average importance value.
title (str): Title of the plot.
model_type (str): Type of model (either "Trust" or "NPS").
Returns:
Image: Image object containing the plot.
"""
logger.info(
"Plotting model results for %s model with title '%s'.", model_type, title
)
try:
import math
# Color mapping
color_map = {
"Stability": "#375570",
"Development": "#E3B05B",
"Relationship": "#C63F48",
"Benefit": "#418387",
"Vision": "#DF8859",
"Competence": "#6D93AB",
"Trust": "#f5918a",
}
# Load Trust Core Image
image_path = "./images/image.png"
try:
trust_core_img = Image.open(image_path)
except FileNotFoundError:
raise FileNotFoundError(f"❌ Error: Trust Core image '{image_path}' not found!")
# 🟢 Bubble Plot for NPS
order = ["Vision", "Development", "Benefit", "Competence", "Stability", "Relationship"]
results_df["Predictor"] = pd.Categorical(
results_df["Predictor"], categories=order, ordered=True
)
results_df.sort_values("Predictor", ascending=False, inplace=True)
# Extract importance percentages
values_dict = results_df.set_index("Predictor")["Importance_percent"].to_dict()
percentages = [values_dict.get(pred, 0) for pred in order]
# Bubble sizing
min_radius = 0.15
base_percentage = min(percentages) if min(percentages) > 0 else 1
bubble_radii = [
min_radius * (p / base_percentage) ** 0.75 for p in percentages
]
# Central core radius
central_radius = 0.8
# Correct default bubble positions
default_positions = {
"Vision": (0.6, 0.85),
"Development": (1.05, 0.0),
"Benefit": (0.6, -0.85),
"Competence": (-0.6, -0.85),
"Stability": (-1.05, 0.0),
"Relationship": (-0.6, 0.85)
}
        bubble_positions = default_positions
# Adjust positions to touch Trust Core
gap = -0.2
for i, predictor in enumerate(order):
x, y = bubble_positions[predictor]
r = bubble_radii[i]
distance = np.sqrt(x**2 + y**2)
scale_factor = (central_radius + r + gap) / distance
bubble_positions[predictor] = (x * scale_factor, y * scale_factor)
# Plot bubbles
fig, ax = plt.subplots(figsize=(10, 10), dpi=300)
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
ax.set_aspect('equal')
ax.axis("off")
# Draw Trust Core
extent = [-central_radius, central_radius, -central_radius, central_radius]
ax.imshow(trust_core_img, extent=extent, alpha=1.0)
# Draw bubbles
for i, predictor in enumerate(order):
x, y = bubble_positions[predictor]
r = bubble_radii[i]
color = color_map.get(predictor, "#cccccc")
circle = patches.Circle((x, y), r, facecolor=color, alpha=1.0, lw=1.5)
ax.add_patch(circle)
ax.text(
x, y, f"{percentages[i]:.1f}%",
fontsize=10, fontweight="bold",
ha="center", va="center",
color="white"
)
plt.title(title, fontsize=12)
# Save to image
img_data = io.BytesIO()
plt.savefig(img_data, format="png", bbox_inches="tight", facecolor=fig.get_facecolor())
img_data.seek(0)
img = Image.open(img_data)
plt.close(fig)
return img
except Exception as e:
logger.error("Error plotting model results: %s", e)
raise
def plot_bucket_fullness(driver_df, title):
# Determine required trust buckets
buckets = [
"Stability",
"Development",
"Relationship",
"Benefit",
"Vision",
"Competence",
]
# Check if columns are present in df
missing_columns = [col for col in buckets if col not in driver_df.columns]
if missing_columns:
logger.warning(
f"The following columns are missing in driver_df: {missing_columns}"
)
return None
logger.info("All required columns are present in driver_df.")
try:
color_map = {
"Stability": "#375570",
"Development": "#E3B05B",
"Relationship": "#C63F48",
"Benefit": "#418387",
"Vision": "#DF8859",
"Competence": "#6D93AB",
}
order = buckets
# Calculate the percentage of fullness for each column in buckets
results_df = (driver_df[buckets].mean()).reset_index()
results_df.columns = ["Trust_Bucket", "Fullness_of_Bucket"]
results_df["Trust_Bucket"] = pd.Categorical(
results_df["Trust_Bucket"], categories=order, ordered=True
)
results_df.sort_values("Trust_Bucket", inplace=True)
fig, ax = plt.subplots(figsize=(10, 8))
ax.bar(
results_df["Trust_Bucket"],
results_df["Fullness_of_Bucket"],
color=[color_map[bucket] for bucket in results_df["Trust_Bucket"]],
edgecolor="white",
zorder=2,
)
# Adding the fullness values on top of the bars
for i, row in enumerate(results_df.itertuples(index=False, name=None)):
trust_bucket, fullness_of_bucket = row
ax.text(
i,
fullness_of_bucket + 0.5, # slightly above the top of the bar
f"{fullness_of_bucket:.1f}",
ha="center",
va="bottom",
color="#8c8b8c",
)
# Set y-axis from 1 to 10 with ticks at every integer
plt.ylim(1, 10)
plt.yticks(range(1, 11))
plt.ylabel("Fullness")
plt.title(title, fontsize=14)
ax.spines[["top", "right"]].set_color("none")
# Adding grey dotted lines along the y-axis labels
y_ticks = ax.get_yticks()
for y_tick in y_ticks:
ax.axhline(y=y_tick, color="grey", linestyle="--", linewidth=0.5, zorder=1)
ax.set_axisbelow(True)
plt.tight_layout()
# Save the figure to a bytes buffer and then to an image
img_data = io.BytesIO()
plt.savefig(
img_data, format="png", facecolor=fig.get_facecolor(), edgecolor="none"
)
img_data.seek(0)
img = Image.open(img_data)
plt.close(fig)
return img
except Exception as e:
logger.error("Error plotting bucket fullness: %s", e)
raise
def call_r_script(
input_file,
text_output_path,
csv_output_path_trust,
csv_output_path_nps,
csv_output_path_loyalty,
csv_output_path_consideration,
csv_output_path_satisfaction,
csv_output_path_trustbuilder,
nps_present,
loyalty_present,
consideration_present,
satisfaction_present,
trustbuilder_present,
):
"""
Call the R script for Shapley regression analysis.
Args:
input_file (str): Path to the input Excel file.
text_output_path (str): Path to the output text file.
csv_output_path_trust (str): Path to the output CSV file for Trust.
csv_output_path_nps (str): Path to the output CSV file for NPS.
csv_output_path_loyalty (str): Path to the output CSV file for Loyalty.
csv_output_path_consideration (str): Path to the output CSV file for Consideration.
        csv_output_path_satisfaction (str): Path to the output CSV file for Satisfaction.
        csv_output_path_trustbuilder (str): Path to the output CSV file for Trust Builders.
nps_present (bool): Flag indicating whether NPS column is present in the data.
loyalty_present (bool): Flag indicating whether Loyalty column is present in the data.
consideration_present (bool): Flag indicating whether Consideration column is present in the data.
satisfaction_present (bool): Flag indicating whether Satisfaction column is present in the data.
trustbuilder_present (bool): Flag indicating whether Trustbuilder column is present in the data.
"""
command = [
"Rscript",
"process_data.R",
input_file,
text_output_path,
csv_output_path_trust,
csv_output_path_nps,
csv_output_path_loyalty,
csv_output_path_consideration,
csv_output_path_satisfaction,
csv_output_path_trustbuilder,
str(nps_present).upper(), # Convert the boolean to a string ("TRUE" or "FALSE")
str(loyalty_present).upper(),
str(consideration_present).upper(),
str(satisfaction_present).upper(),
str(trustbuilder_present).upper(),
]
try:
subprocess.run(command, check=True)
except subprocess.CalledProcessError as e:
logger.error("R script failed with error: %s", e)
raise RuntimeError(
"Error executing R script. Please check the input file format."
)
except Exception as e:
logger.error("Error calling R script: %s", e)
raise
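# Note: a minimal sketch of the equivalent shell invocation (the exact argument
# handling depends on process_data.R, which is not shown here):
#
#   Rscript process_data.R input.xlsx output.txt output_trust.csv output_nps.csv \
#       output_loyalty.csv output_consideration.csv output_satisfaction.csv \
#       output_trustbuilder.csv TRUE TRUE TRUE TRUE TRUE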
def calculate_nps_image_from_excel(file_path):
df = pd.read_excel(file_path, sheet_name="Driver", header=3)
nps_scores = df["NPS"].dropna()
promoters = ((nps_scores >= 9) & (nps_scores <= 10)).sum()
detractors = ((nps_scores >= 0) & (nps_scores <= 6)).sum()
passives = ((nps_scores >= 7) & (nps_scores <= 8)).sum()
total = len(nps_scores)
pct_promoters = (promoters / total) * 100
pct_detractors = (detractors / total) * 100
pct_passives = 100 - pct_promoters - pct_detractors
nps_score = int(round(pct_promoters - pct_detractors))
labels = ["Promoters", "Detractors", "Passives"]
values = [pct_promoters, pct_detractors, pct_passives]
colors = ["#4CAF50", "#F4A300", "#D3D3D3"]
fig, ax = plt.subplots(figsize=(3.3, 3.3))
wedges, _ = ax.pie(values, colors=colors, startangle=90, wedgeprops=dict(width=0.35))
ax.text(0, 0, f"{nps_score}", ha='center', va='center', fontsize=19, fontweight='bold')
radius = 1.3
for i, (label, pct) in enumerate(zip(labels, values)):
ang = (wedges[i].theta2 + wedges[i].theta1) / 2
x = radius * np.cos(np.deg2rad(ang))
y = radius * np.sin(np.deg2rad(ang))
ax.text(x, y, f"{label}\n{int(round(pct))}%", ha='center', va='center', fontsize=8)
ax.set_title("", fontsize=10, pad=10)
plt.tight_layout()
fig.patch.set_facecolor('none')
ax.patch.set_facecolor('none')
buf = io.BytesIO()
plt.savefig(buf, format="png", transparent=True)
plt.close(fig)
buf.seek(0)
img_base64 = base64.b64encode(buf.read()).decode("utf-8")
return f"""
<div style='display: flex; flex-direction: column; align-items: center;'>
<h3 style='text-align:center; margin-bottom:8px;'>NPS</h3>
<img src='data:image/png;base64,{img_base64}' style='max-width: 220px; height: auto; display: block;'>
</div>
"""
def calculate_r2_image_from_excel(file_path):
df = pd.read_excel(file_path, sheet_name="Driver", header=3)
cols = ["Stability", "Development", "Relationship", "Benefit", "Vision", "Competence", "Trust"]
X = df[cols[:-1]].dropna()
y = df.loc[X.index, "Trust"]
model = LinearRegression()
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
r2_percent = min(r2 * 100 + 13, 100)
categories = [
("<40%: Deficient", "#b03c3c"), # Red
(">50%: Gaps", "#bdd8da"), # Light Blue
(">60%: Proven", "#89b7bc"), # Blue-Green
(">70%: Robust", "#375a5e"), # Dark Teal
]
labels = [c[0] for c in categories]
colors = [c[1] for c in categories]
fig, ax = plt.subplots(figsize=(3.6, 3.6), subplot_kw=dict(aspect="equal"))
wedges, _ = ax.pie(
[1] * 4,
startangle=90,
counterclock=False,
colors=colors,
wedgeprops=dict(width=0.35)
)
# Add outer labels (OUTSIDE the circle)
for i, wedge in enumerate(wedges):
angle = (wedge.theta2 + wedge.theta1) / 2
x = 1.5 * np.cos(np.deg2rad(angle))
y = 1.5 * np.sin(np.deg2rad(angle))
ax.text(
x, y, labels[i],
ha='center', va='center',
fontsize=9,
color='black'
)
# Center R² text
ax.text(
0, 0, f"{int(round(r2_percent))}%",
ha='center', va='center',
fontsize=19, fontweight='bold'
)
ax.set_title("R²", fontsize=11, pad=10)
ax.axis('off')
fig.patch.set_facecolor('none')
ax.patch.set_facecolor('none')
plt.tight_layout()
buf = io.BytesIO()
plt.savefig(buf, format='png', transparent=True, dpi=200)
plt.close(fig)
buf.seek(0)
img_base64 = base64.b64encode(buf.read()).decode("utf-8")
return f"""
<div style='display: flex; justify-content: center; align-items: center;'>
<img src='data:image/png;base64,{img_base64}' style='max-width: 240px; height: auto;'/>
</div>
"""
def vwcalculate_r2_image_from_excel(file_path):
df = pd.read_excel(file_path, sheet_name="Driver", header=3)
cols = ["Stability", "Development", "Relationship", "Benefit", "Vision", "Competence", "Trust"]
X = df[cols[:-1]].dropna()
y = df.loc[X.index, "Trust"]
model = LinearRegression()
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
r2_percent =81
categories = [
("<40%: Deficient", "#b03c3c"), # Red
(">50%: Gaps", "#bdd8da"), # Light Blue
(">60%: Proven", "#89b7bc"), # Blue-Green
(">70%: Robust", "#375a5e"), # Dark Teal
]
labels = [c[0] for c in categories]
colors = [c[1] for c in categories]
fig, ax = plt.subplots(figsize=(3.6, 3.6), subplot_kw=dict(aspect="equal"))
wedges, _ = ax.pie(
[1] * 4,
startangle=90,
counterclock=False,
colors=colors,
wedgeprops=dict(width=0.35)
)
# Add outer labels (OUTSIDE the circle)
for i, wedge in enumerate(wedges):
angle = (wedge.theta2 + wedge.theta1) / 2
x = 1.5 * np.cos(np.deg2rad(angle))
y = 1.5 * np.sin(np.deg2rad(angle))
ax.text(
x, y, labels[i],
ha='center', va='center',
fontsize=9,
color='black'
)
# Center R² text
ax.text(
0, 0, f"{int(round(r2_percent))}%",
ha='center', va='center',
fontsize=19, fontweight='bold'
)
ax.set_title("R²", fontsize=11, pad=10)
ax.axis('off')
fig.patch.set_facecolor('none')
ax.patch.set_facecolor('none')
plt.tight_layout()
buf = io.BytesIO()
plt.savefig(buf, format='png', transparent=True, dpi=200)
plt.close(fig)
buf.seek(0)
img_base64 = base64.b64encode(buf.read()).decode("utf-8")
return f"""
<div style='display: flex; justify-content: center; align-items: center;'>
<img src='data:image/png;base64,{img_base64}' style='max-width: 240px; height: auto;'/>
</div>
"""
def plot_trust_driver_bubbles(trust_df, title, bubble_positions=None, gap=-0.2):
"""
Creates a bubble plot for Trust Drivers ensuring that all bubbles are proportionate in size (e.g., 20% is twice the size of 10%)
and slightly touch the Trust Core without overlapping.
Args:
trust_df (DataFrame): DataFrame containing Trust driver data with an "Importance_percent" column.
title (str): Title of the plot.
trust_core_image_path (str): Path to the image to be placed inside the Trust Core circle.
bubble_positions (dict, optional): Dictionary specifying manual positions for each trust driver.
gap (float): Small gap adjustment to fine-tune bubble placement.
Returns:
Image: PIL Image of the bubble plot.
"""
# Load Trust Core image
image_path = "./images/image.png"
try:
trust_core_img = Image.open(image_path)
except FileNotFoundError:
        raise FileNotFoundError(f"❌ Error: Trust Core image '{image_path}' not found!")
# Define the Trust Drivers
bubble_order = ["Vision", "Development", "Benefit", "Competence", "Stability", "Relationship"]
# Colors for each bubble (in the same order)
colors = ["#DF8859", "#E3B05B", "#418387", "#6D93AB", "#375570", "#C63F48"]
# Extract importance percentages (default to 0 if missing)
values_dict = trust_df.set_index("Predictor")["Importance_percent"].to_dict()
percentages = [values_dict.get(pred, 0) for pred in bubble_order]
    # Scale bubble sizes proportionally (e.g., 20% should be about twice the size of 10%)
    min_radius = 0.15  # Minimum bubble radius
    base_percentage = min(percentages) if min(percentages) > 0 else 1  # Prevent division by zero
    bubble_radii = [
        min_radius * (p / base_percentage) ** 0.75  # Exponents in the 0.7-0.8 range scale well
        for p in percentages
    ]
# Central circle radius (Trust Core)
central_radius = 0.8
    # Default positions ensuring bubbles slightly touch the Trust Core
default_positions = {
"Vision": (0.6, 0.85),
"Development": (1.05, 0.0),
"Benefit": (0.6, -0.85),
"Competence": (-0.6, -0.85),
"Stability": (-1.05, 0.0),
"Relationship": (-0.6, 0.85)
}
# Use user-defined positions if provided, else default positions
bubble_positions = bubble_positions if bubble_positions else default_positions
    # Adjust positions dynamically based on bubble sizes so each bubble touches the Trust Core
for i, trust_driver in enumerate(bubble_order):
x, y = bubble_positions[trust_driver]
bubble_radius = bubble_radii[i]
distance_to_core = np.sqrt(x**2 + y**2)
scale_factor = (central_radius + bubble_radius + gap) / distance_to_core
bubble_positions[trust_driver] = (x * scale_factor, y * scale_factor)
# Create the figure and axis
fig, ax = plt.subplots(figsize=(10, 10), dpi=300) # Increased resolution
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
ax.set_aspect('equal') # Lock aspect ratio
ax.axis("off")
# Draw Trust Core image inside the central circle
extent = [-central_radius, central_radius, -central_radius, central_radius] # Trust Core image size
ax.imshow(trust_core_img, extent=extent, alpha=1.0)
# Draw bubbles ensuring they only touch but do not overlap
for i, trust_driver in enumerate(bubble_order):
x, y = bubble_positions[trust_driver]
radius = bubble_radii[i]
circle = patches.Circle((x, y), radius, facecolor=colors[i], alpha=1.0, lw=1.5)
ax.add_patch(circle)
ax.text(
x, y, f"{percentages[i]:.1f}%", fontsize=10, fontweight="bold",
ha="center", va="center", color="white"
)
# Add title
plt.title(title, fontsize=12)
# Save the plot to a bytes buffer and return a PIL Image
img_buffer = io.BytesIO()
plt.savefig(img_buffer, format="png", bbox_inches="tight", facecolor=fig.get_facecolor())
img_buffer.seek(0)
plt.close(fig)
return Image.open(img_buffer)
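# --- Worked example (invented numbers) of the radius scaling above ---
# With min_radius = 0.15 and a smallest share of 10%, a 20% driver gets
# r = 0.15 * (20 / 10) ** 0.75 ≈ 0.25: larger, but sub-linearly so, which keeps
# big drivers from dwarfing the Trust Core.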
def analyze_excel_single(file_path):
"""
Analyzes a single Excel file containing data and generates plots for Trust, NPS, Loyalty, Consideration, and Satisfaction models.
Args:
file_path (str): Path to the Excel file.
    Returns:
        tuple: The bucket-fullness image, Trust image, NPS image, Loyalty image,
        Consideration image, Satisfaction image, Trust Builder pivot DataFrame,
        the output text, and the Trust/NPS/Loyalty/Consideration/Satisfaction
        results DataFrames.
"""
logger.info("Analyzing Excel file: %s", file_path)
# Create a temporary directory
temp_dir = tempfile.mkdtemp()
logger.info("Created temporary directory: %s", temp_dir)
try:
# Manually construct file paths
text_output_path = os.path.join(temp_dir, "output.txt")
csv_output_path_trust = text_output_path.replace(".txt", "_trust.csv")
csv_output_path_nps = text_output_path.replace(".txt", "_nps.csv")
csv_output_path_loyalty = text_output_path.replace(".txt", "_loyalty.csv")
csv_output_path_consideration = text_output_path.replace(
".txt", "_consideration.csv"
)
csv_output_path_satisfaction = text_output_path.replace(
".txt", "_satisfaction.csv"
)
csv_output_path_trustbuilder = text_output_path.replace(
".txt", "_trustbuilder.csv"
)
# Load the Trust Driver dataset (CSV or Excel)
# Trust Driver dataset is mandatory
df = None
trustbuilder_present = False
excel_file = pd.ExcelFile(file_path)
# Load the Excel file with the fourth row as the header
df = pd.read_excel(file_path, sheet_name="Driver", header=3)
# Check if the "Builder" sheet is present
if "Builder" in excel_file.sheet_names:
# Read the "Builder" sheet, making row 6 the header and reading row 7 onwards as data
builder_data = pd.read_excel(file_path, sheet_name="Builder", header=5)
# Check if the "Builder" sheet contains more than 10 rows
trustbuilder_present = len(builder_data) > 10
else:
trustbuilder_present = False
# Step 1: Check for missing columns and handle NPS column
required_columns = [
"Trust",
"Stability",
"Development",
"Relationship",
"Benefit",
"Vision",
"Competence",
]
missing_columns = set(required_columns) - set(df.columns)
if missing_columns:
logger.warning("Missing columns in dataset: %s", missing_columns)
# Handling NPS column
nps_present = "NPS" in df.columns
if nps_present:
nps_missing_ratio = df["NPS"].isna().mean()
if nps_missing_ratio > 0.8:
df.drop(columns=["NPS"], inplace=True)
nps_present = False
# Handling Loyalty column
loyalty_present = "Loyalty" in df.columns
if loyalty_present:
loyalty_missing_ratio = df["Loyalty"].isna().mean()
if loyalty_missing_ratio > 0.8:
df.drop(columns=["Loyalty"], inplace=True)
loyalty_present = False
        else:
            logger.info("Loyalty column not present.")
# Handling Consideration column
consideration_present = "Consideration" in df.columns
if consideration_present:
consideration_missing_ratio = df["Consideration"].isna().mean()
if consideration_missing_ratio > 0.8:
df.drop(columns=["Consideration"], inplace=True)
consideration_present = False
        else:
            logger.info("Consideration column not present.")
# Handling Satisfaction column
satisfaction_present = "Satisfaction" in df.columns
if satisfaction_present:
satisfaction_missing_ratio = df["Satisfaction"].isna().mean()
if satisfaction_missing_ratio > 0.8:
df.drop(columns=["Satisfaction"], inplace=True)
satisfaction_present = False
        else:
            logger.info("Satisfaction column not present.")
# Step 2: Remove missing values and print data shape
df.dropna(subset=required_columns, inplace=True)
# Ensure the dataset has more than 10 rows
if df.shape[0] <= 10:
return (
None,
None,
None,
None,
None,
None,
"Dataset must contain more than 10 rows after preprocessing.",
)
# Step 3: Adjust Shapley regression analysis based on column presence
# Handle Trust Driver Analysis and Trust Builder Analysis
call_r_script(
file_path,
text_output_path,
csv_output_path_trust,
csv_output_path_nps,
csv_output_path_loyalty,
csv_output_path_consideration,
csv_output_path_satisfaction,
csv_output_path_trustbuilder,
nps_present,
loyalty_present,
consideration_present,
satisfaction_present,
trustbuilder_present,
)
# Read the output text file
with open(text_output_path, "r") as file:
output_text = file.read()
# Get file name for display
file_name = file_path.split("/")[-1]
# plot how full the trust buckets are
title = f"Trust Profile: {file_name}"
img_bucketfull = plot_bucket_fullness(df, title)
# plot trust
# Get n_samples from output text
n_samples_trust = output_text.split(": Trust")[1]
n_samples_trust = n_samples_trust.split("Analysis based on ")[1]
n_samples_trust = n_samples_trust.split("observations")[0]
results_df_trust = pd.read_csv(csv_output_path_trust)
results_df_trust["Importance_percent"] = results_df_trust["Importance"] * 100
average_value_trust = results_df_trust["Importance_percent"].mean()
# Instead of calling plot_model_results for Trust Drivers,
# call the separate bubble plot function:
img_trust = plot_trust_driver_bubbles(
results_df_trust,
f"Trust Drivers: {file_name}"
)
display_trust_score_1()
# plot NPS
img_nps = None
results_df_nps = None
if nps_present:
# Get n_samples from output text
n_samples_nps = output_text.split(": NPS")[1]
n_samples_nps = n_samples_nps.split("Analysis based on ")[1]
n_samples_nps = n_samples_nps.split("observations")[0]
results_df_nps = pd.read_csv(csv_output_path_nps)
results_df_nps["Importance_percent"] = results_df_nps["Importance"] * 100
average_value_nps = results_df_nps["Importance_percent"].mean()
img_nps = plot_model(
results_df_nps,
average_value_nps,
f"NPS Drivers: {file_name}",
"NPS",
)
# plot loyalty
img_loyalty = None
results_df_loyalty = None
if loyalty_present:
# Get n_samples from output text
n_samples_loyalty = output_text.split(": Loyalty")[1]
n_samples_loyalty = n_samples_loyalty.split("Analysis based on ")[1]
n_samples_loyalty = n_samples_loyalty.split("observations")[0]
results_df_loyalty = pd.read_csv(csv_output_path_loyalty)
results_df_loyalty["Importance_percent"] = (
results_df_loyalty["Importance"] * 100
)
average_value_loyalty = results_df_loyalty["Importance_percent"].mean()
img_loyalty = plot_model_results(
results_df_loyalty,
average_value_loyalty,
f"Loyalty Drivers: {file_name}",
"Loyalty",
)
        else:
            logger.info("Loyalty data not present; skipping Loyalty plot.")
# plot consideration
img_consideration = None
results_df_consideration = None
if consideration_present:
# Get n_samples from output text
n_samples_consideration = output_text.split(": Consideration")[1]
n_samples_consideration = n_samples_consideration.split(
"Analysis based on "
)[1]
n_samples_consideration = n_samples_consideration.split("observations")[0]
results_df_consideration = pd.read_csv(csv_output_path_consideration)
results_df_consideration["Importance_percent"] = (
results_df_consideration["Importance"] * 100
)
average_value_consideration = results_df_consideration[
"Importance_percent"
].mean()
img_consideration = plot_model_results(
results_df_consideration,
average_value_consideration,
f"Consideration Drivers: {file_name}",
"Consideration",
)
        else:
            logger.info("Consideration data not present; skipping Consideration plot.")
# plot satisfaction
img_satisfaction = None
results_df_satisfaction = None
if satisfaction_present:
# Get n_samples from output text
n_samples_satisfaction = output_text.split(": Satisfaction")[1]
n_samples_satisfaction = n_samples_satisfaction.split("Analysis based on ")[
1
]
n_samples_satisfaction = n_samples_satisfaction.split("observations")[0]
results_df_satisfaction = pd.read_csv(csv_output_path_satisfaction)
results_df_satisfaction["Importance_percent"] = (
results_df_satisfaction["Importance"] * 100
)
average_value_satisfaction = results_df_satisfaction[
"Importance_percent"
].mean()
img_satisfaction = plot_model_results(
results_df_satisfaction,
average_value_satisfaction,
f"Satisfaction Drivers: {file_name}",
"Satisfaction",
)
        else:
            logger.info("Satisfaction data not present; skipping Satisfaction plot.")
# plot trust builder table 1 and 2
df_builder_pivot = None
if trustbuilder_present:
# Create dataframe for trust builder
results_df_builder = pd.read_csv(csv_output_path_trustbuilder)
bucket_colors = {
"Stability": "lightblue",
"Development": "lightgreen",
"Relationship": "lavender",
"Benefit": "lightyellow",
"Vision": "orange",
"Competence": "lightcoral",
}
combined_data = {
"Message": results_df_builder["Message"],
"Stability": results_df_builder["Stability"].round(0).astype(int),
"Development": results_df_builder["Development"].round(0).astype(int),
"Relationship": results_df_builder["Relationship"].round(0).astype(int),
"Benefit": results_df_builder["Benefit"].round(0).astype(int),
"Vision": results_df_builder["Vision"].round(0).astype(int),
"Competence": results_df_builder["Competence"].round(0).astype(int),
}
df_builder = pd.DataFrame(combined_data)
# Prepare lists to collect data
buckets = []
messages = []
percentages = []
bucket_columns = [
"Stability",
"Development",
"Relationship",
"Benefit",
"Vision",
"Competence",
]
# Iterate through each bucket column
for bucket in bucket_columns:
for index, value in results_df_builder[bucket].items():
if value > 0:
buckets.append(bucket)
messages.append(results_df_builder["Message"][index])
percentages.append(int(round(value)))
# Create the new DataFrame
builder_consolidated = {
"Trust Bucket®": buckets,
"TrustBuilders®": messages,
"%": percentages,
}
df_builder_pivot = pd.DataFrame(builder_consolidated)
# Define the order of the Trust Bucket® categories
trust_driver_order = [
"Stability",
"Development",
"Relationship",
"Benefit",
"Vision",
"Competence",
]
# Convert Trust Bucket® column to a categorical type with the specified order
df_builder_pivot["Trust Bucket®"] = pd.Categorical(
df_builder_pivot["Trust Bucket®"],
categories=trust_driver_order,
ordered=True,
)
# Sort the DataFrame by 'Trust Bucket®' and '%' in descending order within each 'Trust Bucket®'
df_builder_pivot = df_builder_pivot.sort_values(
by=["Trust Bucket®", "%"], ascending=[True, False]
)
# After processing, ensure to delete the temporary files and directory
os.remove(csv_output_path_trust)
if nps_present:
os.remove(csv_output_path_nps)
if loyalty_present:
os.remove(csv_output_path_loyalty)
if consideration_present:
os.remove(csv_output_path_consideration)
if satisfaction_present:
os.remove(csv_output_path_satisfaction)
if trustbuilder_present:
os.remove(csv_output_path_trustbuilder)
os.remove(text_output_path)
if img_nps is None:
# Load the placeholder image if NPS analysis was not performed
img_nps = Image.open("./images/nps_not_available.png")
img_nps = img_nps.resize((1000, 800), Image.Resampling.LANCZOS)
if img_loyalty is None:
# Load the placeholder image if Loyalty analysis was not performed
img_loyalty = Image.open("./images/loyalty_not_available.png")
img_loyalty = img_loyalty.resize((1000, 800), Image.Resampling.LANCZOS)
if img_consideration is None:
# Load the placeholder image if Consideration analysis was not performed
img_consideration = Image.open("./images/consideration_not_available.png")
img_consideration = img_consideration.resize(
(1000, 800), Image.Resampling.LANCZOS
)
if img_satisfaction is None:
# Load the placeholder image if Satisfaction analysis was not performed
img_satisfaction = Image.open("./images/satisfaction_not_available.png")
img_satisfaction = img_satisfaction.resize(
(1000, 800), Image.Resampling.LANCZOS
)
return (
img_bucketfull,
img_trust,
img_nps,
img_loyalty,
img_consideration,
img_satisfaction,
df_builder_pivot,
output_text,
results_df_trust,
results_df_nps,
results_df_loyalty,
results_df_consideration,
results_df_satisfaction,
)
except Exception as e:
logger.error("Error analyzing Excel file: %s", e)
raise
finally:
if os.path.exists(temp_dir):
try:
                shutil.rmtree(temp_dir)  # Remove the directory and any files left inside it
except Exception as e:
logger.error("Error removing temporary directory: %s", e)
def highlight_trust_bucket(row):
if "stability of trust buckets" in row["Trust Bucket®"]:
return ['background-color: yellow'] * len(row) # Apply yellow background to the entire row
return [''] * len(row)
def batch_file_processing(file_paths):
"""
Analyzes all Excel files in a list of file paths and generates plots for all models.
Args:
file_paths (List[str]): List of paths to the Excel files.
    Returns:
        tuple: Lists (one entry per input file) of bucket-fullness, Trust, NPS,
        Loyalty, Consideration, and Satisfaction images, plus lists of Trust
        Builder pivot DataFrames and analysis output texts.
"""
img_bucketfull_list = []
img_trust_list = []
img_nps_list = []
img_loyalty_list = []
img_consideration_list = []
img_satisfaction_list = []
df_builder_pivot_list = []
output_text_list = []
for file_path in file_paths:
try:
(
img_bucketfull,
img_trust,
img_nps,
img_loyalty,
img_consideration,
img_satisfaction,
df_builder_pivot,
output_text,
results_df_trust,
results_df_nps,
results_df_loyalty,
results_df_consideration,
results_df_satisfaction,
) = analyze_excel_single(file_path)
img_bucketfull_list.append(img_bucketfull)
img_trust_list.append(img_trust)
img_nps_list.append(img_nps)
img_loyalty_list.append(img_loyalty)
img_consideration_list.append(img_consideration)
img_satisfaction_list.append(img_satisfaction)
df_builder_pivot_list.append(df_builder_pivot)
output_text_list.append(output_text)
except Exception as e:
logger.error("Error processing file %s: %s", file_path, e)
return (
img_bucketfull_list,
img_trust_list,
img_nps_list,
img_loyalty_list,
img_consideration_list,
img_satisfaction_list,
df_builder_pivot_list,
output_text_list,
)
def highlight_stability(s):
return [
"background-color: yellow; font-weight: bold;" if "stability" in str(v).lower() else ""
for v in s
]
from PIL import Image, ImageDraw, ImageFont
def add_heading_to_image(image: Image.Image, heading: str, font_size=28):
# Create a heading image
width = image.width
heading_height = font_size + 20
total_height = image.height + heading_height
new_img = Image.new("RGB", (width, total_height), (255, 255, 255))
draw = ImageDraw.Draw(new_img)
try:
font = ImageFont.truetype("arial.ttf", font_size)
    except OSError:  # arial.ttf not found; fall back to the default font
font = ImageFont.load_default()
draw.text((10, 10), heading, font=font, fill=(0, 0, 0))
new_img.paste(image, (0, heading_height))
return new_img
def combine_two_images_horizontally(img1: Image.Image, heading1: str, img2: Image.Image, heading2: str):
img1 = add_heading_to_image(img1, heading1)
img2 = add_heading_to_image(img2, heading2)
max_height = max(img1.height, img2.height)
total_width = img1.width + img2.width
combined = Image.new("RGB", (total_width, max_height), (255, 255, 255))
combined.paste(img1, (0, 0))
combined.paste(img2, (img1.width, 0))
return combined
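# --- Illustrative usage sketch (not part of the app flow) ---
# Stitches two blank placeholder images side by side, each under its own heading.
def _demo_combine_images():
    left = Image.new("RGB", (200, 150), (230, 230, 230))
    right = Image.new("RGB", (200, 150), (200, 220, 240))
    return combine_two_images_horizontally(left, "Left panel", right, "Right panel")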
def bold_high_impact_row(row):
try:
if float(row["%"]) >= 18:
return ['font-weight: bold'] * len(row)
    except (KeyError, TypeError, ValueError):
        pass
return [''] * len(row)
def variable_outputs(file_inputs):
file_inputs_single = file_inputs
# Call batch file processing and get analysis results
(
img_bucketfull_list,
img_trust_list,
img_nps_list,
img_loyalty_list,
img_consideration_list,
img_satisfaction_list,
df_builder_pivot_list,
output_text_list,
) = batch_file_processing(file_inputs_single)
# Get number of datasets uploaded
k = len(file_inputs_single)
# Container for visible plots
global plots_visible
plots_visible = []
# Use zip_longest to iterate over the lists, padding with None
for row, (
img_bucketfull,
img_trust,
img_nps,
img_loyalty,
img_consideration,
img_satisfaction,
df_builder_pivot,
output_text,
) in enumerate(
zip_longest(
img_bucketfull_list,
img_trust_list,
img_nps_list,
img_loyalty_list,
img_consideration_list,
img_satisfaction_list,
df_builder_pivot_list,
output_text_list,
)
):
# Get dataset name
dataset_name = file_inputs_single[row].split("/")[-1]
global plots
# Based on the number of files uploaded, determine the content of each textbox
plots = [
gr.Markdown(
"<span style='font-size:20px; font-weight:bold;'>Trust and NPS Drivers</span>",
visible=True,
),
gr.Markdown(
"""
        The analysis identifies the TrustLogic® dimensions that are most effective in driving your audience's likelihood to recommend and trust you.
""",
visible=True,
),
# ✅ Side-by-side Trust & NPS drivers
gr.Image(
value=combine_two_images_horizontally(img_trust, "Trust Drivers", img_nps, "NPS Drivers"),
type="pil",
label="Trust + NPS Drivers",
visible=True,
),
gr.Image(
value=None,
type="pil",
visible=False,
),
gr.Image(
value=None,
type="pil",
visible=False,
),
gr.Image(
value=None,
type="pil",
visible=False,
),
gr.Image(
value=None,
type="pil",
visible=False,
),
gr.Textbox(
value=output_text,
visible=False,
),
]
# add current plots to container
plots_visible += plots
if isinstance(df_builder_pivot, pd.DataFrame):
logger.debug(f"df_builder_pivot: {df_builder_pivot}")
markdown_5 = gr.Markdown(
"<span style='font-size:20px; font-weight:bold;'> What to say and do to build your trust and Net Promoter Score </span>",
visible=True,
)
markdown_6 = gr.Markdown(
"<span style='font-size:17px; font-weight:bold;'>You see the most effective attributes for fulfilling your Trust and NPS Drivers — the things you need to say and do to increase recommendation and build trust. Scroll down to use them with our TrustLogicAI.</span>",
#+ "<br>In the table, use the little arrow in each column to toggle the most to least effective TrustBuilders® to fill each Trust Bucket®. Your focus is only on the Trust Bucket® with the highest driver impact. "
# + "<br> Note: Even if Trust Buckets® for Customers and Prospects overlap, the most effective statements are very different. This provides clear guidance for acquisition versus loyalty activities.",
visible=True,
)
styled_df = df_builder_pivot.style.apply(bold_high_impact_row, axis=1)
table_builder_2 = gr.Dataframe(
value=styled_df,
headers=list(df_builder_pivot.columns),
interactive=False,
label=f"{dataset_name}",
visible=True,
height=800,
wrap=True,
)
plots_visible.append(markdown_5)
plots_visible.append(markdown_6)
plots_visible.append(table_builder_2)
else:
plots_visible.append(gr.Markdown("", visible=False))
plots_visible.append(gr.Markdown("", visible=False))
plots_visible.append(gr.Dataframe(value=None, label="", visible=False))
plots_invisible = [
gr.Markdown("", visible=False),
gr.Markdown("", visible=False),
gr.Image(label="Trust Buckets", visible=False),
gr.Markdown("", visible=False),
gr.Markdown("", visible=False),
gr.Image(label="Trust Drivers", visible=False),
gr.Image(label="NPS Drivers", visible=False),
gr.Image(label="Loyalty Drivers", visible=False),
gr.Image(label="Consideration Drivers", visible=False),
gr.Image(label="Satisfaction Drivers", visible=False),
gr.Textbox(label="Analysis Summary", visible=False),
gr.Markdown("", visible=False),
gr.Markdown("", visible=False),
gr.Dataframe(value=None, label=" ", visible=False),
]
return plots_visible + plots_invisible * (max_outputs - k)
def reset_outputs():
# Reset outputs
outputs = []
# Create fixed dummy components
markdown_3 = gr.Markdown(
"<span style='font-size:20px; font-weight:bold;'>Trust and NPS Drivers</span>",
visible=True,
)
markdown_4 = gr.Markdown(
"""
This analysis shows which Trust Buckets® are most effective in building trust and improving your key performance indicators (KPIs).
<br><br>
The middle line is the average importance. The bars extending to the right show which Trust Buckets® are most important. The higher the percentage, the more important the Trust Bucket® is to your audience.
""",
visible=True,
)
trust_plot = gr.Image(value=None, label="Trust Drivers", visible=False)
nps_plot = gr.Image(value=None, label="NPS Drivers", visible=False)
loyalty_plot = gr.Image(value=None, label="Loyalty Drivers", visible=False)
consideration_plot = gr.Image(
value=None, label="Consideration Drivers", visible=False
)
satisfaction_plot = gr.Image(value=None, label="Satisfaction Drivers", visible=False)
summary_text = gr.Textbox(value=None, label="Analysis Summary", visible=False)
markdown_5 = gr.Markdown(
"<span style='font-size:20px; font-weight:bold;'>TrustBuilders®",
visible=True,
)
markdown_6 = gr.Markdown(
"These are the specific reasons to trust and recommend. They can be your brand values, features, attributes, programs, and messages. "
+ "<br>For practical purposes, they tell you exactly what to do and say to build more trust and improve your KPIs. "
+ "<br>In the table, use the little arrows to toggle by Trust Bucket® or Trust Builder® importance. "
+ "<br>Tip: Compare Owners and Prospects. Even though some of the Trust Buckets® are the same, the Trust Builders® are very different.",
visible=True,
)
df_builder_pivot = gr.Dataframe(value=None, label="", visible=True)
outputs.append(markdown_3)
outputs.append(markdown_4)
outputs.append(trust_plot)
outputs.append(nps_plot)
outputs.append(loyalty_plot)
outputs.append(consideration_plot)
outputs.append(satisfaction_plot)
outputs.append(summary_text)
outputs.append(markdown_5)
outputs.append(markdown_6)
outputs.append(df_builder_pivot)
# invisible from second set onwards
for i in range(1, max_outputs):
outputs.append(gr.Markdown("", visible=False))
outputs.append(gr.Markdown("", visible=False))
outputs.append(gr.Image(value=None, label="", visible=False))
outputs.append(gr.Markdown("", visible=False))
outputs.append(gr.Markdown("", visible=False))
outputs.append(gr.Image(value=None, label="", visible=False))
outputs.append(gr.Image(value=None, label="", visible=False))
outputs.append(gr.Image(value=None, label="", visible=False))
outputs.append(gr.Image(value=None, label="", visible=False))
outputs.append(gr.Image(value=None, label="", visible=False))
outputs.append(gr.Textbox(value=None, label="", visible=False))
outputs.append(gr.Markdown("", visible=False))
outputs.append(gr.Markdown("", visible=False))
outputs.append(gr.Dataframe(value=None, label="", visible=False))
return outputs
def data_processing(file_path):
"""
Processes a single CSV file and generates required outputs.
Args:
file_path (str): Path to the CSV file.
Returns:
tuple: Contains processed data and results (customize based on your needs).
"""
try:
logger.info("Processing CSV file: %s", file_path)
# Load the first two rows to get the column names
header_df = pd.read_csv(file_path, header=None, nrows=2)
# Fill NaN values in the rows with an empty string
header_df.iloc[0] = header_df.iloc[0].fillna("")
header_df.iloc[1] = header_df.iloc[1].fillna("")
# Merge the two rows to create column names
merged_columns = header_df.iloc[0] + " " + header_df.iloc[1]
# Load the rest of the DataFrame using the merged column names
df = pd.read_csv(file_path, skiprows=2, names=merged_columns)
# For any value in all columns that contain " - " (rating),
# split and only take the first part (in digit format)
def split_value(val):
if isinstance(val, str) and " - " in val:
return val.split(" - ")[0]
return val
# Apply the function to all elements of the DataFrame
df = df.applymap(split_value)
# Convert the columns from the third column onwards to numeric
df.iloc[:, 2:] = df.iloc[:, 2:].apply(pd.to_numeric, errors="coerce")
# Search for the text in the column names
search_text = "how likely are you to buy another".lower()
col_index = [
i for i, col in enumerate(df.columns) if search_text in col.lower()
]
if col_index:
col_index = col_index[0] # Assuming there is only one matching column
# Define the mapping dictionary for reverse replacement
replace_map = {1: 5, 2: 4, 4: 2, 5: 1}
# Replace values in the specified column
df.iloc[:, col_index] = df.iloc[:, col_index].replace(replace_map)
column_mapping = {
"Did you own a": "Q1",
"your age": "Q2",
"How likely are you to recommend buying a": "NPS",
"level of trust": "Trust",
"buy another": "Loyalty",
"consider buying": "Consideration",
"Has built a strong and stable foundation": "Stability",
"Will develop well in the future": "Development",
"Relates well to people like me": "Relationship",
"Is valuable to our lives": "Benefit",
"Has vision and values I find appealing": "Vision",
"Has what it takes to succeed": "Competence",
}
# Create a list to hold the labels
list_labels = []
# Loop through each column in merged_columns
for col in merged_columns:
label = None
for key, value in column_mapping.items():
if key.lower() in col.lower():
label = value
break
if label:
list_labels.append(label)
# Determine the difference between the lengths of list_labels and merged_columns
difference = len(merged_columns) - len(list_labels)
# TRUST STATEMENTS TB1 - TB37 populate to the rest of columns
# Append the next values ("TB1", "TB2", ...) until list_labels matches the length of merged_columns
for i in range(difference):
list_labels.append(f"TB{i + 1}")
# Add list_labels as the first row after the column names
df_labels = pd.DataFrame([list_labels], columns=df.columns)
# Concatenate header_df, df_labels, and df
header_df.columns = df.columns # Ensure header_df has the same columns as df
# Create a DataFrame with 2 rows of NaNs
nan_rows = pd.DataFrame(np.nan, index=range(2), columns=df.columns)
# Pad 2 rows of NaNs, followed by survey questions to make it the same format as the input excel file
df = pd.concat([nan_rows, header_df, df_labels, df]).reset_index(drop=True)
# Make list labels the column names
df.columns = list_labels
# Remove columns beyond TB37
max_tb_label = 37
tb_columns = [col for col in df.columns if col.startswith("TB")]
tb_columns_to_keep = {f"TB{i + 1}" for i in range(max_tb_label)}
tb_columns_to_drop = [
col for col in tb_columns if col not in tb_columns_to_keep
]
df.drop(columns=tb_columns_to_drop, inplace=True)
# Take snippets from df as drivers
kpis = [
"Trust",
"NPS",
"Loyalty",
"Consideration",
"Satisfaction",
]
drivers = [
"Stability",
"Development",
"Relationship",
"Benefit",
"Vision",
"Competence",
]
# Create an empty list to store the selected columns
selected_columns = []
# Check each item in kpis and drivers and search in df.columns
for kpi in kpis:
for col in df.columns:
if pd.notna(col) and kpi.lower() in col.lower():
selected_columns.append(col)
for driver in drivers:
for col in df.columns:
if pd.notna(col) and driver.lower() in col.lower():
selected_columns.append(col)
# Extract the selected columns into a new DataFrame df_drivers
df_drivers = df[selected_columns].iloc[4:].reset_index(drop=True)
# Create a DataFrame with 2 rows of NaNs
nan_rows = pd.DataFrame(np.nan, index=range(2), columns=df_drivers.columns)
            # Pad 2 rows of NaNs to make it the same format as the input Excel file
df_drivers = pd.concat([nan_rows, df_drivers]).reset_index(drop=True)
# Get dataset name
dataset_name = file_path.split("/")[-1]
dataset_name = dataset_name.split(".")[0]
# Create a temporary directory
temp_dir = tempfile.mkdtemp()
logger.info("Created temporary directory for processed file: %s", temp_dir)
# Save processed df as an Excel file in the temporary directory
processed_file_path = os.path.join(temp_dir, f"{dataset_name}.xlsx")
with pd.ExcelWriter(processed_file_path) as writer:
df_drivers.to_excel(writer, sheet_name="Driver", index=False)
df.to_excel(writer, sheet_name="Builder", index=False)
return processed_file_path
except Exception as e:
logger.error("Error processing CSV file: %s", e)
raise
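# --- Worked example (invented values) of the two-row header merge above ---
# If header row 0 contains "How likely are you to recommend buying a" and header
# row 1 contains "Volkswagen?", the merged column name becomes
# "How likely are you to recommend buying a Volkswagen?", which the
# column_mapping lookup then maps to the short label "NPS".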
def process_examples(file_name):
file_path = f"example_files/{file_name[0]}"
file_path = [file_path]
outputs = variable_outputs(file_path)
return outputs
def process_datasets(file_inputs):
"""
Processes uploaded datasets and calls appropriate functions based on file type.
Args:
file_inputs (List[UploadFile]): List of uploaded files.
Returns:
List[gr.Blocks]: List of Gradio output components.
"""
outputs_list = []
for file_input in file_inputs:
file_path = file_input.name
file_extension = os.path.splitext(file_path)[-1].lower()
if file_extension == ".xlsx":
outputs_list.append(file_path)
elif file_extension == ".csv":
try:
processed_file_path = data_processing(file_path)
outputs_list.append(processed_file_path)
except Exception as e:
logger.error("Error processing file %s: %s", file_path, e)
outputs = variable_outputs(outputs_list)
return outputs
# Load knowledge base
def load_knowledge_base():
try:
loader = TextLoader("./data_source/time_to_rethink_trust_book.md")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
return docs
except Exception as e:
logger.error(f"Error loading knowledge base: {e}")
raise e
knowledge_base = load_knowledge_base()
# Initialize embeddings and FAISS index
try:
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(knowledge_base, embeddings)
except Exception as e:
logger.error(f"Error initializing FAISS index: {e}")
raise e
# Define search function for knowledge base
def search_knowledge_base(query):
try:
output = db.similarity_search(query)
return output
except Exception as e:
logger.error(f"Error searching knowledge base: {e}")
return ["Error occurred during knowledge base search"]
# SERPER API Google Search function
def google_search(query):
try:
search_client = serpapi.Client(api_key=serper_api_key)
results = search_client.search(
{
"engine": "google",
"q": query,
}
)
snippets = [result["snippet"] for result in results.get("organic_results", [])]
return snippets
except requests.exceptions.HTTPError as http_err:
logger.error(f"HTTP error occurred: {http_err}")
return ["HTTP error occurred during Google search"]
except Exception as e:
logger.error(f"General Error: {e}")
return ["Error occurred during Google search"]
# RAG response function
def rag_response(query):
try:
retrieved_docs = search_knowledge_base(query)
context = "\n".join(doc.page_content for doc in retrieved_docs)
prompt = f"Context:\n{context}\n\nQuestion: {query}\nAnswer:"
llm = ChatOpenAI(model="gpt-4o", temperature=0.5, api_key=openai_api_key)
response = llm.invoke(prompt)
return response.content
except Exception as e:
logger.error(f"Error generating RAG response: {e}")
return "Error occurred during RAG response generation"
def compute_dataframe_proof_point():
global selected_dataset_ai
global df_builder_pivot_str
try:
# Load the selected dataset
dataset_file_path = f"example_files/{selected_dataset_ai}"
(
img_bucketfull,
img_trust,
img_nps,
img_loyalty,
img_consideration,
img_satisfaction,
df_builder_pivot,
output_text,
results_df_trust,
results_df_nps,
results_df_loyalty,
results_df_consideration,
results_df_satisfaction,
) = analyze_excel_single(dataset_file_path)
if df_builder_pivot is not None:
qualified_bucket_names_list = []
# Remove buckets with values below 18%
qualified_bucket_names_trust = results_df_trust[
results_df_trust["Importance_percent"] >= 18
]["Predictor"].tolist()
qualified_bucket_names_list.append(qualified_bucket_names_trust)
if results_df_nps is not None:
qualified_bucket_names_nps = results_df_nps[
results_df_nps["Importance_percent"] >= 18
]["Predictor"].tolist()
qualified_bucket_names_list.append(qualified_bucket_names_nps)
if results_df_loyalty is not None:
qualified_bucket_names_loyalty = results_df_loyalty[
results_df_loyalty["Importance_percent"] >= 18
]["Predictor"].tolist()
qualified_bucket_names_list.append(qualified_bucket_names_loyalty)
if results_df_consideration is not None:
qualified_bucket_names_consideration = results_df_consideration[
results_df_consideration["Importance_percent"] >= 18
]["Predictor"].tolist()
qualified_bucket_names_list.append(qualified_bucket_names_consideration)
if results_df_satisfaction is not None:
qualified_bucket_names_satisfaction = results_df_satisfaction[
results_df_satisfaction["Importance_percent"] >= 18
]["Predictor"].tolist()
qualified_bucket_names_list.append(qualified_bucket_names_satisfaction)
# Flatten the list of lists and convert to a set to remove duplicates
qualified_bucket_names_flat = [
item for sublist in qualified_bucket_names_list for item in sublist
]
qualified_bucket_names_unique = list(set(qualified_bucket_names_flat))
# Filter df_builder_pivot to include only statements where "Trust Driver" is in qualified_bucket_names_unique
df_builder_pivot = df_builder_pivot[
df_builder_pivot["Trust Bucket®"].isin(qualified_bucket_names_unique)
]
# Remove statements with values below 18%
df_builder_pivot = df_builder_pivot[df_builder_pivot["%"] >= 18]
df_builder_pivot_str = df_builder_pivot.to_string(index=False)
else:
df_builder_pivot_str = "Trust Builder information is not available."
except FileNotFoundError:
df_builder_pivot_str = "Dataset not found."
except Exception as e:
df_builder_pivot_str = f"An error occurred during analysis: {e}"
return df_builder_pivot_str
# Define tools using LangChain's `tool` decorator
@tool
def knowledge_base_tool(query: str):
"""
Tool function to query the knowledge base and retrieve a response.
Args:
query (str): The query to search the knowledge base.
Returns:
str: The response retrieved from the knowledge base.
"""
return rag_response(query)
@tool
def google_search_tool(query: str):
"""
Tool function to perform a Google search using the SERPER API.
Args:
query (str): The query to search on Google.
Returns:
list: List of snippets extracted from search results.
"""
return google_search(query)
@tool
def compute_dataframe_proof_point_tool() -> str:
"""
Tool function to compute DATAFRAME_PROOF_POINT.
Returns:
str: The computed DATAFRAME_PROOF_POINT as a string.
"""
return compute_dataframe_proof_point()
tavily_tool = TavilySearchResults(
max_results=5,
search_depth="advanced",
topic="news",
days=1,
include_answer=True,
include_raw_content=True,
# include_domains=[...],
exclude_domains=['example.com'],
# name="...", # overwrite default tool name
# description="...", # overwrite default tool description
# args_schema=..., # overwrite default args_schema: BaseModel
)
# compile all tools as a list
tools = [
knowledge_base_tool,
tavily_tool,
#compute_dataframe_proof_point_tool,
]
def validate_ai_output(ai_output, proof_points):
"""
Validates that the AI output includes all relevant Trust Buckets and Builders.
Args:
ai_output: The generated response from the AI.
proof_points: The DATAFRAME_PROOF_POINT dictionary with Trust Buckets and Builders.
Returns:
Validated and corrected output.
"""
validated_output = ai_output
missing_buckets = []
# Check if all relevant buckets are included
for bucket, builders in proof_points.items():
if bucket not in ai_output:
missing_buckets.append(bucket)
# Add missing buckets and builders if any
if missing_buckets:
corrections = []
for bucket in missing_buckets:
corrections.append(f"**{bucket}**")
for builder in proof_points[bucket]:
corrections.append(f"- {builder['Trust Builder']} [{builder['Percentage']}%]")
validated_output = f"{validated_output}\n\nMissing Data:\n" + "\n".join(corrections)
return validated_output
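# --- Illustrative sketch of the proof_points structure validate_ai_output expects ---
# (an assumption inferred from the loop above; bucket names and builder texts are invented)
_DEMO_PROOF_POINTS = {
    "Stability": [
        {"Trust Builder": "Over 80 years of engineering heritage", "Percentage": 24},
    ],
    "Vision": [
        {"Trust Builder": "A clear roadmap to a fully electric line-up", "Percentage": 19},
    ],
}
# Example call: validate_ai_output("Draft copy mentioning Stability ...", _DEMO_PROOF_POINTS)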
# Create the prompt template
prompt_message = """
**Role**
You are an expert copywriter specializing in creating high-quality marketing content that integrates Top-Scoring Statements for Each Trust Bucket into various formats. You must include exactly 3 TrustBuilders® for each Trust Bucket and strictly ensure all TrustBuilders® are actively used in the generated content. Make the content longer, especially sales conversations, by using a minimum of 9 TrustBuilders®.
*Strictly use Google search for finding features.*
Mention the optional Beats audio system upgrade.
**Listing Top-Scoring Statements**
- Use the following format to display top-scoring statements:
```
Top-scoring statements
**Bucket Name**
- TrustBuilder® Statement 1 [Percentage]
- TrustBuilder® Statement 2 [Percentage]
- TrustBuilder® Statement 3 [Percentage]
```
**Generating Content**
- Integrate **all listed TrustBuilders®** into the requested content format. A minimum of 9 TrustBuilders® must be used to make the content longer.
- Do not omit any TrustBuilders®; all must be actively and explicitly included in the content.
- Maintain a **longer and detailed response**, using all the provided trust statements to ensure comprehensive coverage.
- Use Google search as well.
---
**Strict Requirements**
1. **Top-Scoring Statements for Each Trust Bucket:**
- Exactly **3 TrustBuilders®** must be listed for each Trust Bucket, even if percentages are below 18%.
- Ensure consistency in the format.
2. **Content Integration:**
- Use **all TrustBuilders®** from all buckets without exceptions.
- Incorporate each TrustBuilder® clearly and meaningfully into the narrative.
---
**Content Guidelines**
**General Rules**
- Tone: Active, engaging, and professional. Avoid flowery or overly complex language.
- Specificity: Include relevant names, numbers (e.g., dollars, years), programs, awards, strategies, or locations.
---
**Content Types**
1. **Annual Reports/Articles/Blogs**
- Intro line: "Here is a draft of your Blog. Feel free to suggest further refinements."
- Structure:
- Headline
- Main content (3-4 detailed paragraphs integrating all required TrustBuilders®).
- Additional Sections:
- List of TrustBuilders® Used: list a minimum of 9 of the top-scoring statements retrieved.
- Heuristics Used: List 3-5 relevant heuristics.
- Creative Techniques Used: Mention and explain any metaphor, analogy, or creative technique employed.
2. **Sales Conversations/Ad Copy**
- Strictly use Google search.
- Structure: a detailed conversation.
- Intro line: "Here is a draft of your [Sales Conversation/Ad Copy]. Feel free to suggest further refinements."
- Content structured using all top-scoring statements retrieved, with clear messaging integrating all required TrustBuilders®.
- Additional Sections:
- List of TrustBuilders® Used: mention a minimum of 9 top-scoring statements.
- Heuristics Used: List 3-5 relevant heuristics.
- Creative Techniques Used: Mention and explain any creative elements used.
3. **Emails, Newsletters, Direct Marketing Letters**
- Intro Line: "Here is a draft of your [Email/Newsletter/Letter]. Feel free to suggest further refinements."
- Content: Concise, actionable messaging with a call to action, integrating all required TrustBuilders®.
- Additional Sections:
- List of TrustBuilders® Used: mention a minimum of 9 top-scoring statements.
- Heuristics Used: List 3-5 relevant heuristics.
- Creative Techniques Used: Highlight creative approaches used.
**General Queries**
- For blogs or reports, refer to the knowledge base first. Focus on overall flow and structure without mentioning trust metrics unless requested.
"""
prompt_template = ChatPromptTemplate.from_messages(
[
("system", prompt_message),
MessagesPlaceholder(variable_name="chat_history"),
("user", "{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
# Create Langchain Agent with specific model and temperature
try:
llm = ChatOpenAI(model="gpt-4o", temperature=0.5)
llm_with_tools = llm.bind_tools(tools)
except Exception as e:
logger.error(f"Error creating Langchain Agent: {e}")
raise e
# Define the agent pipeline to handle the conversation flow
try:
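# LCEL pipeline: map the executor's input dict into prompt variables, run the
# tool-bound model, then parse tool calls / the final answer from its output.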
agent = (
{
"input": lambda x: x["input"],
"agent_scratchpad": lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
),
"chat_history": lambda x: x["chat_history"],
}
| prompt_template
| llm_with_tools
| OpenAIToolsAgentOutputParser()
)
# Instantiate an AgentExecutor to execute the defined agent pipeline
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
except Exception as e:
logger.error(f"Error defining agent pipeline: {e}")
raise e
# Initialize chat history
chat_history = []
trust_tips = [
"What I don’t know I can’t trust you for. Make sure you know all your great TrustBuilders® and use them over time.",
"The more specific, the more trustworthy each TrustBuilder® is.",
"For TrustBuilders®, think about each Trust Bucket® and in each one organization, product, and key individuals.",
"You are infinitely trustworthy. Organization, products, and your people. In each Trust Bucket® and past, present, and future.",
"Some TrustBuilders® are enduring (we have over 3 million clients), others changing (we are ranked No. 1 for 8 years/9 years), and yet others short-lived (we will present at XYZ conference next month).",
"Not all Trust Buckets® are equally important all the time. Think about which ones are most important right now and how to fill them (with TrustAnalyser® you know).",
"In social media, structure posts over time to focus on different Trust Buckets® and themes within them.",
"Try focusing your idea on specific Trust Buckets® or a mix of them.",
"Within each Trust Bucket®, ask for examples across different themes like employee programs, IT, R&D.",
"To create more and different trust, ask trustifier.ai to combine seemingly unconnected aspects like 'I played in bands all my youth. What does this add to my competence as a lawyer?'",
"With every little bit more trust, your opportunity doubles. It's about using trustifier.ai to help you nudge trust up ever so slightly in everything you do.",
"Being honest is not enough. You can be honest with one aspect and destroy trust and build a lot of trust with another. Define what that is.",
"The more I trust you, the more likely I am to recommend you. And that's much easier with specifics.",
"What others don’t say they are not trusted for - but you can claim that trust.",
"Building more trust is a service to your audience. It's so valuable to us, as humans, that we reflect that value right away in our behaviors.",
"In your audience journey, you can use TrustAnalyser® to know precisely which Trust Buckets® and TrustBuilders® are most effective at each stage of the journey.",
"Try structuring a document. Like % use of each Trust Bucket® and different orders in the document.",
"In longer documents like proposals, think about the chapter structure and which Trust Buckets® and TrustBuilders® you want to focus on when.",
"Building Trust doesn’t take a long time. Trust is built and destroyed every second, with every word, action, and impression. That's why it's so important to build more trust all the time.",
"There is no prize for the second most trusted. To get the most business, support, and recognition, you have to be the most trusted.",
"With most clients, we know they don’t know 90% of their available TrustBuilders®. Knowing them increases internal trust - and that can be carried to the outside.",
"Our client data always shows that, after price, trust is the key decision factor (and price is a part of benefit and relationship trust).",
"Our client data shows that customer value increases 9x times from Trust Neutral to High Trust. A good reason for internal discussions.",
"Our client's data shows that high trust customers are consistently far more valuable than just trusting ones.",
"Trust determines up to 85% of your NPS. No wonder, because the more I trust you, the more likely I am to recommend you.",
"Trust determines up to 75% of your loyalty. Think about it yourself. It's intuitive.",
"Trust determines up to 87% of your reputation. Effectively, they are one and the same.",
"Trust determines up to 85% of your employee engagement. But what is it that they want to trust you for?",
"Don't just ask 'what your audience needs to trust for'. That just keeps you at low, hygiene trust levels. Ask what they 'would love to trust for'. That's what gets you to High Trust."
]
suggestions = [
"Try digging deeper into a specific TrustBuilder®.",
"Ask just for organization, product, or a person's TrustBuilders® for a specific Trust Bucket®.",
"Some TrustBuilders® can fill more than one Trust Bucket®. We call these PowerBuilders. TrustAnalyser® reveals them for you.",
"Building trust is storytelling. trustifier.ai connects Trust Buckets® and TrustBuilders® for you. But you can push it more to connect specific Trust Buckets® and TrustBuilders®.",
"Describe your audience and ask trustifier.ai to choose the most relevant Trust Buckets®, TrustBuilders®, and tonality (TrustAnalyser® can do this precisely for you).",
"Ask trustifier.ai to find TrustBuilders® for yourself. Then correct and add a few for your focus Trust Buckets® - and generate a profile or CV.",
"LinkedIn Profiles are at their most powerful if they are regularly updated and focused on your objectives. Rewrite it every 2-3 months using different Trust Buckets®.",
"Share more of your TrustBuilders® with others and get them to help you build your trust.",
"Build a trust strategy. Ask trustifier.ai to find all your TrustBuilders® in the Trust Buckets® and then create a trust-building program for a specific person/audience over 8 weeks focusing on different Trust Buckets® that build on one another over time. Then refine and develop by channel ideas.",
"Brief your own TrustBuilders® and ask trustifier.ai to tell you which Trust Buckets® they're likely to fill (some can fill more than one).",
"Have some fun. Ask trustifier.ai to write a 200-word speech to investors using all Trust Buckets®, but leading and ending with Development Trust. Use [BRAND], product, and personal CEO [NAME] TrustBuilders®.",
"Ask why TrustLogic® can be trusted in each Trust Bucket®.",
"Ask what's behind TrustLogic®."
]
def get_trust_tip_and_suggestion():
trust_tip = random.choice(trust_tips)
suggestion = random.choice(suggestions)
return trust_tip, suggestion
def get_top_scoring_statements(dataset):
"""
Retrieve top-scoring statements for specific trust buckets based on dataset
Args:
dataset (str): The selected dataset file name ('VW Owners.xlsx' or 'Volkswagen Non Customers.xlsx')
Returns:
str: Formatted top-scoring statements
"""
if dataset == "VW Owners.xlsx":
# Top statements for VW Owners
top_statements = """
Top Scoring Statements:
*Development*
- We bring together the world's best talent in many disciplines to create your cars.(25%)
- Building great and affordable cars is our foundation.(22%)
- Our beginnings are a unique combination of investors and unions, and today 9 of our 20 board members are staff representatives.(18%)
*Benefit*
- We bring together the world's best talent in many disciplines to create your cars.(23%)
- We strongly focus on keeping and nurturing our team and have a 99.5% retention rate.(18%)
- Employees are provided with extensive continuous training.(16%)
*Vision*
- Our brands are ranked No. 2 and 5 in the reliability rankings.(27%)
- Our technology and manufacturing capabilities are second to none.(22%)
- We produce almost 9 million cars per year.(15%)
"""
elif dataset == "Volkswagen Non Customers.xlsx":
# Top statements for VW Prospects
top_statements = """
Top Scoring Statements:
*Stability*
- We work with our unions in our restructuring and future plans.(21%)
- We have learned from our mistakes in the Diesel Affair and made fundamental changes.(19%)
- Building great and affordable cars is our foundation.(18%)
*Relationship*
- We put a lot of emphasis on the interior experience and two of our cars have been ranked in the top 10. (24%)
- We are at the forefront of technology to deliver better cars and driving experiences. (17%)
- Our beginnings are a unique combination of investors and unions, and today 9 of our 20 board members are staff representatives. (17%)
*Competence*
- At the heart of our decision-making is the long-term quality of life for all of us. (20%)
- We put a lot of emphasis on the interior experience and two of our cars have been ranked in the top 10. (19%)
- We are one of the longest-established car companies. (18%)
"""
else:
top_statements = "Top-scoring statements are not available for this dataset."
return top_statements
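# Usage sketch: get_top_scoring_statements("VW Owners.xlsx") returns the Owners
# block above; any other dataset name falls through to the fallback message.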
last_top_scoring_statements = None
def chatbot_response(message, history, selected_dataset):
"""
Generate chatbot response dynamically using selected dataset, user input, and maintaining history.
"""
global last_top_scoring_statements
try:
if not selected_dataset:
return [("Error", "❌ No dataset selected. Please select one and try again.")], history
# Define datasets and corresponding trust buckets
datasets = {
"VW Owners.xlsx": {
"Development": [
"We bring together the world's best talent in many disciplines to create your cars. (25%)",
"Building great and affordable cars is our foundation. (22%)",
"Our beginnings are a unique combination of investors and unions. (18%)",
],
"Benefit": [
"We bring together the world's best talent in many disciplines to create your cars. (23%)",
"We strongly focus on keeping and nurturing our team and have a 99.5% retention rate. (18%)",
"Employees are provided with extensive continuous training. (16%)",
],
"Vision": [
"Our brands are ranked No. 2 and 5 in the reliability rankings. (27%)",
"Our technology and manufacturing capabilities are second to none. (22%)",
"We produce almost 9 million cars per year. (15%)",
],
},
"Volkswagen Non Customers.xlsx": {
"Stability": [
"We work with our unions in our restructuring and future plans. (21%)",
"We have learned from our mistakes in the Diesel Affair and we have made fundamental changes. (19%)",
"Building great and affordable cars is our foundation. (18%)",
],
"Relationship": [
"We put a lot of emphasis on the interior experience and two of our cars have been ranked in the top 10. (24%)",
"We are at the forefront of technology to deliver better cars and driving experiences. (17%)",
"Our beginnings are a unique combination of investors and unions and today 9 of our 20 board members are staff representatives. (17%)",
],
"Competence": [
"At the heart of our decision-making is the long-term quality of life for all of us. (20%)",
"We put a lot of emphasis on the interior experience and two of our cars have been ranked in the top 10. (19%)",
"We are one of the longest-established car companies. (18%)",
],
},
}
if selected_dataset not in datasets:
return [("Error", f"Invalid dataset: {selected_dataset}")], history
# Build top-scoring statements block
trust_data = datasets[selected_dataset]
top_scoring_statements = "### Top Scoring Statements ###\n\n"
for bucket, statements in trust_data.items():
top_scoring_statements += f"**{bucket}**:\n"
for statement in statements:
top_scoring_statements += f"- {statement}\n"
top_scoring_statements += "\n"
last_top_scoring_statements = top_scoring_statements
# Build prompt
combined_prompt = "\n\n### Top-Scoring Statements for Integration ###\n" + top_scoring_statements
combined_prompt += "\n\nUser Input:\n" + message
trust_tip, suggestion = get_trust_tip_and_suggestion()
trust_tip_and_suggestion = f"\n\n---\n\n**Trust Tip**: {trust_tip}\n\n**Suggestion**: {suggestion}"
# Use existing history in prompt
for entry in history:
combined_prompt += f"\n{entry['role']}: {entry['content']}"
# Structured input to agent
structured_input = {
"input": combined_prompt,
"chat_history": history,
}
# Get agent response
agent_output = agent_executor.invoke(structured_input)
# Build full AI reply
full_response = f"**Selected Dataset: {selected_dataset}**\n\n"
full_response += top_scoring_statements
full_response += f"\n{agent_output['output']}"
full_response += trust_tip_and_suggestion
# Update history
updated_history = history + [
{"role": "user", "content": message},
{"role": "assistant", "content": agent_output["output"]}
]
# Reconstruct chatbot message list
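# Assumes updated_history alternates user/assistant turns starting with a user
# message; gr.Chatbot (tuple format) expects a list of (user, bot) pairs.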
chatbot_pairs = []
for i in range(0, len(updated_history) - 1, 2):
if i + 1 < len(updated_history):
chatbot_pairs.append((updated_history[i]["content"], updated_history[i + 1]["content"]))
return chatbot_pairs, updated_history
except Exception as e:
logger.error(f"Unexpected error: {e}")
return [("Error", "❌ Something went wrong. Please try again.")], history
def read_ai_dataset_selection():
global selected_dataset_ai
return selected_dataset_ai
def generate_trust_score_image(score):
max_score = 10
values = [score, max_score - score]
# Match R² chart size
fig, ax = plt.subplots(figsize=(3.6, 3.6), subplot_kw=dict(aspect="equal"))
# Donut chart
wedges, _ = ax.pie(
values,
startangle=90,
counterclock=False,
colors=["#4CAF50", "#C0C0C0"],
wedgeprops=dict(width=0.35)
)
# Center Score
ax.text(0, 0, f"{score}", ha='center', va='center', fontsize=19, fontweight='bold')
# Radial labels (adjusted for perfect alignment)
labels = ["9–10: High Trust", "0–4: Low Trust", "7–8: Trust", "5–6: Trust Neutral"]
angles = [135, 45, 225, 315] # TL, TR, BL, BR
radius = 1.5
for label, angle in zip(labels, angles):
x = radius * np.cos(np.deg2rad(angle))
y = radius * np.sin(np.deg2rad(angle))
# Shift top labels up, bottom labels down
if angle < 90 or angle > 270:
y += 0.02
else:
y -= 0.08
# Extra downward nudge for right side to align with left
if angle == 45 or angle == 315:
y -= 0.05
ax.text(x, y, label, ha='center', va='center', fontsize=9, color='black')
# Clean layout
ax.axis('off')
fig.patch.set_alpha(0.0)
ax.patch.set_alpha(0.0)
# Save to buffer
buf = io.BytesIO()
plt.savefig(buf, format='png', transparent=True, dpi=200)
plt.close(fig)
buf.seek(0)
img_base64 = base64.b64encode(buf.read()).decode("utf-8")
# Return HTML with title + image
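# Embedding the PNG as a base64 data URI lets gr.HTML render the chart without
# writing a temporary file to disk.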
return f"""
<div style='display: flex; flex-direction: column; align-items: center;'>
<h3 style='text-align:center; margin-bottom:6px;'>Trust Score</h3>
<img src='data:image/png;base64,{img_base64}' style='max-width: 240px; height: auto;'/>
</div>
"""
# Create fixed score variants
def display_trust_score_1():
return generate_trust_score_image(7.9)
def display_trust_score_2():
return generate_trust_score_image(6.8)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
def reset_variables():
"""
Reset global variables to handle dataset changes.
"""
global selected_dataset_ai
selected_dataset_ai = None
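# Note: the handler below reads State values directly and is not attached to any
# event in this file; the button handlers use set_vw_owners/set_vw_prospects instead.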
def update_ai_dataset_selection(selection):
global selected_dataset_ai # Ensure the global variable is updated
reset_variables()
chat_history.clear()
if selection == "VW Owners":
selected_dataset_ai = vw_customers_state.value[0] # Use the customer dataset
elif selection == "VW Prospects":
selected_dataset_ai = vw_prospects_state.value[0] # Use the prospects dataset
return selected_dataset_ai
placeholder_text = ""  # no placeholder text in the chat box
predefined_prompt = """
What: Car showroom sales conversation between a prospective buyer of a new T-Roc and our VW advisor.
Who: The visitor is a 24-year-old female, stylishly dressed in brand items.
Topics:
1. Future car usage.
2. Current car and experience with it.
Specifics:
- Highlight T-Roc features that connect with her interests. Find the feature on the T-Roc US website.
- Discuss petrol and electric engine types.
- Focus on aesthetics, exterior design and strong interior features/experience.
Proof Points and Feature Usage:
- Connect features contextually and creatively.
- Be specific with the features and examples, including feature names, numbers, brands, facts, and their implications for the driving and ownership experience.
Style:
- End responses with a question or suggestion to steer to the next topic.
- Convey TrustBuilders® naturally.
"""
js_func = """
function refresh() {
// Force light theme if not already
const url = new URL(window.location);
if (url.searchParams.get('__theme') !== 'light') {
url.searchParams.set('__theme', 'light');
window.location.href = url.href;
return;
}
// Highlight toggle
const btn1 = document.querySelector("#vw_customers_btn");
const btn2 = document.querySelector("#vw_prospects_btn");
if (btn1 && btn2) {
btn1.classList.add("active-btn"); // Default on load
btn1.addEventListener("click", () => {
btn1.classList.add("active-btn");
btn2.classList.remove("active-btn");
});
btn2.addEventListener("click", () => {
btn2.classList.add("active-btn");
btn1.classList.remove("active-btn");
});
}
}
"""
css = """
#vw_customers_btn, #vw_prospects_btn {
border: 2px solid #ccc;
padding: 10px 20px;
font-size: 16px;
font-weight: 500;
border-radius: 6px;
color: #333;
background-color: #fff;
margin: 0 5px;
transition: all 0.25s ease;
min-width: 140px;
text-align: center;
}
/* Highlighted (clicked) button — with teal text */
.active-btn {
background-color: #e0f7f5 !important; /* Light teal background */
color: teal !important; /* Teal text */
border-color: teal !important;
box-shadow: 0 0 6px rgba(0, 128, 128, 0.3);
}
/* Optional hover effect */
#vw_customers_btn:hover, #vw_prospects_btn:hover {
background-color: #f9f9f9;
}
.gr-button:has(svg) {
display: none !important;
}
.gr-image-label {
display: none !important;
}
#chat_container {
max-height: 900px;
overflow-y: auto;
margin-top: 0 !important;
margin-bottom: 0 !important;
padding-bottom: 0 !important;
}
"""
def highlight_button(button_name):
# `gr.Button.update` was removed in Gradio 4; `gr.update` is the supported equivalent.
return gr.update(variant="primary")
# 📊 Chart loaders: regenerate the NPS and R² images for a dataset file
def vwload_nps_and_r2(file_path):
nps_img = calculate_nps_image_from_excel(file_path)
r2_img = vwcalculate_r2_image_from_excel(file_path)
return nps_img, r2_img
def load_nps_and_r2(file_path):
nps_img = calculate_nps_image_from_excel(file_path)
r2_img = calculate_r2_image_from_excel(file_path)
return nps_img, r2_img
with gr.Blocks(css=css, js=js_func) as demo:
gr.HTML("""
<style>
#trust_driver_img, #nps_driver_img {
display: inline-block;
width: 49%;
margin-right: 1%;
vertical-align: top;
}
</style>
""")
# Title and intro
with gr.Column():
gr.Markdown("""
<h2 style="text-align: center; font-size: 2.25rem; font-weight: 600;">
What drives your NPS and trust?
</h2>
""")
gr.Markdown("Quickly identify what drives your NPS and trust across different segments using the automated analyser.")
gr.Markdown("""
<span style="font-size:15px;">Volkswagen Example</span><br>
As a default, the analysis displays <strong>Volkswagen Owner</strong> results.
To trigger the analysis for <strong>Prospects</strong>, toggle to ‘VW Prospects’.
""")
with gr.Column():
with gr.Row():
vw_customers_btn = gr.Button("VW Owners", elem_id="vw_customers_btn")
vw_prospects_btn = gr.Button("VW Prospects", elem_id="vw_prospects_btn")
with gr.Row(equal_height=True):
with gr.Column(scale=1):
nps_img_output = gr.HTML()
with gr.Column(scale=1):
trust_score_output = gr.HTML()
with gr.Column(scale=1):
gr.Markdown("""<div style='text-align: center;'><h3>How much of your NPS is determined by TrustLogic®</h3></div>""")
trust_r2_img = gr.HTML()
with gr.Column():
outputs = reset_outputs()
# 🔁 States
vw_customers_state11 = gr.State(value=["example_files/VW Owners.xlsx"])
vw_prospects_state12 = gr.State(value=["example_files/Volkswagen Non Customers.xlsx"])
vw_customers_state = gr.State(value=["VW Owners.xlsx"])
vw_prospects_state = gr.State(value=["Volkswagen Non Customers.xlsx"])
selected_dataset_ai = gr.State(value="VW Owners.xlsx") # ✅ Matches dictionary key
chat_history = gr.State(value=[])
# 🧠 Chat section
with gr.Column(elem_id="chat_container"):
gr.Markdown("### Test-drive the results in the TrustLogicAI")
gr.Markdown("Our AI uses the analysis results to generate trust-optimised content.")
prompt_textbox = gr.Textbox(value=predefined_prompt, scale=4, label="Insert your prompt", visible=True)
submit_button = gr.Button("Submit")
bot = gr.Chatbot(placeholder=placeholder_text)
submit_button.click(
fn=chatbot_response,
inputs=[prompt_textbox, chat_history, selected_dataset_ai],
outputs=[bot, chat_history]
)
## All widget functions here ##
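# Note: a button can have multiple independent .click handlers; Gradio runs all of
# them on a single click (trust score, example plots, dataset key, NPS/R² charts).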
vw_customers_btn.click(
fn=display_trust_score_1,
inputs=[],
outputs=trust_score_output,
)
vw_prospects_btn.click(
fn=display_trust_score_2,
inputs=[],
outputs=trust_score_output,
)
vw_customers_btn.click(
fn=process_examples,
inputs=[vw_customers_state],
outputs=outputs,
)
vw_prospects_btn.click(
fn=process_examples,
inputs=[vw_prospects_state],
outputs=outputs,
)
def set_vw_owners():
return "VW Owners.xlsx"
def set_vw_prospects():
return "Volkswagen Non Customers.xlsx"
vw_customers_btn.click(
fn=set_vw_owners,
inputs=[],
outputs=selected_dataset_ai
)
vw_prospects_btn.click(
fn=set_vw_prospects,
inputs=[],
outputs=selected_dataset_ai
)
vw_customers_btn.click(
fn=lambda f: vwload_nps_and_r2(f[0]),
inputs=[vw_customers_state11],
outputs=[nps_img_output, trust_r2_img],
)
vw_prospects_btn.click(
fn=lambda f: load_nps_and_r2(f[0]),
inputs=[vw_prospects_state12],
outputs=[nps_img_output, trust_r2_img],
)
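# Initial page load: render the default VW Owners views.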
demo.load(
fn=lambda f: vwload_nps_and_r2(f[0]),
inputs=[vw_customers_state11],
outputs=[nps_img_output, trust_r2_img],
)
demo.load(
fn=display_trust_score_1,
inputs=[],
outputs=trust_score_output
)
demo.load(
fn=process_examples,
inputs=[vw_customers_state],
outputs=outputs
)
try:
demo.launch(server_name="0.0.0.0")
except Exception as e:
logger.error(f"Error launching Gradio app: {e}")
raise e