Upload dashboard.py
dashboard.py
ADDED
@@ -0,0 +1,212 @@
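# NOTE: the triple-quoted block below appears to be an earlier, stand-alone version of
# analyze_data() (it loads session_data.json itself and prints/shows the results). It is
# left in the file but is effectively disabled; the active implementation starts at the
# imports that follow it.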
"""import pandas as pd
import json
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
import numpy as np

def analyze_data():
    # Load JSON data
    with open("session_data.json", "r") as file:
        data = json.load(file)

    # Extract data
    interactions = data["interactions"]
    timestamp = data.get("timestamp", "No Timestamp")

    # Parse interactions into a DataFrame for analysis
    interaction_data = []
    for interaction in interactions:
        sentiment = interaction["sentiment"]

        if isinstance(sentiment, list) and len(sentiment) > 0:
            sentiment_label = sentiment[0]["label"]
            sentiment_score = sentiment[0]["score"]
        elif isinstance(sentiment, dict):
            sentiment_label = sentiment.get("label", "No Sentiment")
            sentiment_score = sentiment.get("score", 0.0)
        else:
            sentiment_label = "No Sentiment"
            sentiment_score = 0.0

        sentiment_change = interaction["sentiment_change"]
        recommendations = interaction["product_recommendations"]
        objection = interaction["objection_handling"]

        interaction_data.append({
            "Transcription": interaction["transcription"],
            "Sentiment Label": sentiment_label,
            "Sentiment Score": sentiment_score,
            "Sentiment Change": sentiment_change,
            "Objection": objection[0] if objection else None,
            "Objection Response": objection[1] if len(objection) > 1 else None,
            "Recommendations": recommendations,
        })

    df = pd.DataFrame(interaction_data)

    # Convert list of recommendations to a string (hashable type)
    df['Recommendations_Str'] = df['Recommendations'].apply(lambda x: ', '.join([rec[0] for rec in x]) if isinstance(x, list) else str(x))
    df['Interaction Count'] = df.groupby('Recommendations_Str')['Recommendations_Str'].transform('count')
    df['CLV'] = df['Interaction Count'] * 10  # Example formula

    # Display data
    print(f"Data Timestamp: {timestamp}")
    print("\nCustomer Interaction Summary:")
    print(df)

    # Insights: Sentiment Trends (Pie chart)
    sentiment_counts = df["Sentiment Label"].value_counts()
    fig, ax = plt.subplots(figsize=(4, 4))
    sentiment_counts.plot(kind="pie", autopct="%1.1f%%", ax=ax)
    ax.set_ylabel("")
    plt.title("Sentiment Trends")
    plt.show()

    # Product Recommendations (Bar chart)
    all_recommendations = [rec[0] for recs in df["Recommendations"] for rec in recs]
    recommendation_counts = pd.Series(all_recommendations).value_counts()
    fig, ax = plt.subplots(figsize=(5, 3))
    recommendation_counts.plot(kind="bar", color="skyblue", ax=ax)
    ax.set_title("Top Products")
    plt.show()

    # Predictive Modeling for Sentiment Scores
    if len(df) > 1:
        X = np.arange(len(df)).reshape(-1, 1)
        y = df["Sentiment Score"].fillna(0).values

        model = LinearRegression()
        model.fit(X, y)

        future_steps = np.arange(len(df), len(df) + 5).reshape(-1, 1)
        predicted_scores = model.predict(future_steps)

        print("\nPredicted Sentiment Scores (Next 5 Interactions):")
        print(predicted_scores)

        # Visualization
        fig, ax = plt.subplots(figsize=(6, 4))
        ax.plot(range(len(df)), y, label="Actual Scores", marker="o")
        ax.plot(range(len(df), len(df) + 5), predicted_scores, label="Predicted Scores", linestyle="--", marker="o")
        ax.set_title("Sentiment Score Trends")
        ax.set_xlabel("Interaction Index")
        ax.set_ylabel("Sentiment Score")
        ax.legend()
        plt.show()

    # AI Recommendations for Sales Improvement
    # print("\nAI Recommendations for Sales Improvement:")
    #print("""
    #1. Address objections related to pricing and promotions.
    #2. Highlight the most recommended products to align with customer preferences.
    #3. Use sentiment trends to identify weak conversation points.
    #4. Optimize follow-ups based on predicted sentiment scores.
    #""")

    # Display Customer Lifetime Value (CLV)
    #print("\nCustomer Lifetime Value (CLV):")
    #print(df[['Recommendations_Str', 'Interaction Count', 'CLV']])

#if __name__ == "__main__":
#analyze_data()"""
import pandas as pd
import json
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
import numpy as np
from wordcloud import WordCloud
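
# Expected shape of session_data (inferred from the lookups in analyze_data() below;
# the exact schema produced upstream may differ):
#   {
#       "timestamp": "<string>",
#       "interactions": [
#           {
#               "transcription": "<string>",
#               "sentiment": [{"label": "<string>", "score": <float>}],  # or a single dict
#               "objection_handling": {"objection": "<string>", "response": "<string>"},
#               "product_recommendations": [["<product name>", ...], ...],  # first element is used as the name
#           },
#           ...
#       ],
#   }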

def analyze_data(session_data):
    # Extract data from the passed session data
    interactions = session_data["interactions"]
    timestamp = session_data.get("timestamp", "No Timestamp")

    # Parse interactions into a DataFrame
    interaction_data = []
    all_transcriptions = []
    for interaction in interactions:
        sentiment = interaction["sentiment"]

        if isinstance(sentiment, list) and len(sentiment) > 0:
            sentiment_label = sentiment[0]["label"]
            sentiment_score = sentiment[0]["score"]
        elif isinstance(sentiment, dict):
            sentiment_label = sentiment.get("label", "No Sentiment")
            sentiment_score = sentiment.get("score", 0.0)
        else:
            sentiment_label = "No Sentiment"
            sentiment_score = 0.0

        recommendations = interaction["product_recommendations"]
        objection = interaction["objection_handling"]
        transcription = interaction["transcription"]
        all_transcriptions.append(transcription)

        interaction_data.append({
            "Transcription": transcription,
            "Sentiment Label": sentiment_label,
            "Sentiment Score": sentiment_score,
            "Objection": objection.get("objection") if objection else None,
            "Objection Response": objection.get("response") if objection else None,
            "Recommendations": recommendations,
        })

    df = pd.DataFrame(interaction_data)

    # Handle insufficient data for sentiment predictions
    if len(df) <= 1:
        sentiment_predictions = "Insufficient data for predictions"
    else:
        # Predictive Modeling for Sentiment Scores
        X = np.arange(len(df)).reshape(-1, 1)
        y = df["Sentiment Score"].fillna(0).values

        model = LinearRegression()
        model.fit(X, y)

        future_steps = np.arange(len(df), len(df) + 5).reshape(-1, 1)
        sentiment_predictions = model.predict(future_steps)

    # Generate Insights
    sentiment_counts = df["Sentiment Label"].value_counts()

    # Create a pie chart for sentiment trends
    fig_sentiment, ax = plt.subplots(figsize=(4, 4))
    sentiment_counts.plot(kind="pie", autopct="%1.1f%%", ax=ax)
    ax.set_ylabel("")
    plt.title("Sentiment Trends")

    # Create a bar chart for product recommendations
    all_recommendations = [rec[0] for recs in df["Recommendations"] for rec in recs]
    recommendation_counts = pd.Series(all_recommendations).value_counts()
    fig_recommendations, ax = plt.subplots(figsize=(5, 3))
    recommendation_counts.plot(kind="bar", color="skyblue", ax=ax)
    ax.set_title("Top Products")

    # Generate a Word Cloud for Transcriptions
    all_text = " ".join(all_transcriptions)
    wordcloud = WordCloud(background_color="white").generate(all_text)
    fig_wordcloud, ax = plt.subplots(figsize=(6, 4))
    ax.imshow(wordcloud, interpolation="bilinear")
    ax.axis("off")
    plt.title("Call Topics")

    # Actionable Recommendations
    actionable_recommendations = [
        "Focus on objection handling related to pricing concerns.",
        "Highlight top-performing products during calls.",
        "Leverage sentiment trends to adjust tone and messaging.",
        "Plan follow-ups based on predicted sentiment trends."
    ]

    # Return results for Streamlit display
    return {
        "timestamp": timestamp,
        "summary_table": df,
        "sentiment_chart": fig_sentiment,
        "recommendation_chart": fig_recommendations,
        "sentiment_predictions": sentiment_predictions,
        "wordcloud": fig_wordcloud,
        "actionable_recommendations": actionable_recommendations,
    }
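

# ---------------------------------------------------------------------------
# Minimal local sanity check (a sketch only; not used by the Streamlit app).
# The sample payload below is hypothetical and simply mirrors the keys that
# analyze_data() reads. A Streamlit front end would call analyze_data(session_data)
# and render the returned pieces with, for example, st.dataframe(results["summary_table"]),
# st.pyplot(results["sentiment_chart"]), and st.write(results["actionable_recommendations"]).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    _sample_session = {
        "timestamp": "2024-01-01 10:00:00",
        "interactions": [
            {
                "transcription": "The price seems a bit high for this plan.",
                "sentiment": [{"label": "NEGATIVE", "score": 0.62}],
                "objection_handling": {"objection": "Pricing", "response": "Offered a discounted tier."},
                "product_recommendations": [["Starter Plan", 0.8]],
            },
            {
                "transcription": "That discount works for me, thanks.",
                "sentiment": [{"label": "POSITIVE", "score": 0.91}],
                "objection_handling": {},
                "product_recommendations": [["Starter Plan", 0.7], ["Support Add-on", 0.4]],
            },
        ],
    }

    results = analyze_data(_sample_session)
    print(f"Timestamp: {results['timestamp']}")
    print(results["summary_table"])
    print("Predicted sentiment scores:", results["sentiment_predictions"])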