import gradio as gr
import json
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go

from visualization.ngram_visualizer import create_ngram_visualization
from visualization.topic_visualizer import process_and_visualize_topic_analysis

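# Illustrative sketch (not part of the original pipeline): the functions below read
# the analysis-results dict with roughly the shape shown here. The key names
# ("analyses", "bag_of_words", "models", "important_words", "differential_words",
# "word_count_matrix") are the ones the code below actually accesses; the prompt,
# model names, and counts are made-up placeholders for demonstration only.
_EXAMPLE_ANALYSIS_RESULTS = {
    "analyses": {
        "Explain photosynthesis.": {
            "bag_of_words": {
                "models": ["model-a", "model-b"],
                "important_words": {
                    "model-a": [{"word": "light", "count": 7}, {"word": "energy", "count": 5}],
                    "model-b": [{"word": "chlorophyll", "count": 6}, {"word": "light", "count": 4}],
                },
                "differential_words": ["chlorophyll", "energy"],
                "word_count_matrix": {
                    "chlorophyll": {"model-a": 1, "model-b": 6},
                    "energy": {"model-a": 5, "model-b": 2},
                },
            },
        },
    },
}
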
def create_bow_visualization(analysis_results):
    """
    Create visualizations for bag-of-words analysis results.

    Args:
        analysis_results (dict or str): Analysis results from the bag-of-words
            analysis, either as a dict or a JSON-encoded string.

    Returns:
        list: List of Gradio components with visualizations.
    """
    # Parse analysis results if they arrive as a JSON string
    if isinstance(analysis_results, str):
        try:
            results = json.loads(analysis_results)
        except json.JSONDecodeError:
            return [gr.Markdown("Error parsing analysis results.")]
    else:
        results = analysis_results

    output_components = []

    # Check that we have valid results
    if not results or "analyses" not in results:
        return [gr.Markdown("No analysis results found.")]

    # Process each prompt
    for prompt, analyses in results["analyses"].items():
        output_components.append(gr.Markdown(f"## Analysis of Prompt: \"{prompt}\""))

        # Process bag-of-words analysis if available
        if "bag_of_words" in analyses:
            bow_results = analyses["bag_of_words"]

            # Show which models are being compared
            models = bow_results.get("models", [])
            if len(models) >= 2:
                output_components.append(
                    gr.Markdown(f"### Comparing responses from {models[0]} and {models[1]}")
                )

            # Bar chart of the most important words for each model
            important_words = bow_results.get("important_words", {})
            if important_words:
                for model_name, words in important_words.items():
                    df = pd.DataFrame(words)

                    fig = px.bar(
                        df,
                        x='word',
                        y='count',
                        title=f"Top Words Used by {model_name}",
                        labels={'word': 'Word', 'count': 'Frequency'},
                        height=400,
                    )
                    fig.update_layout(
                        xaxis_title="Word",
                        yaxis_title="Frequency",
                        xaxis={'categoryorder': 'total descending'},
                    )
                    output_components.append(gr.Plot(value=fig))

            # Visualize differential words (words with the biggest frequency difference);
            # requires at least two models so both sides of the comparison exist
            diff_words = bow_results.get("differential_words", [])
            word_matrix = bow_results.get("word_count_matrix", {})
            if diff_words and word_matrix and len(models) >= 2:
                output_components.append(gr.Markdown("### Words with Biggest Frequency Differences"))

                # Build a dataframe for plotting
                model1, model2 = models[0], models[1]
                diff_data = []
                for word in diff_words[:15]:  # Limit to top 15 for readability
                    if word in word_matrix:
                        counts = word_matrix[word]
                        diff_data.append({
                            "word": word,
                            model1: counts.get(model1, 0),
                            model2: counts.get(model2, 0),
                        })

                if diff_data:
                    diff_df = pd.DataFrame(diff_data)

                    # Grouped bar chart comparing the two models
                    fig = go.Figure()
                    fig.add_trace(go.Bar(
                        x=diff_df['word'],
                        y=diff_df[model1],
                        name=model1,
                        marker_color='indianred',
                    ))
                    fig.add_trace(go.Bar(
                        x=diff_df['word'],
                        y=diff_df[model2],
                        name=model2,
                        marker_color='lightsalmon',
                    ))
                    fig.update_layout(
                        title="Word Frequency Comparison",
                        xaxis_title="Word",
                        yaxis_title="Frequency",
                        barmode='group',
                        height=500,
                    )
                    output_components.append(gr.Plot(value=fig))

    # If only the prompt heading was added, show a message
    if len(output_components) <= 1:
        output_components.append(gr.Markdown("No detailed Bag of Words analysis found in results."))

    return output_components
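
# Usage sketch: create_bow_visualization can be called directly with a stored
# result (or a JSON string of one) to get a flat list of gr.Markdown / gr.Plot
# components, e.g.:
#
#     bow_components = create_bow_visualization(_EXAMPLE_ANALYSIS_RESULTS)
#
# where _EXAMPLE_ANALYSIS_RESULTS is the placeholder dict defined above; in the
# app the dict comes from the analysis pipeline instead.
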
def process_and_visualize_analysis(analysis_results):
    """
    Process the analysis results and create visualization components.

    Args:
        analysis_results (dict): The analysis results.

    Returns:
        list: List of Gradio components for visualization.
    """
    try:
        print(f"Starting visualization of analysis results: {type(analysis_results)}")
        components = []

        if not analysis_results or "analyses" not in analysis_results:
            print("Warning: Empty or invalid analysis results")
            components.append(gr.Markdown("No analysis results to visualize."))
            return components

        # Process each prompt in the analysis results
        for prompt, analyses in analysis_results.get("analyses", {}).items():
            print(f"Visualizing results for prompt: {prompt[:30]}...")
            components.append(gr.Markdown(f"## Analysis for Prompt:\n\"{prompt}\""))

            # Bag-of-words analysis
            if "bag_of_words" in analyses:
                print("Processing Bag of Words visualization")
                components.append(gr.Markdown("### Bag of Words Analysis"))
                bow_results = analyses["bag_of_words"]

                # List the models that were compared
                if "models" in bow_results:
                    models = bow_results["models"]
                    components.append(gr.Markdown(f"**Models compared**: {', '.join(models)}"))

                # Most common words for each model
                if "important_words" in bow_results:
                    components.append(gr.Markdown("#### Most Common Words by Model"))
                    for model, words in bow_results["important_words"].items():
                        print(f"Creating word list for model {model}")
                        word_list = [f"{item['word']} ({item['count']})" for item in words[:10]]
                        components.append(gr.Markdown(f"**{model}**: {', '.join(word_list)}"))

                # Word-frequency differences between the first two models
                if ("differential_words" in bow_results
                        and "word_count_matrix" in bow_results
                        and len(bow_results["models"]) >= 2):
                    diff_words = bow_results["differential_words"]
                    word_matrix = bow_results["word_count_matrix"]
                    models = bow_results["models"]

                    if diff_words and word_matrix:
                        components.append(gr.Markdown("### Words with Biggest Frequency Differences"))
                        model1, model2 = models[0], models[1]
                        for word in diff_words[:10]:  # Limit to top 10 for readability
                            if word in word_matrix:
                                counts = word_matrix[word]
                                model1_count = counts.get(model1, 0)
                                model2_count = counts.get(model2, 0)
                                # Only include words whose counts actually differ
                                if abs(model1_count - model2_count) > 0:
                                    components.append(gr.Markdown(
                                        f"- **{word}**: {model1}: {model1_count}, {model2}: {model2_count}"
                                    ))

            # N-gram analysis (delegated to the dedicated n-gram visualizer)
            if "ngram_analysis" in analyses:
                print("Processing N-gram visualization")
                ngram_components = create_ngram_visualization(
                    {"analyses": {prompt: {"ngram_analysis": analyses["ngram_analysis"]}}}
                )
                components.extend(ngram_components)

            # Topic-modeling analysis (delegated to the dedicated topic visualizer)
            if "topic_modeling" in analyses:
                print("Processing Topic Modeling visualization")
                topic_components = process_and_visualize_topic_analysis(
                    {"analyses": {prompt: {"topic_modeling": analyses["topic_modeling"]}}}
                )
                components.extend(topic_components)

        if not components:
            components.append(gr.Markdown("No visualization components could be created from the analysis results."))

        print(f"Visualization complete: generated {len(components)} components")
        return components

    except Exception as e:
        import traceback
        error_msg = f"Visualization error: {str(e)}\n{traceback.format_exc()}"
        print(error_msg)
        return [gr.Markdown(f"**Error during visualization:**\n\n```\n{error_msg}\n```")]
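

if __name__ == "__main__":
    # Minimal smoke-test sketch (an assumption, not part of the original app flow):
    # run the visualization on the placeholder results defined above and report
    # what was produced, without launching a Gradio interface.
    demo_components = process_and_visualize_analysis(_EXAMPLE_ANALYSIS_RESULTS)
    print(f"Built {len(demo_components)} components:")
    for component in demo_components:
        print(f"  - {type(component).__name__}")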