Commit: wrapping up Space

Files changed:
- README.md +16 -3
- app.py +157 -90
- poetry.lock +79 -1
- pyproject.toml +1 -0
README.md
@@ -1,6 +1,6 @@
 ---
-title:
-emoji:
+title: State of the Union Analysis
+emoji: π
 colorFrom: purple
 colorTo: yellow
 sdk: gradio
@@ -8,6 +8,19 @@ sdk_version: 4.42.0
 app_file: app.py
 pinned: false
 license: mit
+short_description: A set of data visualizations for all recorded U.S. State of the Union speeches and messages.
+datasets: jsulz/state-of-the-union-addresses
 ---
 
-
+# State of the Union Analysis
+
+This Space is a Gradio data dashboard for visualizing different aspects of State of the Union addresses over the years.
+
+The data comes from a Hugging Face dataset - [jsulz/state-of-the-union-addresses](https://huggingface.co/datasets/jsulz/state-of-the-union-addresses). To read more about how the data was collected and transformed, visit the [dataset card](https://huggingface.co/datasets/jsulz/state-of-the-union-addresses).
+
+The Space makes use of:
+
+- Gradio
+- Plotly (for the charts)
+- nltk (to create an n-gram visualization)
+- datasets (for loading the dataset from Hugging Face)
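For readers who want to poke at the underlying data outside the Space, a minimal sketch (assuming the dataset's default "train" split, which app.py below also uses):

from datasets import load_dataset

# Load the Space's source dataset and convert it to a Pandas DataFrame,
# exactly as app.py does further down in this commit.
dataset = load_dataset("jsulz/state-of-the-union-addresses")
df = dataset["train"].to_pandas()
print(df.columns)  # includes columns such as "potus", "date", and "speech_html"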
app.py
@@ -1,27 +1,20 @@
+from collections import Counter
 import gradio as gr
 from datasets import load_dataset
 from nltk.util import ngrams
-from collections import Counter
 import pandas as pd
 import plotly.express as px
 import plotly.graph_objects as go
 from plotly.subplots import make_subplots
-import
+from matplotlib import pyplot as plt
+from wordcloud import WordCloud
 
 # Load the dataset and convert it to a Pandas dataframe
 sotu_dataset = "jsulz/state-of-the-union-addresses"
 dataset = load_dataset(sotu_dataset)
 df = dataset["train"].to_pandas()
-#
-
-df["tokens-nostop"] = df["tokens-nostop"].apply(
-    lambda x: x.decode("utf-8")
-    .replace('"', "")
-    .replace("[", "")
-    .replace("]", "")
-    .split(",")
-)
-"""
+# Do some on-the-fly calculations
+# calculate the number of words in each address
 df["word_count"] = df["speech_html"].apply(lambda x: len(x.split()))
 # calculate the automated readability index reading ease score for each address
 # automated readability index = 4.71 * (characters/words) + 0.5 * (words/sentences) - 21.43
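As a standalone illustration of the ARI formula in the comment above (a sketch only — the app applies it to the "no-contractions" column, and sentence splitting on "." is the same rough heuristic the app uses):

def automated_readability_index(text: str) -> float:
    # ARI = 4.71 * (characters/words) + 0.5 * (words/sentences) - 21.43
    characters = len(text)
    words = len(text.split())
    sentences = len(text.split("."))  # crude sentence count, as in the app
    return 4.71 * (characters / words) + 0.5 * (words / sentences) - 21.43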
@@ -30,33 +23,133 @@ df["ari"] = df["no-contractions"].apply(
     + (0.5 * (len(x.split()) / len(x.split("."))))
     - 21.43
 )
+# Sort the dataframe by date because Plotly doesn't do any of this automatically
 df = df.sort_values(by="date")
 written = df[df["categories"] == "Written"]
 spoken = df[df["categories"] == "Spoken"]
 
+"""
+Helper functions for Plotly charts
+"""
+
+
+def plotly_ngrams(n_grams, potus):
+    if potus is not None:
+        # Filter on the potus
+        potus_df = df[df["potus"] == potus]
+        # Create a counter generator for the n-grams
+        trigrams = (
+            potus_df["tokens-nostop"]
+            .apply(lambda x: list(ngrams(x, n_grams)))
+            .apply(Counter)
+            .sum()
+        )
+        # get the most common trigrams
+        common_trigrams = trigrams.most_common(10)
+        # unzip the list of tuples and plot the trigrams and counts as a bar chart
+        trigrams, counts = zip(*common_trigrams)
+        # join the trigrams into a single string
+        trigrams = [" ".join(trigram) for trigram in trigrams]
+        # create a dataframe from the trigrams and counts
+        trigrams_df = pd.DataFrame({"trigrams": trigrams, "counts": counts})
+        fig4 = px.bar(
+            trigrams_df,
+            x="counts",
+            y="trigrams",
+            title=f"{potus}'s top {n_grams}-grams",
+            orientation="h",
+            height=400,
+        )
+        return fig4
+
+
+def plotly_word_and_ari(president):
+    if president != "All" and president is not None:
+        potus_df = df[df["potus"] == president]
+        fig5 = make_subplots(specs=[[{"secondary_y": True}]])
+        fig5.add_trace(
+            go.Scatter(
+                x=potus_df["date"],
+                y=potus_df["word_count"],
+                name="Word Count",
+            ),
+            secondary_y=False,
+        )
+        fig5.add_trace(
+            go.Scatter(
+                x=potus_df["date"],
+                y=potus_df["ari"],
+                name="ARI",
+            ),
+            secondary_y=True,
+        )
+        # Add figure title
+        fig5.update_layout(title_text="Address Word Count and ARI")
+
+        # Set x-axis title
+        fig5.update_xaxes(title_text="Date of Address")
+
+        # Set y-axes titles
+        fig5.update_yaxes(title_text="Word Count", secondary_y=False)
+        fig5.update_yaxes(title_text="ARI", secondary_y=True)
+        return fig5
+
+
+def plt_wordcloud(president):
+    if president != "All" and president is not None:
+        potus_df = df[df["potus"] == president]
+        lemmatized = potus_df["lemmatized"].apply(lambda x: " ".join(x))
+        # build a single string from lemmatized
+        lemmatized = " ".join(lemmatized)
+        # create a wordcloud from the lemmatized column of the dataframe
+        wordcloud = WordCloud(background_color="white", width=800, height=400).generate(
+            lemmatized
+        )
+        # create a matplotlib figure
+        fig6 = plt.figure(figsize=(8, 4))
+        # add the wordcloud to the figure
+        plt.tight_layout()
+        plt.imshow(wordcloud, interpolation="bilinear")
+        plt.axis("off")
+        return fig6
+
+
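A toy run of the counting pattern inside plotly_ngrams, showing how nltk's ngrams() and collections.Counter fit together (illustrative token values only):

from collections import Counter
from nltk.util import ngrams

tokens = ["state", "of", "the", "union", "of", "the", "union"]
# ngrams() yields tuples of n consecutive tokens; Counter tallies them,
# and most_common() picks the top results, as in plotly_ngrams above.
bigram_counts = Counter(ngrams(tokens, 2))
print(bigram_counts.most_common(2))
# [(('of', 'the'), 2), (('the', 'union'), 2)]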
 # Create a Gradio interface with blocks
 with gr.Blocks() as demo:
+    # Build out the top level static charts and content
     gr.Markdown(
         """
         # A Dashboard to Analyze the State of the Union Addresses
+        This dashboard provides an analysis of all State of the Union (SOTU) addresses from 1790 to 2020, including written and spoken addresses. The data is sourced from the [State of the Union Addresses dataset](https://huggingface.co/datasets/jsulz/state-of-the-union-addresses) on the Hugging Face Datasets Hub. You can read more about how the data was gathered and cleaned on the dataset card. To read the speeches themselves, visit [The American Presidency Project's State of the Union page](https://www.presidency.ucsb.edu/documents/presidential-documents-archive-guidebook/annual-messages-congress-the-state-the-union), where this data was sourced.
         """
     )
-
-
-
-
-
-
-
-
-
-
-
-
+    # Basic line chart showing the total number of words in each address
+    with gr.Row():
+        gr.Markdown(
+            """
+            ## The shape of words
+            The line chart to the right shows the total number of words in each address. However, not all SOTUs are created equal. From 1801 to 1912, each address was a written message to Congress. In 1913, Woodrow Wilson broke with tradition and delivered his address in person. Since then, the addresses have been a mix of written and spoken (mostly spoken).
+
+            The spikes you see in the early 1970s and early 1980s are from written addresses by Richard Nixon and Jimmy Carter, respectively.
+            """
+        )
+        fig1 = px.line(
+            df,
+            x="date",
+            y="word_count",
+            title="Total Number of Words in Addresses",
+            line_shape="spline",
+        )
+        fig1.update_layout(
+            xaxis=dict(title="Date of Address"),
+            yaxis=dict(title="Word Count"),
+        )
+        gr.Plot(fig1, scale=2)
     # group by president and category and calculate the average word count, sorted by date
     avg_word_count = (
         df.groupby(["potus", "categories"])["word_count"].mean().reset_index()
     )
+    # Build a bar chart showing the average number of words in each address by president
     fig2 = px.bar(
         avg_word_count,
         x="potus",
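The groupby above is plain split-apply-combine; on toy data (made-up numbers, same column names):

import pandas as pd

toy = pd.DataFrame(
    {
        "potus": ["Adams", "Adams", "Wilson"],
        "categories": ["Written", "Written", "Spoken"],
        "word_count": [1000, 3000, 2000],
    }
)
# One average word count per (president, category) pair, as in avg_word_count.
print(toy.groupby(["potus", "categories"])["word_count"].mean().reset_index())
# Adams/Written -> 2000.0, Wilson/Spoken -> 2000.0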
@@ -76,7 +169,14 @@ with gr.Blocks() as demo:
         ),
         legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1),
     )
+    gr.Markdown(
+        """
+        Now that we have a little historical context, what does this data look like if we split things out by president? The bar chart below shows the average number of words in each address by president. The bars are grouped by written and spoken addresses.
+        """
+    )
     gr.Plot(fig2)
+
+    # Create a line chart showing the Automated Readability Index in each address
     with gr.Row():
         ari = df[["potus", "date", "ari", "categories"]]
         fig3 = px.line(
@@ -90,83 +190,50 @@ with gr.Blocks() as demo:
            xaxis=dict(title="Date of Address"),
            yaxis=dict(title="ARI Score"),
        )
-    gr.Plot(fig3)
+        gr.Plot(fig3, scale=2)
+        gr.Markdown(
+            """
+            The line chart to the left shows the Automated Readability Index (ARI) for each speech by year. The ARI is calculated using the formula: 4.71 * (characters/words) + 0.5 * (words/sentences) - 21.43. In general, ARI scores correspond to U.S. grade levels. For example, an ARI of 8.0 corresponds to an 8th grade reading level.
+
+            While there are other scores that are more representative of attributes we might want to measure, they require values like syllable counts. The ARI is a simple score to compute with our data.
+
+            The drop-off is quite noticeable, don't you think? ;)
+            """
+        )
+    gr.Markdown(
+        """
+        ## Dive Deeper on Each President
+
+        Use the dropdown to select a president and go a little deeper.
+
+        To begin with, there is an [n-gram](https://en.wikipedia.org/wiki/N-gram) bar chart built from all of the given president's addresses. An n-gram is a contiguous sequence of n items from a given sample of text or speech. Because written and spoken speech is littered with so-called "stop words" such as "and", "the", and "but", we've removed these from the text.
+
+        The slider only goes up to 4-grams because the data is sparse beyond that. I personally found the n-grams from our last three presidents to be less than inspiring and full of platitudes. Earlier presidents have more interesting n-grams.
+
+        Next up is a word cloud of the lemmatized text from the president's addresses. [Lemmatization](https://en.wikipedia.org/wiki/Lemmatization) is the process of grouping together the inflected forms of a word so they can be analyzed as a single item. Think of this as a more advanced version of [stemming](https://en.wikipedia.org/wiki/Stemming) where we can establish novel links between words like "better" and "good" that might otherwise be overlooked in stemming.
+
+        You can also see a line chart of word count and ARI for each address.
+        """
+    )
     # get all unique president names
     presidents = df["potus"].unique()
     # convert presidents to a list
     presidents = presidents.tolist()
     # create a dropdown to select a president
     president = gr.Dropdown(label="Select a President", choices=presidents)
+    # create a slider for number of word grams
     grams = gr.Slider(minimum=1, maximum=4, step=1, label="N-grams", interactive=True)
 
-
-
-
-        potus_df = df[df["potus"] == potus]
-        # decode the tokens-nostop column from a byte array to a list of strings
-        trigrams = (
-            potus_df["tokens-nostop"]
-            .apply(lambda x: list(ngrams(x, n_grams)))
-            .apply(Counter)
-            .sum()
-        )
-        # get the most common trigrams
-        common_trigrams = trigrams.most_common(10)
-        # unzip the list of tuples and plot the trigrams and counts as a bar chart
-        trigrams, counts = zip(*common_trigrams)
-        # join the trigrams into a single string
-        trigrams = [" ".join(trigram) for trigram in trigrams]
-        # create a dataframe from the trigrams and counts
-        trigrams_df = pd.DataFrame({"trigrams": trigrams, "counts": counts})
-        fig4 = px.bar(
-            trigrams_df,
-            x="counts",
-            y="trigrams",
-            title=f"{potus}'s top {n_grams}-grams",
-            orientation="h",
-            height=400,
-        )
-        return fig4
+    # show a bar chart of the top n-grams for a selected president
+    if president != "All" and president is not None:
+        gr.Plot(plotly_ngrams, inputs=[grams, president])
 
     if president != "All" and president is not None:
-        gr.Plot(
-
-
-    if president != "All" and president is not None:
-        potus_df = df[df["potus"] == president]
-        fig5 = make_subplots(specs=[[{"secondary_y": True}]])
-        fig5.add_trace(
-            go.Scatter(
-                x=potus_df["date"],
-                y=potus_df["word_count"],
-                name="Word Count",
-            ),
-            secondary_y=False,
-        )
-        fig5.add_trace(
-            go.Scatter(
-                x=potus_df["date"],
-                y=potus_df["ari"],
-                name="ARI",
-            ),
-            secondary_y=True,
-        )
-        # Add figure title
-        fig5.update_layout(title_text="Address Word Count and ARI")
-
-        # Set x-axis title
-        fig5.update_xaxes(title_text="Date of Address")
-
-        # Set y-axes titles
-        fig5.update_yaxes(title_text="Word Count", secondary_y=False)
-        fig5.update_yaxes(title_text="ARI", secondary_y=True)
-        return fig5
-
-    # calculate the total number of words in the speech_html column and add it to a new column
-    # if the president is "All", show the word count for all presidents
-    # if the president is not "All", show the word count for the selected president
+        gr.Plot(plt_wordcloud, scale=2, inputs=[president])
+
+    # show a line chart of word count and ARI for a selected president
     if president != "All" and president is not None:
-        gr.Plot(
+        gr.Plot(plotly_word_and_ari, inputs=[president])
 
 
 demo.launch(share=True)
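The "better" to "good" link mentioned in the markdown above is exactly what WordNet-based lemmatization gives you. A sketch with nltk (the Space itself ships a pre-lemmatized "lemmatized" column, so this is illustration only, and it assumes the WordNet corpus has been downloaded):

from nltk.stem import WordNetLemmatizer

# nltk.download("wordnet") is required once before this will run.
lemmatizer = WordNetLemmatizer()
print(lemmatizer.lemmatize("better", pos="a"))  # "good" (adjective lemma)
print(lemmatizer.lemmatize("addresses"))        # "address" (default noun lemma)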
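The core refactor in app.py is moving the inline chart code into functions and handing those functions to gr.Plot with inputs, so each chart re-renders when the dropdown or slider changes. A stripped-down sketch of that same wiring (hypothetical chart, same pattern as the commit):

import gradio as gr
import plotly.express as px

def chart_for(choice):
    # Re-runs whenever the dropdown changes; returning a figure updates the plot.
    if choice is not None:
        return px.bar(x=["a", "b"], y=[1, 2], title=choice)

with gr.Blocks() as demo:
    choice = gr.Dropdown(label="Pick one", choices=["First", "Second"])
    gr.Plot(chart_for, inputs=[choice])

demo.launch()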
poetry.lock
@@ -2422,6 +2422,84 @@ files = [
     {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"},
 ]
 
+[[package]]
+name = "wordcloud"
+version = "1.9.3"
+description = "A little word cloud generator"
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "wordcloud-1.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5fce423a24e6ca1b89b2770a7c6917d6e26f04bcfefa601cf61819b2fc0770c4"},
+    {file = "wordcloud-1.9.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3b6adfc1465b9176b8bc602745dd3ed8ea782b006a81cb59eab3dde92ad9f94c"},
+    {file = "wordcloud-1.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad6db37a6f5abeba51a5d503228ea320d4f2fa774864103e7b24acd9dd86fd0e"},
+    {file = "wordcloud-1.9.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5e74ac99e9582873d7ee28bd03e125dcf73ae46666d55fb4c13e82e90c0e074a"},
+    {file = "wordcloud-1.9.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:4001317c0e3b5cb6fd106228ddcd27524d1caf9ae468b3c2c2fc571c6ce56b22"},
+    {file = "wordcloud-1.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5f86042e5ce12e2795798033a56f0246906b4d7d9027d554b6cd951ce2fd342a"},
+    {file = "wordcloud-1.9.3-cp310-cp310-win32.whl", hash = "sha256:3b90f0390c0a05ba4b4580fb765a3d45d8d21519b50ca5006d6dbdc2a0b86507"},
+    {file = "wordcloud-1.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:6f7977285df9254b8704d3f895c06814a6183c6c89e140d6281848c076635e91"},
+    {file = "wordcloud-1.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7ced0d5c946d82cfc778febafe3eedeb0bae07dd57ea4f21fe06b9ec8225ab31"},
+    {file = "wordcloud-1.9.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6f5499e6360219e61808dc0d2b00cd5104f78a82d2ae8f7986df04731713835f"},
+    {file = "wordcloud-1.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb1e8bb7d60f7a90fa8439c7b56dd1df60766115fd57480ac0d83ca5204e0117"},
+    {file = "wordcloud-1.9.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e33328044db5c01487f2a3a023b5476947942dacd6a5dc8c217fa039f6c5bd9"},
+    {file = "wordcloud-1.9.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:998dc0dc8fcbff88f566f17cb5e0eb3bb21fcafd387b0670be6c14feacaf4cdc"},
+    {file = "wordcloud-1.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e1a1c3cfa86b605a19711ec58920ccb694dca9d5c9d00b373f4d5952d63793e9"},
+    {file = "wordcloud-1.9.3-cp311-cp311-win32.whl", hash = "sha256:f504e3291256c0b6fca044602f8f0e5cb56b7c33724cde9d279c4077fa5b6d27"},
+    {file = "wordcloud-1.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:103c9b0465e1cf5b7a38b49ab1c3a0b0301762fa56602ac79287f9d22b46ade3"},
+    {file = "wordcloud-1.9.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dfea303fc1dec4811e4a5671a8021a89724b6fa70639d059ad30c492932be447"},
+    {file = "wordcloud-1.9.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:512f3c9a2e8579269a33ac9219d042fd0cc5a3a524ee68079238a3e4efe2b879"},
+    {file = "wordcloud-1.9.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d00d916509a17b432032161d492ed7f30b2ebd921303090fe1d2b57011a49cc0"},
+    {file = "wordcloud-1.9.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d5e0e7bbd269a62baa63ea2175faea4d74435c0ad828f3d5999fa4c33ebe0629"},
+    {file = "wordcloud-1.9.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:483aa4f8d17b9744a3b238269593d1794b962fc757a72a9e7e8468c2665cffb7"},
+    {file = "wordcloud-1.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:64b342a79553970fa04083761d041067323219ad62b5550a496e42436d23cbb3"},
+    {file = "wordcloud-1.9.3-cp312-cp312-win32.whl", hash = "sha256:419acfe0b1d1227b9e3e14ec1bb6c40fd7fa652df4adf81f0ba3e00daca500b5"},
+    {file = "wordcloud-1.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:2061a9978a6243107ce1a8a9fa24f421b03a0f7e620769b6f5075857e75aa615"},
+    {file = "wordcloud-1.9.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:21f47fabe189f39532378759300a624ae166519dfafbd6a22cfe65b14a7d104d"},
+    {file = "wordcloud-1.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:524065f8a5a79e00748f45efbeacd25ac1d15850e0d0588753b17a8b2de2a6a7"},
+    {file = "wordcloud-1.9.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b2bb53492bc8663ba90a300bbd2da7be5059f9ad192ed1150e9bbbda8016c9a"},
+    {file = "wordcloud-1.9.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:643243474faee460e7d08944d3e529c58d0cbf8be11626fbb918ee8ccb913a23"},
+    {file = "wordcloud-1.9.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d95f44739a6972abfb97c12656999952dd28ed03700ee8b6efe35d688d489b36"},
+    {file = "wordcloud-1.9.3-cp37-cp37m-win32.whl", hash = "sha256:e56364c8829d399397a649501f834c12751ab106cba488ba8d86d532889b528c"},
+    {file = "wordcloud-1.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:78f4a3fd3526884e4f526ae070bcb47401766c48c9cb6488933f608f810fadae"},
+    {file = "wordcloud-1.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0058cf08573c99283fe189e93354d20ca8c9a8aac7207d96e74b93aedd02cdcc"},
+    {file = "wordcloud-1.9.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:47d6918381a8a816141bdd391376bff703ec5aa3a6bd88631097a5e2963ebd1a"},
+    {file = "wordcloud-1.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05aa3269c5af573cfb11e269de0fe73c2c72aefdd90cdb41368744e7d8bc7507"},
+    {file = "wordcloud-1.9.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d74e206f42af172db4d3c0054853523bf46070b12f0626493a56599957dd2196"},
+    {file = "wordcloud-1.9.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1932726635c8ed12bb74201d2a6b07f18c2f732aecadb9ae915832485241991f"},
+    {file = "wordcloud-1.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:038de1701e7853c41850644453f1c9e69f878e480d42efae154684a47fd59f1a"},
+    {file = "wordcloud-1.9.3-cp38-cp38-win32.whl", hash = "sha256:19aa05f60d9261301e4942fd1b1c4b458d903f24c12d2bd1c6ecbb752697a2f3"},
+    {file = "wordcloud-1.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:ab5bae12cf27d8de986e4d4518d4778f2b56c660b250b631ff805024038311a1"},
+    {file = "wordcloud-1.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:888d088f54a897b8597da2fae3954d74b1f7251f7d311bbcc30ec3c6987d3605"},
+    {file = "wordcloud-1.9.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:daa6cfa11ce24e7eb4e42dc896dae4f74ae2166cf90ec997996300566e6811d1"},
+    {file = "wordcloud-1.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:387dc2bd528ff6bb661451f2a9fd4ccf74b86072d7a2c868285d4c0cf26abeb4"},
+    {file = "wordcloud-1.9.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40c32a324319db610b40f387a2a0b42d091817958a5272e0a4c4eb6a158588b5"},
+    {file = "wordcloud-1.9.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8078c6c58db4ccb893f120354e7e08bc48a5a5aac3e764f9008bc96a769b208c"},
+    {file = "wordcloud-1.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:81f15eb60abc1676808bb85e2edfdbdc0a9011383f2a729c1c2a0cb941516768"},
+    {file = "wordcloud-1.9.3-cp39-cp39-win32.whl", hash = "sha256:1d1680bf6c3d1b2f8e3bd02ccfa868fee2655fe13cf5b9e9905251050448fbbd"},
+    {file = "wordcloud-1.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:c0f458681e4d49be36064f21bfb1dc8d8c3021fe30e474ee634666b4f84fd851"},
+    {file = "wordcloud-1.9.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:baea9ac88ec1ab317461c75834b64ad5dad12a02c4f2384dd546eac3c316dbbb"},
+    {file = "wordcloud-1.9.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6956b9f0d0eb14a12f46d41aebb4e7ad2d4c2ec417cc7c586bebd2ddc9c8311"},
+    {file = "wordcloud-1.9.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d221b4d0d1d2a1d79286c41d8a4c0ce70065488f153e5d81cc0be7fb494ff10f"},
+    {file = "wordcloud-1.9.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:db39dbe91dd31ffb667edcd496f4eeb85ceea397fef4ad51d0766ab934088cc7"},
+    {file = "wordcloud-1.9.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a6ae5db43807ca10f5c77dd2d22c78f8f9399758cc5ac6afd7f3c19e58b75d66"},
+    {file = "wordcloud-1.9.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a1c431f20ee28a8840f2552a89bd8332c455c318f4de7b6c2ca3159b76df4f0"},
+    {file = "wordcloud-1.9.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1847ca4466e2b1588478dd8eb87fa7baa28515b37ab7926471595e8ac81e6578"},
+    {file = "wordcloud-1.9.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:7b0e14e4dfcff7dee331df7880a2031e352e95a7d30e74ff152f162488b04179"},
+    {file = "wordcloud-1.9.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f1c0cff6037a3dc46437537a31925f3895d742fb6d67af71194149763de16a76"},
+    {file = "wordcloud-1.9.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a36788c5c79604653327675023cbd97c68813640887b51ce651bb4f5c28c88b"},
+    {file = "wordcloud-1.9.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3e3907c6496e197a9c4be76770c5ff8a03eddbdfe5a151a55e4eedeaa45ab3ad"},
+    {file = "wordcloud-1.9.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:65e6f6b68eecb85c326ae19729dd4151fcdebffc2142c9ee882dc2de955210d0"},
+    {file = "wordcloud-1.9.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0c8e18c4afa025819332efffe8008267a83a9c54fe72ae1bc889ddce0eec470d"},
+    {file = "wordcloud-1.9.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4df25cb5dd347e43d53e02a009418f5776e7651063aff991865da8f6336bf193"},
+    {file = "wordcloud-1.9.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53489ad22d58be3896ec16ed47604832e393224c89f7d7eed040096b07141ac4"},
+    {file = "wordcloud-1.9.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:61de4a5f3bfd33e0cb013cce6143bcf71959f3cd8536650b90134d745a553c2c"},
+    {file = "wordcloud-1.9.3.tar.gz", hash = "sha256:a9aa738d63ed674a40f0cc31adb83f4ca5fc195f03a6aff6e010d1f5807d1c58"},
+]
+
+[package.dependencies]
+matplotlib = "*"
+numpy = ">=1.6.1"
+pillow = "*"
+
 [[package]]
 name = "xxhash"
 version = "3.5.0"
@@ -2660,4 +2738,4 @@ multidict = ">=4.0"
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.12"
-content-hash = "
+content-hash = "8542704f2fdef8c09d10c94785620326a8e2c72112368ee6f2e25fa45aeeb75a"
pyproject.toml
@@ -13,6 +13,7 @@ pandas = "^2.2.2"
 nltk = "^3.9.1"
 plotly = "^5.23.0"
 matplotlib = "^3.9.2"
+wordcloud = "^1.9.3"
 
 [build-system]
 requires = ["poetry-core"]