osanseviero (HF staff) committed
Commit de86128
1 Parent(s): ae554ae

Add language and license info

Files changed (3):
  1. README.md +1 -1
  2. models.py +259 -0
  3. requirements.txt +1 -0
README.md CHANGED
@@ -5,7 +5,7 @@ colorFrom: indigo
  colorTo: blue
  sdk: streamlit
  sdk_version: 1.10.0
- app_file: app.py
+ app_file: models.py
  pinned: false
  ---
models.py ADDED
@@ -0,0 +1,259 @@
+ import streamlit as st
+ import pandas as pd
+ from datasets import load_dataset
+ from ast import literal_eval
+ import altair as alt
+
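+ # Pipeline tags grouped by modality (NLP, audio, CV, multimodal, tabular).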
+ nlp_tasks = ["text-classification", "text-generation", "text2text-generation", "token-classification", "fill-mask", "question-answering",
+     "translation", "conversational", "sentence-similarity", "summarization", "multiple-choice", "zero-shot-classification", "table-question-answering"
+ ]
+ audio_tasks = ["automatic-speech-recognition", "audio-classification", "text-to-speech", "audio-to-audio", "voice-activity-detection"]
+ cv_tasks = ["image-classification", "image-segmentation", "zero-shot-image-classification", "image-to-image", "unconditional-image-generation", "object-detection"]
+ multimodal = ["feature-extraction", "text-to-image", "visual-question-answering", "image-to-text", "document-question-answering"]
+ tabular = ["tabular-classification", "tabular-regression"]
+
+ modalities = {
+     "nlp": nlp_tasks,
+     "audio": audio_tasks,
+     "cv": cv_tasks,
+     "multimodal": multimodal,
+     "tabular": tabular,
+     "rl": ["reinforcement-learning"]
+ }
+
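+ # Map a repo's pipeline tag to its parent modality; unrecognized string tags get a fallback bucket.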
+ def modality(row):
+     pipeline = row["pipeline"]
+     for modality, tasks in modalities.items():
+         if pipeline in tasks:
+             return modality
+     if isinstance(pipeline, str):
+         return "unk_modality"
+     return None
+
+ supported_revisions = ["27_09_22"]
+
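+ # Load the stats dataset at the pinned revision and derive the helper columns used by the tabs below.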
+ def process_dataset(version):
+     # Load dataset at specified revision
+     dataset = load_dataset("open-source-metrics/model-repos-stats", revision=version)
+
+     # Convert to pandas dataframe
+     data = dataset["train"].to_pandas()
+
+     # Add modality column
+     data["modality"] = data.apply(modality, axis=1)
+
+     # Bin the model card length into some bins
+     data["length_bins"] = pd.cut(data["text_length"], [0, 200, 1000, 2000, 3000, 4000, 5000, 7500, 10000, 20000, 50000])
+
+     return data
+ base = st.selectbox(
+     'What revision do you want to use?',
+     supported_revisions)
+ data = process_dataset(base)
+
+ total_samples = data.shape[0]
+ st.metric(label="Total models", value=total_samples)
+
+ tab1, tab2, tab3, tab4, tab5, tab6, tab7 = st.tabs(["Language", "License", "Pipeline", "Discussion Features", "Libraries", "Model Cards", "Super users"])
+
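+ # Language tab: normalize the raw `languages` column, then chart language coverage per repo.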
+ with tab1:
+     st.header("Languages info")
+
+     data.loc[data.languages == "False", 'languages'] = None
+     data.loc[data.languages == {}, 'languages'] = None
+
+     no_lang_count = data["languages"].isna().sum()
+     data["languages"] = data["languages"].fillna('none')
+
+     def make_list(row):
+         languages = row["languages"]
+         if languages == "none":
+             return []
+         return literal_eval(languages)
+
+     def language_count(row):
+         languages = row["languages"]
+         leng = len(languages)
+         return leng
+
+     data["languages"] = data.apply(make_list, axis=1)
+     data["repos_count"] = data.apply(language_count, axis=1)
+
+     models_with_langs = data[data["repos_count"] > 0]
+     langs = models_with_langs["languages"].explode()
+     langs = langs[langs != {}]
+     total_langs = len(langs.unique())
+
+     col1, col2, col3 = st.columns(3)
+     with col1:
+         st.metric(label="Language Specified", value=total_samples-no_lang_count)
+     with col2:
+         st.metric(label="No Language Specified", value=no_lang_count)
+     with col3:
+         st.metric(label="Total Unique Languages", value=total_langs)
+
+     st.subheader("Distribution of languages per model repo")
+     linguality = st.selectbox(
+         'All or just Multilingual',
+         ["All", "Just Multilingual", "Three or more languages"])
+
+     filter = 0
+     if linguality == "Just Multilingual":
+         filter = 1
+     elif linguality == "Three or more languages":
+         filter = 2
+
+     models_with_langs = data[data["repos_count"] > filter]
+     df1 = models_with_langs['repos_count'].value_counts()
+     st.bar_chart(df1)
+
+     st.subheader("Distribution of repos per language")
+     linguality_2 = st.selectbox(
+         'All or filtered',
+         ["All", "No English", "Remove top 10"])
+
+     filter = 0
+     if linguality_2 == "All":
+         filter = 0
+     elif linguality_2 == "No English":
+         filter = 1
+     else:
+         filter = 2
+
+     models_with_langs = data[data["repos_count"] > 0]
+     langs = models_with_langs["languages"].explode()
+     langs = langs[langs != {}]
+
+     d = langs.value_counts().rename_axis("language").to_frame('counts').reset_index()
+     if filter == 1:
+         d = d.iloc[1:]
+     elif filter == 2:
+         d = d.iloc[10:]
+
+     # Just keep top 25 to avoid vertical scroll
+     d = d.iloc[:25]
+
+     st.write(alt.Chart(d).mark_bar().encode(
+         x='counts',
+         y=alt.Y('language', sort=None)
+     ))
+
+     st.subheader("Raw Data")
+     col1, col2 = st.columns(2)
+     with col1:
+         st.dataframe(df1)
+     with col2:
+         d = langs.value_counts().rename_axis("language").to_frame('counts').reset_index()
+         st.dataframe(d)
+
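+ # License tab: the same breakdown for the `license` column.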
+ with tab2:
+     st.header("License info")
+
+     no_license_count = data["license"].isna().sum()
+     col1, col2, col3 = st.columns(3)
+     with col1:
+         st.metric(label="License Specified", value=total_samples-no_license_count)
+     with col2:
+         st.metric(label="No License Specified", value=no_license_count)
+     with col3:
+         st.metric(label="Total Unique Licenses", value=len(data["license"].unique()))
+
+     st.subheader("Distribution of licenses per model repo")
+     license_filter = st.selectbox(
+         'All or filtered',
+         ["All", "No Apache 2.0", "Remove top 10"])
+
+     filter = 0
+     if license_filter == "All":
+         filter = 0
+     elif license_filter == "No Apache 2.0":
+         filter = 1
+     else:
+         filter = 2
+
+     d = data["license"].value_counts().rename_axis("license").to_frame('counts').reset_index()
+     if filter == 1:
+         d = d.iloc[1:]
+     elif filter == 2:
+         d = d.iloc[10:]
+
+     # Just keep top 25 to avoid vertical scroll
+     d = d.iloc[:25]
+
+     st.write(alt.Chart(d).mark_bar().encode(
+         x='counts',
+         y=alt.Y('license', sort=None)
+     ))
+     st.text("There are some edge cases, such as old repos that use lists of licenses. We are working on fixing this.")
+
+     st.subheader("Raw Data")
+     d = data["license"].value_counts().rename_axis("license").to_frame('counts').reset_index()
+     st.dataframe(d)
+
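+ # Pipeline tab: distribution of pipeline tags across repos, optionally restricted to one modality.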
+ with tab3:
+     st.header("Pipeline info")
+
+     no_pipeline_count = data["pipeline"].isna().sum()
+     col1, col2, col3 = st.columns(3)
+     with col1:
+         st.metric(label="Pipeline Specified", value=total_samples-no_pipeline_count)
+     with col2:
+         st.metric(label="No Pipeline Specified", value=no_pipeline_count)
+     with col3:
+         st.metric(label="Total Unique Pipelines", value=len(data["pipeline"].unique()))
+
+     st.subheader("Distribution of pipelines per model repo")
+     pipeline_filter = st.selectbox(
+         'All or filtered',
+         ["All", "NLP", "CV", "Audio", "RL", "Multimodal", "Tabular"])
+
+     filter = 0
+     if pipeline_filter == "All":
+         filter = 0
+     elif pipeline_filter == "NLP":
+         filter = 1
+     elif pipeline_filter == "CV":
+         filter = 2
+     elif pipeline_filter == "Audio":
+         filter = 3
+     elif pipeline_filter == "RL":
+         filter = 4
+     elif pipeline_filter == "Multimodal":
+         filter = 5
+     elif pipeline_filter == "Tabular":
+         filter = 6
+
+     # Restrict to the selected modality (0 = all) before counting pipelines
+     modality_for_filter = {1: "nlp", 2: "cv", 3: "audio", 4: "rl", 5: "multimodal", 6: "tabular"}
+     filtered = data
+     if filter != 0:
+         filtered = data[data["modality"] == modality_for_filter[filter]]
+
+     d = filtered["pipeline"].value_counts().rename_axis("pipeline").to_frame('counts').reset_index()
+
+     st.write(alt.Chart(d).mark_bar().encode(
+         x='counts',
+         y=alt.Y('pipeline', sort=None)
+     ))
requirements.txt ADDED
@@ -0,0 +1 @@
+ datasets