# models-explorer / models.py
import streamlit as st
import pandas as pd
from datasets import load_dataset
from ast import literal_eval
import altair as alt
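
# Pipeline tags grouped by top-level modality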
nlp_tasks = [
    "text-classification", "text-generation", "text2text-generation", "token-classification",
    "fill-mask", "question-answering", "translation", "conversational", "sentence-similarity",
    "summarization", "multiple-choice", "zero-shot-classification", "table-question-answering",
]
audio_tasks = ["automatic-speech-recognition", "audio-classification", "text-to-speech", "audio-to-audio", "voice-activity-detection"]
cv_tasks = ["image-classification", "image-segmentation", "zero-shot-image-classification", "image-to-image", "unconditional-image-generation", "object-detection"]
multimodal = ["feature-extraction", "text-to-image", "visual-question-answering", "image-to-text", "document-question-answering"]
tabular = ["tabular-classification", "tabular-regression"]

modalities = {
    "nlp": nlp_tasks,
    "audio": audio_tasks,
    "cv": cv_tasks,
    "multimodal": multimodal,
    "tabular": tabular,
    "rl": ["reinforcement-learning"],
}

def modality(row):
    """Return the top-level modality for a row's pipeline tag."""
    pipeline = row["pipeline"]
    for modality_name, tasks in modalities.items():
        if pipeline in tasks:
            return modality_name
    # A string tag that matches no known task list
    if isinstance(pipeline, str):
        return "unk_modality"
    return None
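
# A minimal sketch of the mapping, assuming a pandas Series stands in for a
# dataframe row (hypothetical values, not drawn from the dataset):
#   modality(pd.Series({"pipeline": "fill-mask"}))      -> "nlp"
#   modality(pd.Series({"pipeline": "some-new-task"}))  -> "unk_modality"
#   modality(pd.Series({"pipeline": None}))             -> None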
supported_revisions = ["27_09_22"]
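# Each entry is a git revision of the open-source-metrics/model-repos-stats
# dataset repo, passed to load_dataset below.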

def process_dataset(version):
    # Load dataset at specified revision
    dataset = load_dataset("open-source-metrics/model-repos-stats", revision=version)

    # Convert to pandas dataframe
    data = dataset["train"].to_pandas()

    # Add modality column
    data["modality"] = data.apply(modality, axis=1)

    # Bin the model card length into some bins
    data["length_bins"] = pd.cut(data["text_length"], [0, 200, 1000, 2000, 3000, 4000, 5000, 7500, 10000, 20000, 50000])
    return data

base = st.selectbox(
    'What revision do you want to use?',
    supported_revisions)
data = process_dataset(base)
total_samples = data.shape[0]
st.metric(label="Total models", value=total_samples)
tab1, tab2, tab3, tab4, tab5, tab6, tab7 = st.tabs(["Language", "License", "Pipeline", "Discussion Features", "Libraries", "Model Cards", "Super users"])
with tab1:
    st.header("Languages info")

    data.loc[data.languages == "False", 'languages'] = None
    data.loc[data.languages == {}, 'languages'] = None
    no_lang_count = data["languages"].isna().sum()
    data["languages"] = data["languages"].fillna('none')

    def make_list(row):
        languages = row["languages"]
        if languages == "none":
            return []
        return literal_eval(languages)

    def language_count(row):
        languages = row["languages"]
        return len(languages)

    data["languages"] = data.apply(make_list, axis=1)
    data["repos_count"] = data.apply(language_count, axis=1)

    models_with_langs = data[data["repos_count"] > 0]
    langs = models_with_langs["languages"].explode()
    langs = langs[langs != {}]
    total_langs = len(langs.unique())
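    # explode() yields one row per (repo, language) pair, so a repo tagged
    # ["en", "fr"] is counted once under each language below (illustrative
    # example).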
    col1, col2, col3 = st.columns(3)
    with col1:
        st.metric(label="Language Specified", value=total_samples - no_lang_count)
    with col2:
        st.metric(label="No Language Specified", value=no_lang_count)
    with col3:
        st.metric(label="Total Unique Languages", value=total_langs)
st.subheader("Distribution of languages per model repo")
linguality = st.selectbox(
'All or just Multilingual',
["All", "Just Multilingual", "Three or more languages"])
filter = 0
if linguality == "Just Multilingual":
filter = 1
elif linguality == "Three or more languages":
filter = 2
models_with_langs = data[data["repos_count"] > filter]
df1 = models_with_langs['repos_count'].value_counts()
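    # df1 maps "number of languages declared" to "how many repos declare
    # that many languages".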
    st.bar_chart(df1)
st.subheader("Distribution of repos per language")
linguality_2 = st.selectbox(
'All or filtered',
["All", "No English", "Remove top 10"])
filter = 0
if linguality_2 == "All":
filter = 0
elif linguality_2 == "No English":
filter = 1
else:
filter = 2
models_with_langs = data[data["repos_count"] > 0]
langs = models_with_langs["languages"].explode()
langs = langs[langs != {}]
d = langs.value_counts().rename_axis("language").to_frame('counts').reset_index()
if filter == 1:
d = d.iloc[1:]
elif filter == 2:
d = d.iloc[10:]
# Just keep top 25 to avoid vertical scroll
d = d.iloc[:25]
    st.write(alt.Chart(d).mark_bar().encode(
        x='counts',
        y=alt.Y('language', sort=None)
    ))
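    # sort=None keeps the bar order from the dataframe, i.e. descending by
    # count as returned by value_counts() above.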
st.subheader("Raw Data")
col1, col2 = st.columns(2)
with col1:
st.dataframe(df1)
with col2:
d = langs.value_counts().rename_axis("language").to_frame('counts').reset_index()
st.dataframe(d)
with tab2:
    st.header("License info")
    no_license_count = data["license"].isna().sum()

    col1, col2, col3 = st.columns(3)
    with col1:
        st.metric(label="License Specified", value=total_samples - no_license_count)
    with col2:
        st.metric(label="No License Specified", value=no_license_count)
    with col3:
        st.metric(label="Total Unique Licenses", value=len(data["license"].unique()))
st.subheader("Distribution of licenses per model repo")
license_filter = st.selectbox(
'All or filtered',
["All", "No Apache 2.0", "Remove top 10"])
filter = 0
if license_filter == "All":
filter = 0
elif license_filter == "No Apache 2.0":
filter = 1
else:
filter = 2
d = data["license"].value_counts().rename_axis("license").to_frame('counts').reset_index()
if filter == 1:
d = d.iloc[1:]
elif filter == 2:
d = d.iloc[10:]
# Just keep top 25 to avoid vertical scroll
d = d.iloc[:25]
    st.write(alt.Chart(d).mark_bar().encode(
        x='counts',
        y=alt.Y('license', sort=None)
    ))
st.text("There are some edge cases, as old repos using lists of licenses. We are working on fixing this.")
st.subheader("Raw Data")
d = data["license"].value_counts().rename_axis("license").to_frame('counts').reset_index()
st.dataframe(d)
with tab3:
    st.header("Pipeline info")
    no_pipeline_count = data["pipeline"].isna().sum()

    col1, col2, col3 = st.columns(3)
    with col1:
        st.metric(label="Pipeline Specified", value=total_samples - no_pipeline_count)
    with col2:
        st.metric(label="No Pipeline Specified", value=no_pipeline_count)
    with col3:
        st.metric(label="Total Unique Pipelines", value=len(data["pipeline"].unique()))
st.subheader("Distribution of pipelines per model repo")
pipeline_filter = st.selectbox(
'All or filtered',
["All", "NLP", "CV", "Audio", "RL", "Multimodal", "Tabular"])
filter = 0
if pipeline_filter == "All":
filter = 0
elif pipeline_filter == "NLP":
filter = 1
elif pipeline_filter == "CV":
filter = 2
elif pipeline_filter == "Audio":
filter = 3
elif pipeline_filter == "RL":
filter = 4
elif pipeline_filter == "Multimodal":
filter = 5
elif pipeline_filter == "Tabular":
filter = 6
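    # filter encodes the selection: 0=All, 1=NLP, 2=CV, 3=Audio, 4=RL,
    # 5=Multimodal, 6=Tabular.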
d = data["pipeline"].value_counts().rename_axis("pipeline").to_frame('counts').reset_index()
st.write(alt.Chart(d).mark_bar().encode(
x='counts',
y=alt.X('pipeline', sort=None)
))