danielhn committed
Commit 156ecbd
1 Parent(s): afae257

Initial test

Files changed (3)
  1. .streamlit/config.toml +2 -0
  2. app.py +388 -0
  3. reports/daily/2023-01-01.csv +112 -0
.streamlit/config.toml ADDED
@@ -0,0 +1,2 @@
+ [theme]
+ base="dark"
app.py ADDED
@@ -0,0 +1,388 @@
+ import time  # to simulate real-time data in a time loop
+ from os import listdir
+ from os.path import isfile, join
+ import numpy as np  # np.sum for column totals
+ import pandas as pd  # read csv, df manipulation
+ import plotly.express as px  # interactive charts
+ from plotly import graph_objs as go
+ import streamlit as st  # 🎈 data web app development
+ import plotly.figure_factory as ff
+ from collections import Counter
+
+
+ print("Make sure to activate your VPN before running this script")
+
+ st.set_page_config(
+     page_title="GroqFlow Progress Tracker",
+     page_icon="🚀",
+     layout="wide",
+ )
+
+
+ # Session State variables:
+ state = st.session_state
+ if "INFO_CLOSED" not in state:
+     state.INFO_CLOSED = False
+
+ # dashboard title
+ st.title("GroqFlow Progress Tracker 🚀")
+
+ # Custom chart colors (https://plotly.com/python/discrete-color/)
+ colorway = ["#3366cc", "#FF7F0E"]
+
+
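+ # Helper shared by all sidebar filters: renders one checkbox per option and
+ # keeps only the rows of each DataFrame whose `label` column matches a checked
+ # option. The last option is treated as "other" (any value not listed).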
+ def add_filter(data_frame_list, name, label, options, num_cols=1):
+     st.markdown(f"#### {name}")
+
+     cols = st.columns(num_cols)
+     instantiated_checkbox = []
+     for idx in range(len(options)):
+         with cols[idx % num_cols]:
+             instantiated_checkbox.append(st.checkbox(options[idx], False))
+
+     all_options = set(data_frame_list[-1][label])
+     selected_options = [
+         options[idx] for idx, checked in enumerate(instantiated_checkbox) if checked
+     ]
+
+     # The last checkbox will always correspond to "other"
+     if instantiated_checkbox[-1]:
+         selected_options = selected_options[:-1]
+         other_options = [x for x in all_options if x not in options]
+         selected_options = set(selected_options + other_options)
+
+     if len(selected_options) > 0:
+         for idx in range(len(data_frame_list)):
+             data_frame_list[idx] = data_frame_list[idx][
+                 [
+                     any([x == model_entry for x in selected_options])
+                     for model_entry in data_frame_list[idx][label]
+                 ]
+             ]
+     return data_frame_list
+
+
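+ # Sidebar: choose the test suite (daily vs. monthly), the report date, and
+ # the filters applied to the data shown on the page.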
+ with st.sidebar:
+
+     st.markdown("# Filters")
+
+     test_type = st.radio(
+         "Test Type",
+         ("Daily Tests (100 models)", "Monthly Tests (500+ models)"),
+     )
+     if test_type == "Daily Tests (100 models)":
+         selected_test_type = "daily"
+         report_folder = "reports/daily"
+     else:
+         selected_test_type = "monthly"
+         report_folder = "reports/monthly"
+
+     # Get ML Agility reports
+     reports = sorted(
+         [f for f in listdir(report_folder) if isfile(join(report_folder, f))]
+     )
+
+     selected_report = st.selectbox("Test date", reports, index=len(reports) - 1)
+     selected_report_idx = reports.index(selected_report)
+     prev_report = reports[max(0, selected_report_idx - 1)]
+     mla_report = pd.read_csv(f"{report_folder}/{selected_report}")
+     prev_mla_report = pd.read_csv(f"{report_folder}/{prev_report}")
+
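+     # Each add_filter call below narrows both the current and the previous
+     # report in lockstep, so the KPI deltas stay comparable.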
+     # Add chips filter
+     num_chips_options = ["1", "2", "4", "8", "16", "32+"]
+     mla_report = mla_report.astype({"chips_used": str})
+     prev_mla_report = prev_mla_report.astype({"chips_used": str})
+     mla_report, prev_mla_report = add_filter(
+         [mla_report, prev_mla_report],
+         "Number of GroqChips™",
+         label="chips_used",
+         options=num_chips_options,
+         num_cols=3,
+     )
+
+     # Add author filter
+     authors = [
+         "google",
+         "apple",
+         "facebook",
+         "openai",
+         "microsoft",
+         "huggingface",
+         "CompVis",
+         "others",
+     ]
+     mla_report, prev_mla_report = add_filter(
+         [mla_report, prev_mla_report],
+         "Authors",
+         label="author",
+         options=authors,
+         num_cols=2,
+     )
+
+     # Add task filter
+     tasks = [
+         "Image Classification",
+         "Translation",
+         "Image Segmentation",
+         "Fill-Mask",
+         "Text-to-Image",
+         "Token Classification",
+         "Sentence Similarity",
+         "Audio Classification",
+         "Question Answering",
+         "Summarization",
+         "other",
+     ]
+     mla_report, prev_mla_report = add_filter(
+         [mla_report, prev_mla_report], "Tasks", label="task", options=tasks
+     )
+
+
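+ # Currently a no-op: the early return disables the per-model progress lists;
+ # the original body is kept below as a string until the feature is re-enabled.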
+ def detailed_progress_list(df_new, df_old, filter=None):
+     return
+     """
+     if filter is not None:
+         df_new = df_new[(df_new[filter] == True)]
+         df_old = df_old[(df_old[filter] == True)]
+
+     progress = df_new[~(df_new["hash"].isin(df_old["hash"]))].reset_index(drop=True)
+     regression = df_old[~(df_old["hash"].isin(df_new["hash"]))].reset_index(drop=True)
+
+     for model_name in progress["model_name"]:
+         st.markdown(
+             f'<span style="color:green">↑ {model_name}</span>',
+             unsafe_allow_html=True,
+         )
+     for model_name in regression["model_name"]:
+         st.markdown(
+             f'<span style="color:red">↓ {model_name}</span>',
+             unsafe_allow_html=True,
+         )
+     """
+
+
+ # creating a single-element container
+ placeholder = st.empty()
+
+ with placeholder.container():
+
+     st.markdown("## Summary Results")
+     # create seven columns
+     kpi = st.columns(7)
+     model_details = st.columns(7)
+
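+     # model_details mirrors the KPI row: for daily tests, each cell would list
+     # the models that newly pass or regress at that stage (see detailed_progress_list).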
+     # fill in those columns with their respective metrics or KPIs
+     kpi[0].metric(
+         label="All models",
+         value=len(mla_report),
+         delta=len(mla_report) - len(prev_mla_report),
+     )
+     if selected_test_type == "daily":
+         with model_details[0]:
+             detailed_progress_list(mla_report, prev_mla_report)
+
+     kpi[1].metric(
+         label="Convert to ONNX",
+         value=np.sum(mla_report["base_onnx"]),
+         delta=int(
+             np.sum(mla_report["base_onnx"]) - np.sum(prev_mla_report["base_onnx"])
+         ),
+     )
+     if selected_test_type == "daily":
+         with model_details[1]:
+             detailed_progress_list(mla_report, prev_mla_report, "base_onnx")
+
+     kpi[2].metric(
+         label="Optimize ONNX file",
+         value=np.sum(mla_report["optimized_onnx"]),
+         delta=int(
+             np.sum(mla_report["optimized_onnx"])
+             - np.sum(prev_mla_report["optimized_onnx"])
+         ),
+     )
+     if selected_test_type == "daily":
+         with model_details[2]:
+             detailed_progress_list(mla_report, prev_mla_report, "optimized_onnx")
+
+     kpi[3].metric(
+         label="All ops supported",
+         value=np.sum(mla_report["all_ops_supported"]),
+         delta=int(
+             np.sum(mla_report["all_ops_supported"])
+             - np.sum(prev_mla_report["all_ops_supported"])
+         ),
+     )
+     if selected_test_type == "daily":
+         with model_details[3]:
+             detailed_progress_list(mla_report, prev_mla_report, "all_ops_supported")
+
+     kpi[4].metric(
+         label="Converts to FP16",
+         value=np.sum(mla_report["fp16_onnx"]),
+         delta=int(
+             np.sum(mla_report["fp16_onnx"]) - np.sum(prev_mla_report["fp16_onnx"])
+         ),
+     )
+     if selected_test_type == "daily":
+         with model_details[4]:
+             detailed_progress_list(mla_report, prev_mla_report, "fp16_onnx")
+
+     kpi[5].metric(
+         label="Compiles",
+         value=np.sum(mla_report["compiles"]),
+         delta=int(np.sum(mla_report["compiles"]) - np.sum(prev_mla_report["compiles"])),
+     )
+     if selected_test_type == "daily":
+         with model_details[5]:
+             detailed_progress_list(mla_report, prev_mla_report, "compiles")
+
+     kpi[6].metric(
+         label="Assembles",
+         value=np.sum(mla_report["assembles"]),
+         delta=int(
+             np.sum(mla_report["assembles"]) - np.sum(prev_mla_report["assembles"])
+         ),
+     )
+     if selected_test_type == "daily":
+         with model_details[6]:
+             detailed_progress_list(mla_report, prev_mla_report, "assembles")
+
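+     # Second row of charts: compiler-error counts on the left, parameter-count
+     # distribution of attempted vs. assembled models on the right.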
+     cols = st.columns(2)
+     with cols[0]:
+
+         compiler_errors = mla_report[mla_report["compiler_error"] != "-"][
+             "compiler_error"
+         ]
+         compiler_errors = Counter(compiler_errors)
+         st.markdown("""#### Top compiler issues""")
+         if len(compiler_errors) > 0:
+             compiler_errors = pd.DataFrame.from_dict(
+                 compiler_errors, orient="index"
+             ).reset_index()
+             compiler_errors = compiler_errors.set_axis(
+                 ["error", "count"], axis=1, inplace=False
+             )
+
+             fig = px.bar(
+                 compiler_errors, x="count", y="error", orientation="h", height=400
+             )
+             st.plotly_chart(fig, use_container_width=True)
+         else:
+             st.markdown("""No compiler errors found :tada:""")
+
+     with cols[1]:
+         # Add parameters histogram
+         all_models = [float(x) / 1000000 for x in mla_report["params"] if x != "-"]
+
+         assembled_models = mla_report[mla_report["assembles"] == True]
+         assembled_models = [
+             float(x) / 1000000 for x in assembled_models["params"] if x != "-"
+         ]
+         hist_data = []
+         group_labels = []
+         if all_models != []:
+             hist_data.append(all_models)
+             group_labels.append("Models we tried compiling")
+
+         if assembled_models != []:
+             hist_data.append(assembled_models)
+             group_labels.append("Assembled models")
+
+         st.markdown("""#### Assembled models vs. Parameters (in millions)""")
+
+         if len(assembled_models) > 1:
+
+             fig = ff.create_distplot(
+                 hist_data,
+                 group_labels,
+                 bin_size=[25, 25],
+                 histnorm="",
+             )
+             # fig.layout.update(title="Assembled models vs. Parameters (in millions)")
+             fig.layout.update(xaxis_title="Parameters in millions")
+             fig.layout.update(yaxis_title="count")
+             fig.update_xaxes(range=[1, 1000])
+             st.plotly_chart(fig, use_container_width=True)
+         else:
+             st.markdown("""Need at least two assembled models to show this graph 😅""")
+
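+     # The GroqChip-vs-A100 section renders only when the report carries the
+     # speedup columns (some reports may not include them).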
+     if "tsp_gpu_compute_ratio" in mla_report and "tsp_gpu_e2e_ratio" in mla_report:
+         cols = st.columns(2)
+         with cols[0]:
+             # GPU Acceleration plot
+             st.markdown("""#### Speedup of GroqChip™ compared to A100 GPUs""")
+
+             # Prepare data
+             df = mla_report[
+                 ["model_name", "tsp_gpu_compute_ratio", "tsp_gpu_e2e_ratio"]
+             ]
+             df = df.sort_values(by=["model_name"])
+             df = df[(df.tsp_gpu_compute_ratio != "-")]
+             df = df[(df.tsp_gpu_e2e_ratio != "-")]
+             df["tsp_gpu_compute_ratio"] = df["tsp_gpu_compute_ratio"].astype(float)
+             df["tsp_gpu_e2e_ratio"] = df["tsp_gpu_e2e_ratio"].astype(float)
+
+             data = [
+                 go.Bar(
+                     x=df["model_name"],
+                     y=df["tsp_gpu_compute_ratio"],
+                     name="Compute only",
+                 ),
+                 go.Bar(
+                     x=df["model_name"],
+                     y=df["tsp_gpu_e2e_ratio"],
+                     name="Compute + estimated I/O",
+                 ),
+             ]
+
+             layout = go.Layout(
+                 barmode="overlay",
+                 yaxis_title="Speedup compared to A100 GPU",
+                 colorway=colorway,
+             )
+
+             fig = dict(data=data, layout=layout)
+             st.plotly_chart(fig, use_container_width=True)
+
+             st.markdown(
+                 "<sup>*</sup>Estimated I/O does NOT include delays caused by Groq's runtime.",
+                 unsafe_allow_html=True,
+             )
+
+         with cols[1]:
+             # Show stats
+             st.markdown(
+                 f"""<br><br><br><br><br><br>
+                 <p style="font-family:sans-serif; font-size: 20px;text-align: center;">Average speedup of GroqChip™ considering compute only:</p>
+                 <p style="font-family:sans-serif; color:#3366cc; font-size: 26px;text-align: center;"> {round(df["tsp_gpu_compute_ratio"].mean(), 2)}x</p>
+                 <p style="font-family:sans-serif; color:#3366cc; font-size: 20px;text-align: center;"> min {round(df["tsp_gpu_compute_ratio"].min(), 2)}x; max {round(df["tsp_gpu_compute_ratio"].max(), 2)}x</p>
+                 <br><br>
+                 <p style="font-family:sans-serif; font-size: 20px;text-align: center;">Average speedup of GroqChip™ considering compute + estimated I/O<sup>*</sup>:</p>
+                 <p style="font-family:sans-serif; color:#FF7F0E; font-size: 26px;text-align: center;"> {round(df["tsp_gpu_e2e_ratio"].mean(), 2)}x</p>
+                 <p style="font-family:sans-serif; color:#FF7F0E; font-size: 20px;text-align: center;"> min {round(df["tsp_gpu_e2e_ratio"].min(), 2)}x; max {round(df["tsp_gpu_e2e_ratio"].max(), 2)}x</p>""",
+                 unsafe_allow_html=True,
+             )
+
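+     # Full filtered report as a table, with a free-text model-name filter.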
+     st.markdown("### Detailed Data View")
+     st.markdown(
+         "**Model selection**: All workloads were obtained from model cards available at huggingface.co/models. Input shapes correspond exactly to those used by the Hugging Face model cards. Some of those input shapes may be small, making compilation easier than it would be with reasonably-sized inputs.",
+         unsafe_allow_html=True,
+     )
+     model_name = st.text_input("", placeholder="Filter model by name")
+     if model_name != "":
+         mla_report = mla_report[[model_name in x for x in mla_report["model_name"]]]
+
+     # Select which columns to show
+     selected_cols = list(mla_report.columns)
+     # remove_cols = (
+     #     "tsp_e2e_latency",
+     #     "gpu_e2e_latency",
+     #     "tsp_gpu_e2e_ratio",
+     # )
+     # for item in remove_cols:
+     #     if item in selected_cols:
+     #         selected_cols.remove(item)
+     st.dataframe(
+         mla_report[selected_cols], height=min((len(mla_report) + 1) * 35, 35 * 21)
+     )
reports/daily/2023-01-01.csv ADDED
@@ -0,0 +1,112 @@
+ model_name,author,class,downloads,base_onnx,optimized_onnx,all_ops_supported,fp16_onnx,compiles,assembles,params,chips_used,hash,license,task,model_type,cycles,tsp_compute_latency,gpu_compute_latency,tsp_gpu_compute_ratio,tsp_estimated_e2e_latency,gpu_e2e_latency,tsp_gpu_e2e_ratio,compiler_error,export_time,optimize_onnx_time,check_compatibility_time,fp16_conversion_time,compile_time,assemble_time,compiler_ram_GB
+ ldm-text2im-large-256,CompVis,LDMBertModel,2736,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,542895638,0,5a193210,apache-2.0,Text-to-Image,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ ldm-text2im-large-256,CompVis,UNet2DConditionModel,2736,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,872305830,0,d4c354d4,apache-2.0,Text-to-Image,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ stable-diffusion-v1-4,CompVis,UNet2DConditionModel,933179,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,859526310,0,8d97aa42,creativeml-openrail-m,Text-to-Image,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ stable-diffusion-v1-4,CompVis,CLIPTextModel,933179,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,123066514,0,d312ecd1,creativeml-openrail-m,Text-to-Image,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ deeplabv3-mobilevit-small,apple,MobileViTForSemanticSegmentation,623,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,6351055,0,5621d1d8,other,Image Segmentation,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ deeplabv3-mobilevit-xx-small,apple,MobileViTForSemanticSegmentation,296,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,1851719,0,535af098,other,Image Segmentation,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ mobilevit-small,apple,MobileViTForImageClassification,2156,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,5572645,0,14ad46bb,other,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ mobilevit-xx-small,apple,MobileViTForImageClassification,347,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,1270109,0,6ced4e0a,other,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ bart-base,facebook,BartModel,4287565,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,137857028,0,ccd3382a,apache-2.0,Feature Extraction,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ bart-large,facebook,BartModel,523031,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,404206966,0,cb0751ce,apache-2.0,Feature Extraction,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ contriever-msmarco,facebook,BertModel,640510,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,109112174,0,d59172a2,-,Feature Extraction,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ contriever,facebook,BertModel,11989,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,109112174,0,d59172a2,-,-,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ convnext-base-224,facebook,ConvNextForImageClassification,1195,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,88591654,0,7ab00a65,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ convnext-base-384,facebook,ConvNextForImageClassification,503,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,88591654,0,7ab00a65,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ convnext-large-224-22k-1k,facebook,ConvNextForImageClassification,532,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,197767526,0,fb35dbce,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ convnext-small-224,facebook,ConvNextForImageClassification,1084,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,50223878,0,87bede4e,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ convnext-tiny-224,facebook,ConvNextForImageClassification,7627,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,28589228,0,753bc122,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ convnext-xlarge-224-22k,facebook,ConvNextForImageClassification,950,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,392900367,0,8bc87977,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ convnext-xlarge-384-22k-1k,facebook,ConvNextForImageClassification,1487,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,350197158,0,b07800d5,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ data2vec-vision-base-ft1k,facebook,Data2VecVisionForImageClassification,896,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,92014184,0,69cd45e4,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ deit-base-distilled-patch16-224,facebook,DeiTForImageClassificationWithTeacher,3896,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,87338303,0,d5e17c06,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ deit-base-distilled-patch16-384,facebook,DeiTForImageClassificationWithTeacher,1089,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,87630143,0,d5e17c06,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ deit-base-patch16-224,facebook,ViTForImageClassification,1627,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,86567765,0,8fa842d1,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ deit-base-patch16-384,facebook,ViTForImageClassification,249,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,86859605,0,8fa842d1,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ deit-small-distilled-patch16-224,facebook,DeiTForImageClassificationWithTeacher,4774,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,22436543,0,39d02956,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ deit-small-patch16-224,facebook,ViTForImageClassification,2221,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,22050773,0,75dcf183,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ deit-tiny-distilled-patch16-224,facebook,DeiTForImageClassificationWithTeacher,554,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,5910911,0,a22960fb,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ deit-tiny-patch16-224,facebook,ViTForImageClassification,1605,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,5717525,0,4f7bba18,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ dino-vitb16,facebook,ViTModel,5486,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,86389357,0,993623dd,apache-2.0,Feature Extraction,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ dino-vitb8,facebook,ViTModel,631,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,86398573,0,e9f1512a,apache-2.0,Feature Extraction,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ dino-vits16,facebook,ViTModel,352,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,21813613,0,257fd398,apache-2.0,Feature Extraction,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ dino-vits8,facebook,ViTModel,291,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,21818221,0,825fd897,apache-2.0,Feature Extraction,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ flava-full,facebook,FlavaModel,5282,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,239843835,0,f54edd4f,bsd-3-clause,-,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ levit-128S,facebook,LevitForImageClassificationWithTeacher,1379,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,-,0,75ce3c61,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ maskformer-swin-base-ade,facebook,MaskFormerForInstanceSegmentation,915,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,119679086,0,435797ea,apache-2.0,Image Segmentation,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ maskformer-swin-base-coco,facebook,MaskFormerForInstanceSegmentation,2485,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,119679086,0,435797ea,apache-2.0,Image Segmentation,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ maskformer-swin-small-coco,facebook,MaskFormerForInstanceSegmentation,644,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,119679086,0,435797ea,apache-2.0,Image Segmentation,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ maskformer-swin-tiny-ade,facebook,MaskFormerForInstanceSegmentation,957,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,119679086,0,435797ea,apache-2.0,Image Segmentation,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ mbart-large-50,facebook,MBartForConditionalGeneration,750716,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,865117055,0,cc870534,mit,Text2Text Generation,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ opt-125m,facebook,OPTForCausalLM,228909,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,163848370,0,6cd79533,other,Text Generation,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ opt-350m,facebook,OPTForCausalLM,108185,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,356887800,0,ad0ef94a,other,Text Generation,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ regnet-y-040,facebook,RegNetForImageClassification,694,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,20615520,0,e61a4c01,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ vit-mae-base,facebook,ViTMAEForPreTraining,11994,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,-,0,e6e74056,apache-2.0,-,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ vit-mae-large,facebook,ViTMAEForPreTraining,5655,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,-,0,affe8660,apache-2.0,-,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ xlm-roberta-xl,facebook,XLMRobertaXLForMaskedLM,958,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,4125012789,0,24c40de1,mit,Fill-Mask,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ bert2bert L-24 wmt de en,google,BertGenerationEncoder,1524,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,335040717,0,d49341c1,apache-2.0,Translation,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ byt5-base,google,T5ForConditionalGeneration,3256,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,581780174,0,e9c73447,apache-2.0,Text2Text Generation,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ byt5-large,google,T5ForConditionalGeneration,780,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,1228479606,0,1ca21db0,apache-2.0,Text2Text Generation,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ byt5-small,google,T5ForConditionalGeneration,41266,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,299685500,0,2.83E+14,apache-2.0,Text2Text Generation,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ canine-c,google,CanineModel,1775,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,-,0,51c875ff,apache-2.0,Feature Extraction,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ canine-s,google,CanineModel,10734,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,-,0,51c875ff,apache-2.0,Feature Extraction,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ ddpm-celebahq-256,google,UNet2DModel,1827,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,113662494,0,a5e0de9e,apache-2.0,Unconditional Image Generation,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ ddpm-cifar10-32,google,UNet2DModel,1945,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,35742306,0,31e11b2b,apache-2.0,Unconditional Image Generation,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ electra-base-discriminator,google,ElectraForPreTraining,179212,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,109105394,0,8a65da14,apache-2.0,-,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ electra-base-generator,google,ElectraForMaskedLM,30181,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,56802220,0,55ef183d,apache-2.0,Fill-Mask,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ electra-large-discriminator,google,ElectraForPreTraining,46237,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,334639574,0,b3e531eb,apache-2.0,-,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ electra-small-discriminator,google,ElectraForPreTraining,446832,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,13486322,0,70bef88d,apache-2.0,-,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ fnet-base,google,FNetForMaskedLM,178925,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,-,0,ce0cff8a,apache-2.0,-,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ mobilebert-uncased,google,MobileBertForMaskedLM,48600,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,40247413,0,4295f30f,apache-2.0,-,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ owlvit-base-patch16,google,OwlViTForObjectDetection,2261,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,-,0,2a2d9322,apache-2.0,Object Detection,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ owlvit-base-patch32,google,OwlViTForObjectDetection,10221,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,-,0,302ff610,apache-2.0,Object Detection,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ owlvit-large-patch14,google,OwlViTForObjectDetection,2642,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,-,0,2565922f,apache-2.0,Object Detection,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ t5-small-ssm-nq,google,Linear,2505,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,196608,0,920c0322,apache-2.0,Text2Text Generation,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ vit-base-patch16-224-in21k,google,ViTModel,614852,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,86389357,0,993623dd,apache-2.0,Feature Extraction,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ vit-base-patch16-224,google,ViTForImageClassification,1305984,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,86567765,0,8fa842d1,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ vit-base-patch16-384,google,ViTForImageClassification,7771,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,86859605,0,8fa842d1,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ vit-base-patch32-224-in21k,google,ViTModel,3348,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,88045933,0,307dc71a,apache-2.0,Feature Extraction,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ vit-base-patch32-384,google,ViTForImageClassification,1806,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,88297301,0,da31f94d,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ vit-huge-patch14-224-in21k,google,ViTModel,927,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,632404749,0,e6073acb,apache-2.0,Feature Extraction,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ vit-large-patch16-224-in21k,google,ViTModel,642,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,304351437,0,afcb2f64,apache-2.0,Feature Extraction,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ vit-large-patch16-224,google,ViTForImageClassification,607,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,304326837,0,62c9365b,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ vit-large-patch16-384,google,ViTForImageClassification,684,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,304715957,0,62c9365b,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ vit-large-patch32-224-in21k,google,ViTModel,882,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,86389357,0,993623dd,apache-2.0,Feature Extraction,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ vit-large-patch32-384,google,ViTForImageClassification,3062,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,306632885,0,05fbb6ac,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ beit-base-patch16-224-pt22k-ft22k,microsoft,BeitForImageClassification,13214,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,108040913,0,17293472,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ beit-base-patch16-224-pt22k,microsoft,BeitForMaskedImageModeling,1999,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,92422044,0,76e338ee,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ beit-base-patch16-224,microsoft,BeitForImageClassification,4097,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,92014184,0,cd2ea289,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ beit-base-patch16-384,microsoft,BeitForImageClassification,2193,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,134367464,0,cd2ea289,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ beit-large-patch16-224-pt22k-ft22k,microsoft,BeitForImageClassification,384,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,340414369,0,16db572d,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ beit-large-patch16-224-pt22k,microsoft,BeitForMaskedImageModeling,542,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,312142432,0,de648727,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ beit-large-patch16-384,microsoft,BeitForImageClassification,252,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,431994424,0,b7efd875,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ beit-large-patch16-512,microsoft,BeitForImageClassification,2832,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,707589688,0,b7efd875,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ codebert-base-mlm,microsoft,RobertaForMaskedLM,273375,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,163311822,0,bb3e7c3b,-,Fill-Mask,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ cvt-13,microsoft,CvtForImageClassification,7775,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,19984994,0,7d8bd070,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ prophetnet-large-uncased,microsoft,ProphetNetForConditionalGeneration,5629,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,421677051,0,dd2215e4,-,Text2Text Generation,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ resnet-101,microsoft,ResNetForImageClassification,303,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,44496488,0,c25a8655,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ resnet-152,microsoft,ResNetForImageClassification,303,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,60117096,0,432f1b45,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ resnet-18,microsoft,ResNetForImageClassification,677,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,11684712,0,4fa34148,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ resnet-34,microsoft,ResNetForImageClassification,288,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,21789160,0,34b5e579,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ resnet-50,microsoft,ResNetForImageClassification,113970,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,25530472,0,649b58e4,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ swin-base-patch4-window12-384-in22k,microsoft,SwinForImageClassification,1546,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,119270870,0,00040b7f,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ swin-base-patch4-window12-384,microsoft,SwinForImageClassification,381,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,97908845,0,4ae8ed0d,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ swin-base-patch4-window7-224-in22k,microsoft,SwinForImageClassification,6434,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,110250050,0,00040b7f,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ swin-base-patch4-window7-224,microsoft,SwinForImageClassification,1783,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,88888025,0,4ae8ed0d,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ swin-large-patch4-window12-384-in22k,microsoft,SwinForImageClassification,26264,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,242572310,0,c296f66d,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ swin-large-patch4-window7-224-in22k,microsoft,SwinForImageClassification,244,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,230104510,0,c296f66d,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ swin-large-patch4-window7-224,microsoft,SwinForImageClassification,8406,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,198071893,0,cb300b56,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ swin-small-patch4-window7-224,microsoft,SwinForImageClassification,562,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,50516251,0,90e0ffd2,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ swin-tiny-patch4-window7-224,microsoft,SwinForImageClassification,7898,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,28818337,0,d403933e,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ swinv2-tiny-patch4-window8-256,microsoft,SwinForImageClassification,1754,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,28771675,0,d403933e,apache-2.0,Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ trocr-base-handwritten,microsoft,ViTModel,6461,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,86653549,0,e45f61ed,-,-,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ trocr-base-printed,microsoft,ViTModel,18133,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,86653549,0,e45f61ed,-,-,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ trocr-large-handwritten,microsoft,ViTModel,1876,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,304666829,0,4b504cc2,-,-,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ trocr-large-printed,microsoft,ViTModel,2727,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,304666829,0,4b504cc2,-,-,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ trocr-large-str,microsoft,ViTModel,229,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,304666829,0,4b504cc2,-,-,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ trocr-small-handwritten,microsoft,DeiTModel,1138,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,21960301,0,5513139b,-,-,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ trocr-small-stage1,microsoft,VisionEncoderDecoderModel,585,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,61316403,0,d071f647,-,-,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ xprophetnet-large-wiki100-cased,microsoft,XLMProphetNetForConditionalGeneration,540,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,871333730,0,105cdd91,-,Text2Text Generation,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ clip-vit-base-patch16,openai,CLIPModel,70786,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,149585208,0,5fa6777a,-,Zero-Shot Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ clip-vit-base-patch32,openai,CLIPModel,2330296,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,151241784,0,25380eec,-,Zero-Shot Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,
+ clip-vit-large-patch14,openai,CLIPModel,11601851,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,427563136,0,d79341f4,-,Zero-Shot Image Classification,pytorch,-,-,-,-,-,-,-,-,,,,,,,