# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metadata information of all the models available on HuggingFace's modelhub"""
import ast
import csv
import datasets
# Some model cards (README files) on the model hub exceed csv's default
# field size limit, so raise it before parsing.
csv.field_size_limit(100000000)
_CITATION = """\
"""
_DESCRIPTION = """\
Metadata information of all the models available on HuggingFace's modelhub
"""
_HOMEPAGE = "https://huggingface.co/models"
_LICENSE = ""
_URL = "huggingface-modelhub.csv"


class HuggingfaceModelhub(datasets.GeneratorBasedBuilder):
    """Metadata information of all the models available on HuggingFace's modelhub"""

    VERSION = datasets.Version("1.0.2")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "modelId": datasets.Value("string"),
                    "lastModified": datasets.Value("string"),
                    "tags": datasets.features.Sequence(datasets.Value("string")),
                    "pipeline_tag": datasets.Value("string"),
                    "files": datasets.features.Sequence(datasets.Value("string")),
                    "publishedBy": datasets.Value("string"),
                    "downloads_last_month": datasets.Value("int32"),
                    "library": datasets.Value("string"),
                    "modelCard": datasets.Value("large_string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
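
    # For illustration only (hypothetical values, not read from the CSV), a
    # single example yielded by this builder has the shape:
    # {
    #     "modelId": "bert-base-uncased",
    #     "lastModified": "2021-05-18T16:20:13.000Z",
    #     "tags": ["pytorch", "fill-mask"],
    #     "pipeline_tag": "fill-mask",
    #     "files": [".gitattributes", "config.json", "pytorch_model.bin"],
    #     "publishedBy": "huggingface",
    #     "downloads_last_month": 12345,
    #     "library": "transformers",
    #     "modelCard": "...",
    # }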
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_file = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_file,
                },
            ),
        ]
    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f)
            for id_, row in enumerate(reader):
                # The first row of the CSV is the header; skip it.
                if id_ == 0:
                    continue
                yield id_, {
                    "modelId": row[0],
                    "lastModified": row[1],
                    # tags and files are stored as stringified Python lists,
                    # e.g. "['pytorch', 'fill-mask']"; parse them safely.
                    "tags": ast.literal_eval(row[2]),
                    "pipeline_tag": row[3],
                    "files": ast.literal_eval(row[4]),
                    "publishedBy": row[5],
                    # The CSV may store this count as a float string (e.g.
                    # "123.0") or leave it empty; cast through float to int
                    # so the value matches the declared int32 feature.
                    "downloads_last_month": int(float(row[6])) if row[6] else 0,
                    "library": row[7],
                    "modelCard": row[8],
                }
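

# A minimal usage sketch (an illustration, not part of the loading script):
# assuming this file sits alongside huggingface-modelhub.csv, the dataset can
# be loaded locally via datasets.load_dataset, which picks up the builder
# defined above. The split name and printed fields are just examples.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, split="train")
    print(dataset[0]["modelId"], dataset[0]["downloads_last_month"])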