"""CommitPackFT"""
import json
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
TODO
"""
_DESCRIPTION = """\
TODO
"""
_LANG = ["ruby", "yaml", "python", "markdown", "javascript", "json", "shell", "text", "php", "java", "html", "c#", "xml", "html+erb", "c", "ini", "coffeescript", "swift", "restructuredtext", "typescript", "c++", "scss", "go", "scala", "haml", "css", "rust", "toml", "jsx", "kotlin", "clojure", "perl", "bitbake", "groovy", "twig", "nix", "sql", "less", "haskell", "handlebars", "unknown", "batchfile", "cucumber", "makefile", "elixir", "jade", "cmake", "powershell", "slim", "emacs-lisp", "dart", "viml", "asciidoc", "lua", "llvm", "smarty", "diff", "common-lisp", "saltstack", "vue", "sass", "fish", "erlang", "freemarker", "stylus", "qml", "hcl", "html+django", "mako", "ada", "ocaml", "f#", "elm", "tex", "rdoc", "csv", "protocol-buffer", "smalltalk", "arduino", "java-server-pages", "scheme", "groff", "objective-c++", "desktop", "factor", "crystal", "rhtml", "haxe", "glsl", "gas", "html+php", "qmake", "julia", "cython", "html+eex", "tcl", "org", "perl6", "m4", "xslt", "svg", "nimrod", "r", "robotframework", "racket", "textile", "assembly", "purescript", "unity3d-asset", "visual-basic", "dm", "pod", "standard-ml", "fortran", "gettext-catalog", "idris", "livescript", "xtend", "actionscript", "vala", "awk", "ceylon", "jupyter-notebook", "dockerfile", "rouge", "asp", "sqf", "edn", "liquid", "xquery", "linker-script", "mediawiki", "parrot-internal-representation", "solidity", "json5", "systemverilog", "thrift", "groovy-server-pages", "processing", "cuda", "graphviz-dot", "inno-setup", "api-blueprint", "nsis", "gentoo-ebuild", "logtalk", "jasmin", "literate-coffeescript", "webidl", "coldfusion-cfc", "opencl", "openscad", "pan", "pascal", "pony", "turtle", "chapel", "ioke", "ooc", "sparql", "applescript", "augeas", "g-code", "mirah", "capn-proto", "digital-command-language", "hy", "logos", "modelica", "vcl", "antlr", "gdscript", "graphql", "hlsl", "gnuplot", "http", "ninja", "oz", "raml", "aspectj", "autohotkey", "fancy", "moonscript", "piglatin", "stata", "urweb", "xs", "yang", "agda", "coldfusion", "emberscript", "latte", "literate-haskell", "postscript", "scilab", "tcsh", "volt", "apl", "genshi", "jsonld", "krl", "lean", "lfe", "metal", "monkey", "mupad", "nesc", "nit", "pike", "purebasic", "renpy", "vhdl", "xproc", "zephir", "apacheconf", "boo", "brainfuck", "bro", "cartocss", "creole", "csound", "dylan", "eagle", "ecl", "eiffel", "flux", "io", "jsoniq", "lilypond", "lsl", "mask", "nginx", "nu", "pov-ray-sdl", "ragel-in-ruby-host", "slash", "sourcepawn", "squirrel", "ston", "uno", "wisp", "xbase", "yacc", "zig", "abap", "arc", "ats", "blitzmax", "bluespec", "c2hs-haskell", "clean", "dns-zone", "forth", "harbour", "igor-pro", "inform-7", "isabelle", "jflex", "literate-agda", "maple", "mathematica", "module-management-system", "mtml", "netlinx", "parrot-assembly", "pawn", "propeller-spin", "pure-data", "rebol", "red", "sage", "sas", "scaml", "smt", "supercollider", "unrealscript", "xpages"]
_URL = "https://huggingface.co/datasets/bigcode/commitpackft/resolve/main/data/{lang}/data.jsonl"
_VERSION = datasets.Version("1.0.0", "")
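
# One BuilderConfig per language in _LANG; the config name selects which
# data/{lang}/data.jsonl file is downloaded from the bigcode/commitpackft repo.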
class CommitPackFT(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            description=f"CommitPackFT {lang}",
            version=_VERSION,
        )
        for lang in _LANG
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "commit": datasets.Value("string"),
                    "old_file": datasets.Value("string"),
                    "new_file": datasets.Value("string"),
                    "old_contents": datasets.Value("string"),
                    "new_contents": datasets.Value("string"),
                    "subject": datasets.Value("string"),
                    "message": datasets.Value("string"),
                    "lang": datasets.Value("string"),
                    "license": datasets.Value("string"),
                    "repos": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )
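
    # Each language config points to a single JSON Lines file, which is
    # downloaded and exposed in its entirety as the "test" split.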
    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URL.format(lang=self.config.name))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files},
            )
        ]
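
    # Read the downloaded JSONL file line by line; each line is one commit with
    # its before/after file contents and commit-message metadata.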
    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                yield id_, {
                    "commit": data["commit"],
                    "old_file": data["old_file"],
                    "new_file": data["new_file"],
                    "old_contents": data["old_contents"],
                    "new_contents": data["new_contents"],
                    "subject": data["subject"],
                    "message": data["message"],
                    "lang": data["lang"],
                    "license": data["license"],
                    "repos": data["repos"],
                }
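
# Usage sketch: one way to load a single language config of CommitPackFT through
# the `datasets` library. The config name "python" and `trust_remote_code=True`
# are illustrative assumptions; any name in _LANG is a valid config, only a
# "test" split is defined, and a `datasets` version that still supports
# script-based loading is assumed.
if __name__ == "__main__":
    from datasets import load_dataset

    # Downloads data/python/data.jsonl and builds the "test" split defined above.
    ds = load_dataset("bigcode/commitpackft", "python", split="test", trust_remote_code=True)
    print(ds[0]["subject"])
    print(ds[0]["message"])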