Muennighoff committed
Commit: cd79dea
1 Parent(s): 7a98ce0

Create commitpack.py

Files changed (1)
1. commitpack.py +91 -0
commitpack.py ADDED
@@ -0,0 +1,91 @@
"""CommitPack"""

import json
import datasets


logger = datasets.logging.get_logger(__name__)

### To create paths ###
def get_paths():
    """Scan ./data and write paths.json, mapping each language directory to its .jsonl shards."""
    import json, glob, os
    files = {}
    for lang_dir in os.listdir("./data"):
        print("Processing", lang_dir)
        if not os.path.isdir("data/" + lang_dir):
            print(f"Skipping {lang_dir} as it is not a directory")
            continue
        for file in glob.glob(f"data/{lang_dir}/*.jsonl"):
            files[lang_dir] = files.get(lang_dir, []) + [file]
    with open("paths.json", "w") as f:
        json.dump(files, f)
    return files

_CITATION = """"""

_DESCRIPTION = """"""

URL = "https://huggingface.co/datasets/bigcode/commitpack/resolve/main/paths.json"

_LANG = ["json", "xml", "text", "javascript", "objective-c++", "python", "c", "c++", "markdown", "java", "html", "yaml", "go", "csv", "php", "jupyter-notebook", "gettext-catalog", "sql", "unity3d-asset", "typescript", "web-ontology-language", "ruby", "c#", "nix", "shell", "perl", "tex", "css", "restructuredtext", "rust", "groff", "ini", "scala", "coffeescript", "haskell", "swift", "lua", "svg", "gas", "ocaml", "erlang", "makefile", "asciidoc", "emacs-lisp", "scss", "clojure", "org", "common-lisp", "diff", "groovy", "html+erb", "nesc", "dart", "powershell", "f#", "dm", "kotlin", "pascal", "jsx", "viml", "actionscript", "cython", "turtle", "less", "mathematica", "xslt", "scheme", "perl6", "edn", "fortran", "java-server-pages", "standard-ml", "cmake", "json5", "vala", "vue", "freemarker", "graphql", "twig", "tcl", "pod", "dockerfile", "yacc", "postscript", "racket", "eagle", "haxe", "julia", "handlebars", "smarty", "visual-basic", "literate-haskell", "smalltalk", "isabelle", "nimrod", "zig", "m4", "max", "elixir", "mako", "arduino", "jade", "haml", "elm", "purebasic", "coldfusion", "lean", "r", "cuda", "textile", "robotframework", "abap", "rdoc", "llvm", "ada", "batchfile", "qml", "jasmin", "assembly", "g-code", "cucumber", "html+php", "kicad", "api-blueprint", "eiffel", "toml", "modelica", "bitbake", "lex", "stylus", "protocol-buffer", "unknown", "nit", "factor", "xs", "sass", "parrot-internal-representation", "html+django", "mediawiki", "logos", "genshi", "coldfusion-cfc", "xtend", "sqf", "vhdl", "antlr", "systemverilog", "hcl", "asp", "nsis", "inform-7", "slim", "groovy-server-pages", "ceylon", "fish", "processing", "component-pascal", "lasso", "glsl", "saltstack", "xbase", "autohotkey", "liquid", "purescript", "agda", "inno-setup", "oz", "chapel", "arc", "opencl", "graphviz-dot", "pawn", "jsoniq", "bluespec", "smali", "krl", "maple", "unrealscript", "ooc", "pure-data", "xquery", "digital-command-language", "moonscript", "awk", "pike", "livescript", "solidity", "monkey", "jsonld", "zephir", "crystal", "rhtml", "stata", "idris", "raml", "openscad", "red", "c2hs-haskell", "cycript", "applescript", "mupad", "literate-agda", "boo", "sourcepawn", "qmake", "ragel-in-ruby-host", "io", "desktop", "propeller-spin", "thrift", "volt", "xproc", "igor-pro", "lolcode", "html+eex", "logtalk", "mirah", "gnuplot", "literate-coffeescript", "jflex", "emberscript", "cobol", "yang", "rebol", "linker-script", "cartocss", "urweb", "rmarkdown", "darcs-patch", "csound", "squirrel", "apl", "hlsl", "latte", "pony", "ioke", "hy", "uno", "pan", "xojo", "papyrus", "stan", "slash", "supercollider", "vcl", "smt", "glyph", "wisp", "renpy", "clips", "dns-zone", "sas", "rouge", "ec", "dylan", "tcsh", "aspectj", "netlogo", "gap", "fancy", "coq", "click", "capn-proto", "flux", "forth", "ats", "netlinx", "clean", "parrot-assembly", "alloy", "lfe", "gdscript", "augeas", "sparql", "lilypond", "scilab", "autoit", "myghty", "blitzmax", "creole", "harbour", "piglatin", "opa", "sage", "ston", "maxscript", "lsl", "gentoo-ebuild", "nu", "bro", "xc", "j", "metal", "module-management-system", "webidl", "tea", "redcode", "shen", "pov-ray-sdl", "x10", "brainfuck", "ninja", "golo", "webassembly", "self", "labview", "octave", "pogoscript", "d", "http", "ecl", "chuck", "gosu", "parrot", "opal", "objective-j", "kit", "gams", "prolog", "clarion", "mask", "brightscript", "scaml", "matlab", "idl", "ags-script", "lookml", "apacheconf", "oxygene", "txl", "grammatical-framework", "renderscript", "mtml", "unified-parallel-c", "dogescript", "gentoo-eclass", "zimpl", "irc-log", "fantom", "numpy", "cirru", "xpages", "nginx", "objdump", "python-traceback", "realbasic", "befunge", "bison", "m", "omgrofl"]

_LICENSE = "Apache License 2.0"
_VERSION = datasets.Version("1.0.0", "")


class CommitPack(datasets.GeneratorBasedBuilder):
    # One configuration per programming language in _LANG.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            description=f"CommitPack {lang}",
            version=_VERSION,
        )
        for lang in _LANG
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "commit": datasets.Value("string"),
                    "old_file": datasets.Value("string"),
                    "new_file": datasets.Value("string"),
                    "old_contents": datasets.Value("string"),
                    "new_contents": datasets.Value("string"),
                    "subject": datasets.Value("string"),
                    "message": datasets.Value("string"),
                    "lang": datasets.Value("string"),
                    "license": datasets.Value("string"),
                    "repos": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # paths.json maps each language (config name) to its list of .jsonl shards.
        path_file = dl_manager.download(URL)
        with open(path_file, "r") as f:
            files = json.load(f)

        downloaded_files = dl_manager.download(files[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={'filepaths': downloaded_files}
            )
        ]

    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form."""
        logger.info("Generating examples from %s", filepaths)

        id_ = 0
        for p in filepaths:
            with open(p, "r") as f:
                for row in f:
                    data = json.loads(row)
                    yield id_, data
                    id_ += 1
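
For orientation, here is a minimal sketch of the mapping that get_paths() writes to paths.json and that _split_generators() later downloads and indexes by config name. The directory and shard names below are hypothetical placeholders; only the language-to-list-of-.jsonl-shards shape is implied by the script above.

    # Hypothetical shape of paths.json (file names are placeholders).
    # Keys are the language directories under data/; _split_generators()
    # looks up files[self.config.name], so config names must match these keys.
    paths = {
        "python": ["data/python/data_0.jsonl", "data/python/data_1.jsonl"],
        "go": ["data/go/data_0.jsonl"],
    }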
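
A minimal usage sketch, assuming this script is hosted as the bigcode/commitpack dataset on the Hugging Face Hub with paths.json alongside it. Each entry in _LANG is exposed as a separate configuration, and streaming avoids downloading all of a language's shards up front; depending on the installed datasets version, trust_remote_code=True may also be required to run a script-based dataset.

    # Stream a few examples from the "python" configuration.
    from datasets import load_dataset

    ds = load_dataset("bigcode/commitpack", "python", split="train", streaming=True)
    for i, example in enumerate(ds):
        print(example["commit"], example["old_file"], "->", example["new_file"])
        if i == 2:
            break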