"""CommitPack"""

import json
import datasets


logger = datasets.logging.get_logger(__name__)

### Helper used to (re)create paths.json; not called at load time ###
def get_paths():
    """Index every data/<lang>/*.jsonl shard by language and write the mapping to paths.json."""
    import glob, os  # json is already imported at module level
    files = {}
    for lang_dir in os.listdir("data"):
        print("Processing", lang_dir)
        if not os.path.isdir(os.path.join("data", lang_dir)):
            print(f"Skipping {lang_dir} as it is not a directory")
            continue
        for file in glob.glob(os.path.join("data", lang_dir, "*.jsonl")):
            files.setdefault(lang_dir, []).append(file)
    with open("paths.json", "w") as f:
        json.dump(files, f)
    return files
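
# Illustrative shape of paths.json (hypothetical file names; the real mapping is
# hosted alongside the dataset and fetched in _split_generators below):
# {"python": ["data/python/data_0.jsonl", "data/python/data_1.jsonl"],
#  "go": ["data/go/data_0.jsonl"], ...}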

_CITATION = """"""

_DESCRIPTION = """"""

URL = "https://huggingface.co/datasets/bigcode/commitpack/resolve/main/paths.json"

_LANG = ["json", "xml", "text", "javascript", "objective-c++", "python", "c", "c++", "markdown", "java", "html", "yaml", "go", "csv", "php", "jupyter-notebook", "gettext-catalog", "sql", "unity3d-asset", "typescript", "web-ontology-language", "ruby", "c#", "nix", "shell", "perl", "tex", "css", "restructuredtext", "rust", "groff", "ini", "scala", "coffeescript", "haskell", "swift", "lua", "svg", "gas", "ocaml", "erlang", "makefile", "asciidoc", "emacs-lisp", "scss", "clojure", "org", "common-lisp", "diff", "groovy", "html+erb", "nesc", "dart", "powershell", "f#", "dm", "kotlin", "pascal", "jsx", "viml", "actionscript", "cython", "turtle", "less", "mathematica", "xslt", "scheme", "perl6", "edn", "fortran", "java-server-pages", "standard-ml", "cmake", "json5", "vala", "vue", "freemarker", "graphql", "twig", "tcl", "pod", "dockerfile", "yacc", "postscript", "racket", "eagle", "haxe", "julia", "handlebars", "smarty", "visual-basic", "literate-haskell", "smalltalk", "isabelle", "nimrod", "zig", "m4", "max", "elixir", "mako", "arduino", "jade", "haml", "elm", "purebasic", "coldfusion", "lean", "r", "cuda", "textile", "robotframework", "abap", "rdoc", "llvm", "ada", "batchfile", "qml", "jasmin", "assembly", "g-code", "cucumber", "html+php", "kicad", "api-blueprint", "eiffel", "toml", "modelica", "bitbake", "lex", "stylus", "protocol-buffer", "unknown", "nit", "factor", "xs", "sass", "parrot-internal-representation", "html+django", "mediawiki", "logos", "genshi", "coldfusion-cfc", "xtend", "sqf", "vhdl", "antlr", "systemverilog", "hcl", "asp", "nsis", "inform-7", "slim", "groovy-server-pages", "ceylon", "fish", "processing", "component-pascal", "lasso", "glsl", "saltstack", "xbase", "autohotkey", "liquid", "purescript", "agda", "inno-setup", "oz", "chapel", "arc", "opencl", "graphviz-dot", "pawn", "jsoniq", "bluespec", "smali", "krl", "maple", "unrealscript", "ooc", "pure-data", "xquery", "digital-command-language", "moonscript", "awk", "pike", "livescript", "solidity", "monkey", "jsonld", "zephir", "crystal", "rhtml", "stata", "idris", "raml", "openscad", "red", "c2hs-haskell", "cycript", "applescript", "mupad", "literate-agda", "boo", "sourcepawn", "qmake", "ragel-in-ruby-host", "io", "desktop", "propeller-spin", "thrift", "volt", "xproc", "igor-pro", "lolcode", "html+eex", "logtalk", "mirah", "gnuplot", "literate-coffeescript", "jflex", "emberscript", "cobol", "yang", "rebol", "linker-script", "cartocss", "urweb", "rmarkdown", "darcs-patch", "csound", "squirrel", "apl", "hlsl", "latte", "pony", "ioke", "hy", "uno", "pan", "xojo", "papyrus", "stan", "slash", "supercollider", "vcl", "smt", "glyph", "wisp", "renpy", "clips", "dns-zone", "sas", "rouge", "ec", "dylan", "tcsh", "aspectj", "netlogo", "gap", "fancy", "coq", "click", "capn-proto", "flux", "forth", "ats", "netlinx", "clean", "parrot-assembly", "alloy", "lfe", "gdscript", "augeas", "sparql", "lilypond", "scilab", "autoit", "myghty", "blitzmax", "creole", "harbour", "piglatin", "opa", "sage", "ston", "maxscript", "lsl", "gentoo-ebuild", "nu", "bro", "xc", "j", "metal", "module-management-system", "webidl", "tea", "redcode", "shen", "pov-ray-sdl", "x10", "brainfuck", "ninja", "golo", "webassembly", "self", "labview", "octave", "pogoscript", "d", "http", "ecl", "chuck", "gosu", "parrot", "opal", "objective-j", "kit", "gams", "prolog", "clarion", "mask", "brightscript", "scaml", "matlab", "idl", "ags-script", "lookml", "apacheconf", "oxygene", "txl", "grammatical-framework", "renderscript", "mtml", "unified-parallel-c", "dogescript", 
"gentoo-eclass", "zimpl", "irc-log", "fantom", "numpy", "cirru", "xpages", "nginx", "objdump", "python-traceback", "realbasic", "befunge", "bison", "m", "omgrofl"]

_LICENSE = "Apache License 2.0"
_VERSION = datasets.Version("1.0.0", "")


class CommitPack(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            description=f"CommitPack {lang}",
            version=_VERSION,
        )
        for lang in _LANG
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "commit": datasets.Value("string"),
                    "old_file": datasets.Value("string"),
                    "new_file": datasets.Value("string"),
                    "old_contents": datasets.Value("string"),
                    "new_contents": datasets.Value("string"),
                    "subject": datasets.Value("string"),
                    "message": datasets.Value("string"),
                    "lang": datasets.Value("string"),
                    "license": datasets.Value("string"),
                    "repos": datasets.Value("string"),
#                    "returncode": datasets.Value("int64"),
#                    "stderr": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )
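
    # Illustrative single record under the schema above (hypothetical values,
    # shown for orientation only):
    # {
    #     "commit": "<40-char hex sha>",
    #     "old_file": "src/utils.py", "new_file": "src/utils.py",
    #     "old_contents": "...file before the commit...",
    #     "new_contents": "...file after the commit...",
    #     "subject": "Fix typo in error message",
    #     "message": "Fix typo in error message\n\nLonger commit body...",
    #     "lang": "Python", "license": "mit", "repos": "owner/repo",
    # }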
    
    def _split_generators(self, dl_manager):

        path_file = dl_manager.download(URL)
        with open(path_file, "r") as f:
            files = json.load(f)

        downloaded_files = dl_manager.download(files[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={'filepaths': downloaded_files}
            )
        ]

    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form."""
        logger.info("Generating examples from", filepaths)

        id_ = 0
        for p in filepaths:
            with open(p, "r") as f:
                for row in f:
                    data = json.loads(row)
                    yield id_, {
                        "commit": data["commit"],
                        "old_file": data["old_file"],
                        "new_file": data["new_file"],
                        "old_contents": data["old_contents"],
                        "new_contents": data["new_contents"],
                        "subject": data["subject"],
                        "message": data["message"],
                        "lang": data["lang"],
                        "license": data["license"],
                        "repos": data["repos"],                    
                    }
                    id_ += 1
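

if __name__ == "__main__":
    # Minimal usage sketch, assuming the standard `datasets` API; "python" is one
    # illustrative config name from _LANG. Streaming avoids downloading every shard.
    ds = datasets.load_dataset("bigcode/commitpack", "python", split="train", streaming=True)
    print(next(iter(ds))["subject"])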