from datasets import load_dataset, Dataset
import pandas as pd
import os

list_languages = ['ada', 'agda', 'alloy', 'antlr', 'applescript', 'assembly', 'augeas', 'awk', 'batchfile', 'bison', 'bluespec', 'c', 'c++', 'c-sharp', 'clojure', 'cmake', 'coffeescript', 'common-lisp', 'css', 'cuda', 'dart', 'dockerfile', 'elixir', 'elm', 'emacs-lisp', 'erlang', 'f-sharp', 'fortran', 'glsl', 'go', 'groovy', 'haskell', 'html', 'idris', 'isabelle', 'java', 'java-server-pages', 'javascript', 'julia', 'kotlin', 'lean', 'literate-agda', 'literate-coffeescript', 'literate-haskell', 'lua', 'makefile', 'maple', 'markdown', 'mathematica', 'matlab', 'ocaml', 'pascal', 'perl', 'php', 'powershell', 'prolog', 'protocol-buffer', 'python', 'r', 'racket', 'restructuredtext', 'rmarkdown', 'ruby', 'rust', 'sas', 'scala', 'scheme', 'shell', 'smalltalk', 'solidity', 'sparql', 'sql', 'stan', 'standard-ml', 'stata', 'systemverilog', 'tcl', 'tcsh', 'tex', 'thrift', 'typescript', 'verilog', 'vhdl', 'visual-basic', 'xslt', 'yacc', 'zig']

seed = 0
size = 10_000

for language in list_languages:
    # Stream the per-language subset of The Stack so the full split is never downloaded
    thestack = load_dataset('bigcode/the-stack', use_auth_token=True, split="train", streaming=True, data_dir=f"data/{language}")
    print(f"subset {language} loaded")
    ds = thestack.shuffle(seed=seed)
    # Take a 10k subset of random samples from ds
    small_ds = list(ds.take(size))
    # Convert to a Dataset
    small_ds = Dataset.from_pandas(pd.DataFrame(data=small_ds))
    print(f"Dataset of {size} samples of {language} created")
    print(f"Some tests: example 10 {small_ds[10]['lang']}")
    # Create the output directory first so to_json does not fail on a missing path
    path = f"./data/{language}/data.json"
    os.makedirs(os.path.dirname(path), exist_ok=True)
    small_ds.to_json(path)
    print(f"Small subset of {language} saved")
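
# A minimal sketch (not part of the original script) of how one of the saved subsets
# could be reloaded for inspection; "python" below is just an example language, and
# the path assumes the ./data/<language>/data.json layout written by the loop above.
reloaded = load_dataset("json", data_files="./data/python/data.json", split="train")
print(reloaded)
print(reloaded[0]["lang"])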