# Sample files per language from bigcode/the-stack and record, per file
# extension, how many examples have low alphanumeric content, very long
# lines, or fail to lex cleanly with Pygments.
from collections import defaultdict

import pandas as pd
import pygments
import pygments.lexers
import pygments.token
from pygments.util import ClassNotFound

from datasets import Dataset, load_dataset

# Language subsets of bigcode/the-stack to sample.
list_languages = ['ada', 'agda', 'alloy', 'antlr', 'applescript', 'assembly', 'augeas', 'awk', 'batchfile', 'bison',
                  'bluespec', 'c', 'c++', 'c-sharp', 'clojure', 'cmake', 'coffeescript', 'common-lisp', 'css', 'cuda',
                  'dart', 'dockerfile', 'elixir', 'elm', 'emacs-lisp', 'erlang', 'f-sharp', 'fortran', 'glsl', 'go',
                  'groovy', 'haskell', 'html', 'idris', 'isabelle', 'java', 'java-server-pages', 'javascript',
                  'julia', 'kotlin', 'lean', 'literate-agda', 'literate-coffeescript', 'literate-haskell', 'lua',
                  'makefile', 'maple', 'markdown', 'mathematica', 'matlab', 'ocaml', 'pascal', 'perl', 'php',
                  'powershell', 'prolog', 'protocol-buffer', 'python', 'r', 'racket', 'restructuredtext', 'rmarkdown',
                  'ruby', 'rust', 'sas', 'scala', 'scheme', 'shell', 'smalltalk', 'solidity', 'sparql', 'sql', 'stan',
                  'standard-ml', 'stata', 'systemverilog', 'tcl', 'tcsh', 'tex', 'thrift', 'typescript', 'verilog',
                  'vhdl', 'visual-basic', 'xslt', 'yacc', 'zig']

# the-stack language ids whose Pygments lexer alias is spelled differently.
lmap = {'c-sharp': 'csharp', 'f-sharp': 'fsharp', 'standard-ml': 'sml', 'batchfile': 'batch', 'java-server-pages': 'jsp'}

# Repository metadata columns we do not need for this analysis.
extra_columns = [
    "hexsha",
    "max_stars_repo_path",
    "max_stars_repo_name",
    "max_stars_repo_head_hexsha",
    "max_stars_repo_stars_event_min_datetime",
    "max_stars_repo_stars_event_max_datetime",
    "max_issues_repo_path",
    "max_issues_repo_name",
    "max_issues_repo_head_hexsha",
    "max_issues_repo_licenses",
    "max_issues_count",
    "max_issues_repo_issues_event_min_datetime",
    "max_issues_repo_issues_event_max_datetime",
    "max_forks_repo_path",
    "max_forks_repo_name",
    "max_forks_repo_head_hexsha",
    "max_forks_repo_licenses",
    "max_forks_count",
    "max_forks_repo_forks_event_min_datetime",
    "max_forks_repo_forks_event_max_datetime",
]

seed = 0
size = 20_000  # examples sampled per language
buffer_size = 40_000  # shuffle buffer for the streaming dataset
max_data_per_ext = 1000  # cap on examples kept per file extension

# Per-extension summary statistics, filled in language by language below.
df = pd.DataFrame(
    columns=[
        "extension",
        "language",
        "count",
        "low_alphanum_count",
        "long_lines_count",
        "non_lexable_count",
    ]
)


def low_alphanum(example):
    """Flag files whose alphanumeric character fraction is below 25%."""
    return {"low_alphanum": example["alphanum_fraction"] < 0.25}


def long_line(example):
    """Flag files whose longest line exceeds 1000 chars or whose average line length exceeds 100."""
    return {"long_lines": example["max_line_length"] > 1000 or example["avg_line_length"] > 100}


def thestack_language_id_to_pygments_language_id(lang):
    """Translate a the-stack language id into the alias Pygments' lexer registry expects."""
    return lmap.get(lang, lang)
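
# For illustration: Pygments knows the alias "csharp" but not "c-sharp", so
#   pygments.lexers.get_lexer_by_name("csharp")   # returns the C# lexer
#   pygments.lexers.get_lexer_by_name("c-sharp")  # raises ClassNotFound
# hence the rewrite above before the registry lookup.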


def can_lex_without_errors(lexer, contents: str):
    """Return True if `lexer` tokenizes `contents` without emitting any error token."""
    for tok_type, _ in pygments.lex(contents, lexer):
        if tok_type == pygments.token.Token.Error:
            return False
    return True


def lexable(example, language):
    """Lex a file with its language's Pygments lexer; "notfound" when no lexer exists."""
    try:
        lexer = pygments.lexers.get_lexer_by_name(thestack_language_id_to_pygments_language_id(language))
    except ClassNotFound:
        return {"lexable": "notfound"}
    return {"lexable": can_lex_without_errors(lexer, example["content"])}


for language in list_languages:
    # Stream the language subset (requires Hugging Face authentication, e.g.
    # via `huggingface-cli login`) so the full split is never downloaded.
    thestack = load_dataset(
        "bigcode/the-stack",
        use_auth_token=True,
        split="train",
        streaming=True,
        data_dir=f"data/{language}",
    )
    # Shuffling a streaming dataset is approximate: examples are drawn from a
    # rolling buffer of `buffer_size` items.
    thestack = thestack.shuffle(seed=seed, buffer_size=buffer_size)
    print(f"subset {language} ready, now selecting {size} samples")

    # Materialize a fixed-size sample and drop the unused metadata columns.
    small_ds = list(thestack.take(size))
    small_ds = Dataset.from_pandas(pd.DataFrame(data=small_ds))
    small_ds = small_ds.remove_columns(extra_columns)
    print(f"Dataset of {size} samples of {language} created")

    # Count how many sampled files carry each file extension.
    dict_extensions = defaultdict(int)
    for extension in small_ds["ext"]:
        dict_extensions[extension] += 1
    dict_extensions = dict(dict_extensions)
    print(f"Initial extension dist: {dict_extensions}")
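
    # Equivalent one-liner, for reference only (would also need
    # `from collections import Counter` at the top of the file):
    #   dict_extensions = dict(Counter(small_ds["ext"]))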

    for ext in dict_extensions:
        # Keep at most max_data_per_ext examples per extension.
        ext_ds = small_ds.filter(lambda x: x["ext"] == ext)
        real_count = min(max_data_per_ext, len(ext_ds))
        ext_ds = ext_ds.select(range(real_count))

        # Annotate every example with the three quality signals.
        ext_ds = ext_ds.map(low_alphanum)
        ext_ds = ext_ds.map(long_line)
        ext_ds = ext_ds.map(lambda x: lexable(x, language))

        low_alphanum_count = sum(ext_ds["low_alphanum"])
        long_lines_count = sum(ext_ds["long_lines"])
        # Note: "notfound" is truthy, so files whose language has no Pygments
        # lexer are not counted here.
        non_lexable_count = sum(not lexable for lexable in ext_ds["lexable"])

        new_dict = {
            "extension": ext,
            "language": language,
            "count": real_count,
            "low_alphanum_count": low_alphanum_count,
            "long_lines_count": long_lines_count,
            "non_lexable_count": non_lexable_count,
        }
        # DataFrame.append was removed in pandas 2.0; concatenate a one-row
        # frame instead.
        df = pd.concat([df, pd.DataFrame([new_dict])], ignore_index=True)
        print(f"New extension count: {new_dict}")

        # Persist the annotated subset for this (language, extension) pair.
        path = f"./data/{language}/{ext}/data.json"
        ext_ds.to_json(path)
        print(f"Subset of language: {language}, and extension: {ext} saved")


df.to_csv("./data/extension_distribution.csv")
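
# A quick sanity check of the summary, as a hypothetical follow-up (not part
# of the original pipeline):
#   stats = pd.read_csv("./data/extension_distribution.csv", index_col=0)
#   print(stats.sort_values("non_lexable_count", ascending=False).head(10))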