
Paper on arXiv

pre-training data

The dataset is split by programming language and docstring language; if you want a multilingual dataset, you need to load each language subset separately and combine them yourself (a sketch follows the pre-training example below).

from datasets import load_dataset
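# load the pre-training data for Python code with English ('en') docstrings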
xcsn_pt_python_en = load_dataset("ynklab/XCodeSearchNet", data_dir='pretraining/python/en')
"""
DatasetDict({
    train: Dataset({
        features: ['function_tokens', 'docstring'],
        num_rows: 453623
    })
    validation: Dataset({
        features: ['function_tokens', 'docstring'],
        num_rows: 4596
    })
    test: Dataset({
        features: ['function_tokens', 'docstring'],
        num_rows: 45283
    })
})
"""
print(xcsn_pt_python_en['train'][0])
"""
{
  'function_tokens': ['def', 'get_feature_ide_paths', '(', 'container_dir', ',', 'product_name', ')', ':', 'repo_name', '=', 'get_repo_name', '(', 'container_dir', ')', 'class', 'Paths', '(', 'object', ')', ':', 'feature_order_json', '=', 'os', '.', 'path', '.', 'join', '(', 'container_dir', ',', "'_lib/featuremodel/productline/feature_order.json'", ')', 'model_xml_path', '=', 'os', '.', 'path', '.', 'join', '(', 'container_dir', ',', "'_lib/featuremodel/productline/model.xml'", ')', 'config_file_path', '=', 'os', '.', 'path', '.', 'join', '(', 'container_dir', ',', "'_lib/featuremodel/productline/products/'", ',', 'repo_name', ',', 'product_name', ',', "'product.equation.config'", ')', 'equation_file_path', '=', 'os', '.', 'path', '.', 'join', '(', 'container_dir', ',', "'products'", ',', 'product_name', ',', "'product.equation'", ')', 'product_spec_path', '=', 'os', '.', 'path', '.', 'join', '(', 'container_dir', ',', "'_lib/featuremodel/productline/products/'", ',', 'repo_name', ',', "'product_spec.json'", ')', 'return', 'Paths'],
  'docstring': 'Takes the container_dir and the product name and returns all relevant paths from the\n    feature_order_json to the config_file_path.\n    :param container_dir: the full path of the container dir\n    :param product_name: the name of the product\n    :return: object with divert path attributes'
}
"""

fine-tuning data

from datasets import load_dataset
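# load the fine-tuning data for Python code with English ('en') docstrings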
xcsn_ft_python_en = load_dataset("ynklab/XCodeSearchNet", data_dir='finetuning/python/en')
"""
DatasetDict({
    train: Dataset({
        features: ['text'],
        num_rows: 1648684
    })
    validation: Dataset({
        features: ['text'],
        num_rows: 92426
    })
})
"""
print(xcsn_ft_python_en['train'][0])
"""
{
  'text': '1<CODESPLIT><CODESPLIT><CODESPLIT>Logs the definition of the object that was just auto - decorated inside the ipython notebook .<CODESPLIT>def _logdef ( self , n , o , otype ) : import re try : #The latest input cell will be the one that this got executed #from. TODO: actually, if acorn got imported after the fact, then #the import would have caused all the undecorated functions to be #decorated as soon as acorn imported. I suppose we just won\'t have #any code for that case. if otype == "classes" : cellno = max ( [ int ( k [ 2 : ] ) for k in self . shell . user_ns . keys ( ) if re . match ( "_i\\d+" , k ) ] ) elif otype == "functions" : cellno = int ( o . __code__ . co_filename . strip ( "<>" ) . split ( \'-\' ) [ 2 ] ) except : #This must not have been an ipython notebook declaration, so we #don\'t store the code. cellno = None pass code = "" if cellno is not None : cellstr = "_i{0:d}" . format ( cellno ) if cellstr in self . shell . user_ns : cellcode = self . shell . user_ns [ cellstr ] import ast astm = ast . parse ( cellcode ) ab = astm . body parts = { ab [ i ] . name : ( ab [ i ] . lineno , None if i + 1 >= len ( ab ) else ab [ i + 1 ] . lineno ) for i , d in enumerate ( ab ) } if n in parts : celllines = cellcode . split ( \'\\n\' ) start , end = parts [ n ] if end is not None : code = celllines [ start - 1 : end - 1 ] else : code = celllines [ start - 1 : ] #Now, we actually create the entry. Since the execution for function #definitions is almost instantaneous, we just log the pre and post #events at the same time. from time import time from acorn . logging . database import record entry = { "m" : "def" , "a" : None , "s" : time ( ) , "r" : None , "c" : code , } from acorn import msg record ( "__main__.{}" . format ( n ) , entry , diff = True ) msg . info ( entry , 1 )'
}
"""