from tqdm import tqdm
from datasets import Dataset
 
"""to run inside XLCOST_DATA folder after downloading XLCost data from this repo https://github.com/reddy-lab-code-research/XLCoST"""


class Example:
    """A single training/test example: a natural-language source paired with a code target."""
    def __init__(self, idx, source, target):
        self.idx = idx
        self.source = source
        self.target = target

def read_examples(filename):
    """Read examples from a "src_path,trg_path" pair of files given as one comma-separated string."""
    examples = []
    assert len(filename.split(',')) == 2
    src_filename, trg_filename = filename.split(',')
    idx = 0
    with open(src_filename) as f1, open(trg_filename) as f2:
        for line1, line2 in zip(f1, f2):
            examples.append(
                Example(
                    idx=idx,
                    source=line1.strip(),
                    target=line2.strip(),
                )
            )
            idx += 1
    return examples
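
# Example call (paths taken from the main block below; a sketch, not executed here):
#   read_examples(
#       "generation/pair_data_tok_1_comment/Python-comment/train-Python-comment-tok.txt,"
#       "generation/pair_data_tok_1_comment/Python-comment/train-Python-comment-tok.py"
#   )
# returns a list of Example objects pairing each comment line with its code line.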

def create_data(filename):
    """Build a `datasets.Dataset` with "text" (natural language) and "code" columns."""
    examples = read_examples(filename)
    text = []
    code = []
    print(len(examples))
    for i in tqdm(range(len(examples))):
        text.append(examples[i].source)
        code.append(examples[i].target)
    data = {"text": text, "code": code}
    data = Dataset.from_dict(data)
    return data
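
# Sanity check (a sketch, not part of the original pipeline): each split written below is a
# JSON Lines file, so it can be loaded back with the generic "json" loader of `datasets`, e.g.
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="xlcost-text-to-code/data/Python-snippet-level/train.json", split="train")
#   print(ds[0]["text"], ds[0]["code"])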

if __name__ == "__main__":
    # clone the xlcost-text-to-code Hub repo first; the splits below are written into its data/ folder
    LANG = ["Python", "C", "C#", "Java", "PHP", "Javascript", "C++"]
    EXTENSION = ["py", "c", "cs", "java", "php", "js", "cpp"]

    for i in range(len(LANG)):
        # for each language, save the train, validation, and test splits at both snippet and program level
        lang = LANG[i]
        ext = EXTENSION[i]
        print(f"language: {lang}")
        if lang == "C#":
            path_snippet = f"Csharp-snippet-level"
            path_program = f"Csharp-program-level"
        else:
            path_snippet = f"{lang}-snippet-level"
            path_program = f"{lang}-program-level"

        train_filename = f"generation/pair_data_tok_1_comment/{lang}-comment/train-{lang}-comment-tok.txt,generation/pair_data_tok_1_comment/{lang}-comment/train-{lang}-comment-tok.{ext}"
        valid_filename = f"generation/pair_data_tok_1_comment/{lang}-comment/val-{lang}-comment-tok.txt,generation/pair_data_tok_1_comment/{lang}-comment/val-{lang}-comment-tok.{ext}"
        test_filename = f"generation/pair_data_tok_1_comment/{lang}-comment/test-{lang}-comment-tok.txt,generation/pair_data_tok_1_comment/{lang}-comment/test-{lang}-comment-tok.{ext}"

        train = create_data(train_filename)
        valid = create_data(valid_filename)
        test = create_data(test_filename)

        train.to_json(f"xlcost-text-to-code/data/{path_snippet}/train.json", lines=True)
        valid.to_json(f"xlcost-text-to-code/data/{path_snippet}/valid.json", lines=True)
        test.to_json(f"xlcost-text-to-code/data/{path_snippet}/test.json", lines=True)

        train_filename = f"generation/pair_data_tok_full_desc_comment/{lang}-desc/train-{lang}-desc-tok.txt,generation/pair_data_tok_full_desc_comment/{lang}-desc/train-{lang}-desc-tok.{ext}"
        valid_filename = f"generation/pair_data_tok_full_desc_comment/{lang}-desc/val-{lang}-desc-tok.txt,generation/pair_data_tok_full_desc_comment/{lang}-desc/val-{lang}-desc-tok.{ext}"
        test_filename = f"generation/pair_data_tok_full_desc_comment/{lang}-desc/test-{lang}-desc-tok.txt,generation/pair_data_tok_full_desc_comment/{lang}-desc/test-{lang}-desc-tok.{ext}"

        train = create_data(train_filename)
        valid = create_data(valid_filename)
        test = create_data(test_filename)

        train.to_json(f"xlcost-text-to-code/data/{path_program}/train.json", lines=True)
        valid.to_json(f"xlcost-text-to-code/data/{path_program}/valid.json", lines=True)
        test.to_json(f"xlcost-text-to-code/data/{path_program}/test.json", lines=True)

        # finally, push the xlcost-text-to-code folder (containing data/ and the xlcost.py dataset builder script) to the Hub
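
# Once pushed, the splits should be loadable by a config name matching the folder names above,
# assuming the xlcost.py builder exposes one config per folder (a sketch; the exact repo id
# depends on where the repo lives on the Hub):
#   from datasets import load_dataset
#   ds = load_dataset("<user_or_org>/xlcost-text-to-code", "Python-snippet-level", split="train")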