ncoop57 committed
Commit 5b35f10
Parent: 2a82acd

Add ability to generate get methods for each repo using repos_commits config

Files changed (1):
  1. athena_data.py +20 -10
athena_data.py CHANGED
@@ -24,12 +24,15 @@ import sys
 
 csv.field_size_limit(sys.maxsize)
 
+import pandas as pd
+
 from function_parser.language_data import LANGUAGE_METADATA
 from function_parser.parsers.java_parser import JavaParser
 from function_parser.process import DataProcessor
 from git import Git, Repo
 from glob import glob
 from tree_sitter import Language
+from subprocess import check_output
 
 LANG = "java"
 JAVA_LANG = Language(
@@ -147,14 +150,15 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         data_dir = os.path.join(data_dir, "repos-commits")
         if self.config.name == "repos_commits" and not os.path.exists(os.path.join(data_dir, "repos")):
             # Clone all repositories
-            check_output(
+            output = check_output(
                 [
                     "bash",
                     "clone.sh",
-                    "repo_names.txt",
+                    "repos.txt",
                 ],
                 cwd=data_dir,
             )
+            # print(output)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -175,7 +179,9 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         with open(file_path, encoding="utf-8") as f:
             csv_reader = csv.reader(f, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True)
             next(csv_reader, None)  # skip header
-            for row_id, row in enumerate(csv_reader):
+            row_id = -1
+            for _, row in enumerate(csv_reader):
+                row_id += 1
                 repo, parent_commit, commit, changes = row
                 changes = ast.literal_eval(changes)
                 # print(changes)
@@ -188,15 +194,19 @@ class NewDataset(datasets.GeneratorBasedBuilder):
                     }
                 elif self.config.name == "repos_commits":
                     repo_path = os.path.join(data_dir, "repos", repo)
-                    # Otherwise, parse the project
-                    g = Git(repo_path)
-                    g.checkout(self.commit)
+                    try:
+                        # Otherwise, parse the project
+                        g = Git(repo_path)
+                        g.checkout(commit, force=True)
+                    except Exception as e:
+                        print(e)
+                        continue
 
                     indexes = []
-                    files = glob(f"{repo_path}/**/*.{LANGUAGE_METADATA[LANG]['ext']}")
+                    files = glob(f"{repo_path}/**/*.{LANGUAGE_METADATA[LANG]['ext']}", recursive=True)
                     sha = None
                     for f in files:
-                        definitions = FUNC_PROCESSOR.get_function_definitions(str(f))
+                        definitions = FUNC_PROCESSOR.get_function_definitions(f)
                         if definitions is None:
                             continue
 
@@ -220,6 +230,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
                         }
                     )
                     for _, row in df.iterrows():
+                        row_id += 1
                         yield row_id, {
                             "repo": repo,
                             "parent_commit": parent_commit,
@@ -229,5 +240,4 @@ class NewDataset(datasets.GeneratorBasedBuilder):
                             "code": row["code"],
                             "code_tokens": row["code_tokens"],
                             "docstring": row["docstring"],
-                        }
-                        data = pd.concat([repo.definitions for repo in self.repos])
+                        }
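
For context, the new repos_commits configuration can be exercised like any other datasets loading-script config. Below is a minimal usage sketch, assuming the script is invoked locally by file path; this commit does not show a published Hub repo id, so "athena_data.py" here stands in for wherever the script lives:

    # Minimal sketch: load the new config and inspect one example.
    # Assumption: "athena_data.py" is the local loading script from this
    # commit; substitute the dataset's Hub repo id if it is published.
    from datasets import load_dataset

    ds = load_dataset("athena_data.py", "repos_commits", split="train")

    example = ds[0]
    print(example["repo"], example["parent_commit"], example["commit"])
    print(example["code"][:120])        # parsed Java function body
    print(example["code_tokens"][:10])  # token list from function_parser
    print(example["docstring"])

Two fixes in this diff matter for that run: glob() needs recursive=True for the "**" pattern to actually descend into subdirectories, and the manually incremented row_id keeps the example keys yielded by the GeneratorBasedBuilder unique now that a single CSV row can produce many function-level examples.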