File size: 5,659 Bytes
e67edda |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 |
#!/usr/bin/env python3
import json
import zlib
from pathlib import Path

import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
def iter_commit_rows(snapshot_dir: Path, diff_dir: Path, repo: str):
    """Yield one flat training-row dict per commit, across every PR snapshot.

    For each ``pr-*.json`` snapshot in *snapshot_dir*, builds a chronological
    timeline of issue comments, review-thread comments, and commits, then for
    every commit with a non-empty diff yields a row whose ``prompt`` is the PR
    title/body plus everything that happened since the previous commit, and
    whose ``completion`` is that commit's diff.

    Parameters:
        snapshot_dir: directory of GraphQL PR snapshots named ``pr-<n>.json``.
        diff_dir:     directory of per-commit diffs named ``<oid>.diff``.
        repo:         repository slug copied verbatim into every row.
    """
    for snap_path in sorted(snapshot_dir.glob('pr-*.json')):
        pr = json.loads(snap_path.read_text(encoding='utf-8'))
        pr_number = pr.get('number')
        if pr_number is None:
            # Malformed snapshot; nothing useful can be keyed off it.
            continue

        commit_nodes = pr.get('commits', {}).get('nodes', [])

        # Attach each commit's diff text (stripped; '' when the file is absent).
        for cn in commit_nodes:
            commit = cn.get('commit', {})
            sha = commit.get('oid')
            if not sha:
                continue
            diff_path = diff_dir / f"{sha}.diff"
            if diff_path.exists():
                commit['diff'] = diff_path.read_text(encoding='utf-8').strip()
            else:
                commit['diff'] = ''

        # One merged timeline of (kind, timestamp, payload). ISO-8601 UTC
        # timestamps compare correctly as plain strings, so a string sort
        # yields chronological order. Entries without a timestamp are dropped.
        timeline = []
        for comment in pr.get('comments', {}).get('nodes', []):
            when = comment.get('createdAt')
            if when:
                timeline.append(('comment', when, comment))
        for thread in pr.get('reviewThreads', {}).get('nodes', []):
            for review_comment in thread.get('comments', {}).get('nodes', []):
                when = review_comment.get('createdAt')
                if when:
                    timeline.append(('review', when, review_comment))
        for cn in commit_nodes:
            when = cn.get('commit', {}).get('committedDate')
            if when:
                timeline.append(('commit', when, cn))
        timeline.sort(key=lambda entry: entry[1])

        # Index of the most recent commit event seen so far (any commit event,
        # even one skipped below for lacking an oid or diff).
        prev_commit_pos = None
        for pos, (kind, _when, payload) in enumerate(timeline):
            if kind != 'commit':
                continue
            anchor = prev_commit_pos
            prev_commit_pos = pos

            commit = payload['commit']
            sha = commit.get('oid')
            diff_text = commit.get('diff', '')
            if not sha or not diff_text.strip():
                continue

            parts = [
                f"Title: {pr.get('title', '')}",
                f"Body: {pr.get('body', '')}",
            ]
            # NOTE(review): assumes labels is a list of strings — join fails
            # on a GraphQL-style {'nodes': [...]} shape; verify the snapshot
            # format upstream.
            label_list = pr.get('labels') or []
            if label_list:
                parts.append("Labels: " + ", ".join(label_list))

            if anchor is not None:
                prev_commit = timeline[anchor][2]['commit']
                parts.append(
                    f"Last commit: {prev_commit.get('message')}\nDiff:\n{prev_commit.get('diff', '')}"
                )
                between = timeline[anchor + 1:pos]
            else:
                between = timeline[:pos]

            # Everything that happened after the previous commit and before
            # this one becomes prompt context.
            for ekind, _ts, edata in between:
                if ekind == 'comment':
                    text = edata.get('body', '').strip()
                    if text:
                        parts.append(f"Comment: {text}")
                elif ekind == 'review':
                    parts.append(
                        f"Review on {edata.get('path', '')}: "
                        f"{edata.get('body', '').strip()}\n"
                        f"Diff:\n{(edata.get('diffHunk') or '').strip()}"
                    )

            author = commit.get('author', {}) or {}
            yield {
                'prompt': '\n'.join(parts),
                'completion': f"Diff:\n{diff_text}",
                'repo': repo,
                'pr_number': pr_number,
                'title': pr.get('title', ''),
                'body': pr.get('body', ''),
                'created_at': pr.get('createdAt', ''),
                'closed_at': pr.get('closedAt', ''),
                'merged_at': pr.get('mergedAt', ''),
                'author': author.get('login', ''),
                'state': pr.get('state', ''),
                'additions': pr.get('additions', 0),
                'deletions': pr.get('deletions', 0),
                'changed_files': pr.get('changedFiles', 0),
                'head_ref': pr.get('headRefName', ''),
                'labels': ", ".join(label_list),
                'completion_commit': sha,
            }
def main():
    """Build train/test parquet datasets from PR snapshots and diffs.

    Streams rows from ``iter_commit_rows`` one at a time and routes ~20% of
    them to ``test.parquet`` by a *stable* CRC-32 hash of the completion
    commit SHA. The original code used the builtin ``hash()``, whose string
    hashing is randomized per process (PYTHONHASHSEED), so the train/test
    split differed on every run — ``zlib.crc32`` makes the split
    deterministic and reproducible.
    """
    base_dir = Path(__file__).resolve().parent
    snapshot_dir = base_dir.parent / 'data' / 'raw-data' / 'prs'
    diff_dir = base_dir.parent / 'data' / 'raw-data' / 'diffs'
    dataset_dir = base_dir.parent / 'data' / 'dataset'
    dataset_dir.mkdir(parents=True, exist_ok=True)

    # Explicit schema keeps column types stable even when individual rows
    # fall back to '' / 0 defaults.
    schema = pa.schema([
        ('prompt', pa.string()),
        ('completion', pa.string()),
        ('repo', pa.string()),
        ('pr_number', pa.int64()),
        ('title', pa.string()),
        ('body', pa.string()),
        ('created_at', pa.string()),
        ('closed_at', pa.string()),
        ('merged_at', pa.string()),
        ('author', pa.string()),
        ('state', pa.string()),
        ('additions', pa.int64()),
        ('deletions', pa.int64()),
        ('changed_files', pa.int64()),
        ('head_ref', pa.string()),
        ('labels', pa.string()),
        ('completion_commit', pa.string()),
    ])

    # Context managers guarantee the parquet footers are written (and file
    # handles released) even if row iteration raises part-way through.
    with pq.ParquetWriter(str(dataset_dir / 'train.parquet'), schema) as train_writer, \
         pq.ParquetWriter(str(dataset_dir / 'test.parquet'), schema) as test_writer:
        for row in iter_commit_rows(snapshot_dir, diff_dir, 'dotnet/runtime'):
            table = pa.Table.from_pydict({k: [v] for k, v in row.items()}, schema)
            # Deterministic split: CRC-32 of the commit SHA, ~1/5 to test.
            if zlib.crc32(row['completion_commit'].encode('utf-8')) % 5 == 0:
                test_writer.write_table(table)
            else:
                train_writer.write_table(table)
    print(f"Wrote train.parquet and test.parquet to {dataset_dir}")


if __name__ == '__main__':
    main()