import argparse
import gzip
import json

from sklearn.model_selection import GroupShuffleSplit


def read_groups():
    """Read schema-groups.txt into a list of name groups.

    The file lists names one per line; blank lines separate groups.

    Returns:
        list[set[str]]: one set of names per group. (A trailing blank
        line yields an empty final set, which never matches any name
        and is harmless downstream.)
    """
    groups = [set()]
    # `with` closes the handle deterministically; the original leaked it.
    with open("schema-groups.txt") as f:
        for line in f:
            name = line.strip()
            if name:
                groups[-1].add(name)
            else:
                groups.append(set())
    return groups


def sample(random_state, train_pct):
    """Split all.jsonl.gz into train/validation sets, grouped by schema.

    Records whose "name" belongs to the same group in schema-groups.txt
    always land in the same split; names not listed in any group each
    get their own singleton group id.

    Args:
        random_state (int): seed for GroupShuffleSplit.
        train_pct (float): fraction of groups assigned to the train split.

    Side effects:
        Writes train.jsonl.gz and validation.jsonl.gz in the working
        directory, reading all.jsonl.gz twice (once for names, once to
        copy lines).

    Raises:
        ValueError: if a record's name appears in more than one group.
    """
    groups = read_groups()
    next_id = len(groups)  # first free id for names not in any group
    names = []
    name_ids = []
    with gzip.open("all.jsonl.gz", "rt") as src:
        for line in src:
            obj = json.loads(line)
            name = obj["name"]
            names.append(name)
            found = False
            for i, group in enumerate(groups):
                if name in group:
                    # A name in two groups makes the split ambiguous.
                    # Raise explicitly rather than assert: asserts are
                    # stripped under `python -O`.
                    if found:
                        raise ValueError(
                            "name appears in multiple groups: %s" % name
                        )
                    found = True
                    name_ids.append(i)
            if not found:
                name_ids.append(next_id)
                next_id += 1

    gss = GroupShuffleSplit(
        n_splits=10, train_size=train_pct, random_state=random_state
    )
    # Only the first of the candidate splits is consumed.
    train_idx, test_idx = next(gss.split(names, groups=name_ids))
    # `idx in <ndarray>` is a linear scan, making the copy loop below
    # quadratic; build sets once for O(1) membership tests.
    train_set = set(train_idx)
    test_set = set(test_idx)

    with gzip.open("all.jsonl.gz", "rt") as src, \
            gzip.open("train.jsonl.gz", "wt") as train_file, \
            gzip.open("validation.jsonl.gz", "wt") as val_file:
        for idx, line in enumerate(src):
            if idx in train_set:
                train_file.write(line)
            elif idx in test_set:
                val_file.write(line)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_pct", type=float, default=0.8)
    parser.add_argument("--random_state", type=int, default=16)
    args = parser.parse_args()
    sample(args.random_state, args.train_pct)