import os

import requests
from tqdm import tqdm

subdir = 'data'
if not os.path.exists(subdir):
    os.makedirs(subdir)
subdir = subdir.replace('\\','/')  # forward slashes also work on Windows

# The WebText originals plus GPT-2 samples from each model size;
# the "-k40" variants were generated with top-k 40 sampling.
for ds in [
    'webtext',
    'small-117M',  'small-117M-k40',
    'medium-345M', 'medium-345M-k40',
    'large-762M',  'large-762M-k40',
    'xl-1542M',    'xl-1542M-k40',
]:
    for split in ['train', 'valid', 'test']:
        filename = ds + "." + split + '.jsonl'
        r = requests.get("https://storage.googleapis.com/gpt-2/output-dataset/v1/" + filename, stream=True)
        r.raise_for_status()  # fail fast on HTTP errors instead of saving an error page

        with open(os.path.join(subdir, filename), 'wb') as f:
            file_size = int(r.headers["content-length"])
            chunk_size = 1000  # ~1 KB chunks, since Ethernet packets are around 1500 bytes
            with tqdm(ncols=100, desc="Fetching " + filename, total=file_size, unit_scale=True) as pbar:
                for chunk in r.iter_content(chunk_size=chunk_size):
                    f.write(chunk)
                    pbar.update(len(chunk))  # count actual bytes; the last chunk may be short
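
# Optional sanity check (illustrative sketch, not part of the download loop):
# each file is JSON Lines, one JSON object per line, so the first line of any
# split should parse on its own. 'webtext.test.jsonl' is assumed to have been
# fetched successfully above.
import json

sample_path = os.path.join(subdir, 'webtext.test.jsonl')
with open(sample_path) as f:
    first_record = json.loads(f.readline())
print("First record fields in %s: %s" % (sample_path, sorted(first_record)))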