|
|
|
|
|
import os |
|
import io |
|
import shutil |
|
from typing import Optional |
|
import requests |
|
import hashlib |
|
import math |
|
import multiprocessing as mp |
|
import json |
|
import re |
|
import tarfile |
|
import resource |
|
|
|
# Raise the soft limit on open file descriptors to 65536 while keeping the
# current hard limit — presumably needed because the HDF5 processing below
# holds many files open at once. NOTE(review): `resource` is POSIX-only, so
# this script will not run on Windows; confirm that is acceptable.
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)

resource.setrlimit(resource.RLIMIT_NOFILE, (1 << 16, rlimit[1]))
|
|
|
from ase.io import read |
|
import numpy as np |
|
import torch |
|
import h5py |
|
from materials_toolkit.data import HDF5Dataset |
|
from materials_toolkit.data.datasets import MaterialsProjectData |
|
|
|
# Filename of the Materials Project 2019.04.01 snapshot archive, and the
# figshare URL it is downloaded from.
zip_file = "mp.2019.04.01.json.zip"

url = "https://figshare.com/ndownloader/articles/8097992/versions/2"
|
|
|
|
|
def download_raw_mp(path: Optional[str] = ".") -> str:
    """Download the raw Materials Project archive into *path*.

    If the archive already exists at ``path/zip_file`` it is not re-downloaded;
    in both cases the SHA-1 hex digest of the archive content is returned so
    callers can verify integrity.

    Args:
        path: Directory to store the archive in (default: current directory).

    Returns:
        SHA-1 hex digest of the archive.
    """
    filename = os.path.join(path, zip_file)

    sha1 = hashlib.sha1()

    # Cached path: hash the existing file in 1 MiB chunks and return.
    if os.path.exists(filename):
        with open(filename, "rb") as f:
            while True:
                data = f.read(1 << 20)
                if not data:
                    break
                sha1.update(data)
        return sha1.hexdigest()

    # Fresh download: stream in 1 MiB chunks, hashing as we go.
    # `with` ensures the connection is released; raise_for_status surfaces
    # HTTP errors instead of silently saving an error page.
    with requests.get(url, stream=True) as r:
        r.raise_for_status()

        # content-length may be absent (chunked transfer); fall back to "?".
        total_length = int(r.headers.get("content-length") or 0)
        total_chunks = int(math.ceil(total_length / (1 << 20))) if total_length else "?"

        # Bug fix: write to `filename` (honoring `path`), not bare `zip_file`,
        # so the existence check above finds the file on the next run.
        with open(filename, "wb") as f:
            for i, chunk in enumerate(r.iter_content(chunk_size=1 << 20)):
                if chunk:
                    sha1.update(chunk)
                    f.write(chunk)
                    print(
                        f"[{i+1}/{total_chunks}] downloading {zip_file} ..."
                    )

    # Bug fix: also return the digest after a fresh download (was None).
    return sha1.hexdigest()
|
|
|
|
|
def unzip(path: Optional[str] = "."):
    """Extract the doubly-archived snapshot into ``path/unzipped``.

    The figshare download is a zip that itself contains
    ``mp.2019.04.01.json.zip``; both layers are unpacked, each step skipped
    if its output already exists.

    Args:
        path: Directory holding the downloaded archive (default: cwd).
    """
    temp_dir = os.path.join(path, "unzipped")

    os.makedirs(temp_dir, exist_ok=True)

    inner_zip = os.path.join(temp_dir, "mp.2019.04.01.json.zip")

    # Outer layer: figshare bundle -> inner zip.
    if not os.path.exists(inner_zip):
        print("unzip mp.2019.04.01.json.zip")
        # Bug fix: locate the downloaded archive under `path` (was a bare
        # cwd-relative `zip_file`, which broke any non-default `path`).
        shutil.unpack_archive(os.path.join(path, zip_file), temp_dir)

    # Inner layer: inner zip -> raw JSON file.
    if not os.path.exists(os.path.join(temp_dir, "mp.2019.04.01.json")):
        print("unzip mp.2019.04.01.json")
        shutil.unpack_archive(inner_zip, temp_dir)
|
|
|
|
|
def gen_structure_from_json(filename: str, chunksize: Optional[int] = 1 << 20):
    """Lazily yield one JSON-object string per entry of a large JSON array.

    The file is read in ``chunksize``-byte pieces and split on the ``},{``
    boundaries between top-level entries, so the whole file is never held in
    memory at once. Each yielded string is a standalone JSON object.

    NOTE(review): the regex split assumes ``}\\s*,\\s*{`` never occurs inside
    a string value of an entry — appears to hold for this dataset.

    Args:
        filename: Path of the JSON array file.
        chunksize: Bytes to read per iteration (default 1 MiB).

    Yields:
        str: one ``{...}`` JSON object per entry.
    """
    with open(filename, "r") as fp:
        # Measure the file to report chunk progress.
        fp.seek(0, os.SEEK_END)
        total = int(math.ceil(fp.tell() / chunksize))
        fp.seek(0, os.SEEK_SET)

        buffer = None
        count = 0

        while True:
            data = fp.read(chunksize)

            print(f"[{count}/{total}] processing (unknown) ...")
            count += 1

            if not data:
                break

            if buffer is None:
                # First chunk: drop everything up to and including the
                # opening brace of the first entry.
                buffer = data[data.find("{") + 1 :]
            else:
                buffer = buffer + data

            # All complete entries end before a "},{" boundary; the tail may
            # be partial, so it is carried over to the next chunk.
            pieces = re.split(r"}\s*,\s*{", buffer)
            for piece in pieces[:-1]:
                yield "{" + piece + "}"

            buffer = pieces[-1]

        # Final entry: trim the closing brace/bracket of the array.
        buffer = buffer[: buffer.rfind("}")]
        yield "{" + buffer + "}"
|
|
|
|
|
def parse_structure(json_str: str) -> MaterialsProjectData:
    """Convert one raw JSON entry into a ``MaterialsProjectData`` sample.

    The entry's CIF text is parsed with ASE; fractional coordinates, atomic
    numbers, the lattice, the numeric material id and the formation energy
    per atom are packed into tensors.

    Args:
        json_str: A single ``{...}`` entry as produced by
            ``gen_structure_from_json``.

    Returns:
        A populated ``MaterialsProjectData`` instance.
    """
    entry = json.loads(json_str)
    atoms = read(io.StringIO(entry["structure"]), format="cif")

    lattice = torch.from_numpy(atoms.cell.array).unsqueeze(0).float()
    frac_pos = torch.from_numpy(atoms.get_scaled_positions()).float()
    numbers = torch.from_numpy(atoms.get_atomic_numbers()).int()
    # "mp-12345" -> 12345
    mp_id = torch.tensor(
        [int(entry["material_id"].split("-")[1])], dtype=torch.long
    )
    energy = torch.tensor(
        [entry["formation_energy_per_atom"]], dtype=torch.float
    )

    return MaterialsProjectData(
        pos=frac_pos, z=numbers, cell=lattice, material_id=mp_id, energy_pa=energy
    )
|
|
|
|
|
def process(path: Optional[str] = "."):
    """Build the processed HDF5 dataset from the unzipped JSON snapshot.

    Parses every entry into a ``MaterialsProjectData`` sample and writes the
    dataset under ``path/materials-project/processed`` (skipped if both
    output files already exist), then computes convex hulls on the result.

    Args:
        path: Root working directory (default: cwd).
    """
    mp_dir = os.path.join(path, "materials-project")
    processed_dir = os.path.join(mp_dir, "processed")

    os.makedirs(processed_dir, exist_ok=True)

    batching_file = os.path.join(processed_dir, "batching.json")
    data_file = os.path.join(processed_dir, "data.hdf5")

    # Rebuild only when one of the two outputs is missing.
    if not (os.path.exists(batching_file) and os.path.exists(data_file)):
        json_path = os.path.join(path, "unzipped/mp.2019.04.01.json")
        # NOTE(review): all samples are materialized in memory before writing.
        structures = [
            parse_structure(entry) for entry in gen_structure_from_json(json_path)
        ]

        HDF5Dataset.create_dataset(processed_dir, structures)

    dataset = HDF5Dataset(mp_dir)
    dataset.compute_convex_hulls()
|
|
|
|
|
def compress(path: Optional[str] = "."):
    """Pack the processed dataset files into ``materials-project.tar.gz``.

    The archive contains ``batching.json`` and ``data.hdf5`` at its root.
    If the archive already exists this is a no-op.

    Args:
        path: Root working directory (default: cwd).
    """
    output_file = os.path.join(path, "materials-project.tar.gz")

    if os.path.exists(output_file):
        return

    print("compress into materials-project.tar.gz")

    processed = os.path.join(path, "materials-project/processed")
    with tarfile.open(output_file, "w:gz") as tar:
        # Second argument is the arcname: store both files at archive root.
        tar.add(os.path.join(processed, "batching.json"), "batching.json")
        tar.add(os.path.join(processed, "data.hdf5"), "data.hdf5")
|
|
|
|
|
if __name__ == "__main__":
    # Full pipeline: fetch the raw snapshot, extract it, build the HDF5
    # dataset, then compress the processed artifacts. Guarded so importing
    # this module does not trigger a multi-gigabyte download as a side effect.
    download_raw_mp()
    unzip()
    process()
    compress()
|
|