#!/usr/bin/env python3
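"""Build the Materials Project HDF5 dataset from the mp.2019.04.01 snapshot.

Pipeline: download the raw figshare archive, unzip it, stream-parse every
structure into a MaterialsProjectData sample, write an HDF5Dataset, and
compress the result into materials-project.tar.gz.
"""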

import os
import io
import shutil
import requests
import hashlib
import math
import json
import re
import tarfile
import resource

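# Raise the soft limit on open file descriptors to 65536 (the hard limit is
# left unchanged); building the dataset can hold many file handles open.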
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (1 << 16, rlimit[1]))

from ase.io import read
import torch
from materials_toolkit.data import HDF5Dataset
from materials_toolkit.data.datasets import MaterialsProjectData

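# Figshare archive of the Materials Project 2019.04.01 snapshot.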
zip_file = "mp.2019.04.01.json.zip"
url = "https://figshare.com/ndownloader/articles/8097992/versions/2"


def download_raw_mp(path: str = ".") -> str:
    """Download the raw archive (unless already cached) and return its SHA-1 digest."""
    filename = os.path.join(path, zip_file)

    sha1 = hashlib.sha1()

    # If the archive is already on disk, hash it instead of re-downloading.
    if os.path.exists(filename):
        with open(filename, "rb") as f:
            while True:
                data = f.read(1 << 20)
                if not data:
                    break
                sha1.update(data)
        return sha1.hexdigest()

    r = requests.get(url, stream=True)
    r.raise_for_status()

    with open(filename, "wb") as f:
        total_length = int(r.headers.get("content-length", 0))
        total_chunks = int(math.ceil(total_length / (1 << 20))) if total_length else "?"
        for i, chunk in enumerate(r.iter_content(chunk_size=1 << 20)):
            if chunk:
                sha1.update(chunk)
                f.write(chunk)
                print(f"[{i + 1}/{total_chunks}] downloading {zip_file} ...")

    return sha1.hexdigest()


def unzip(path: str = "."):
    """Extract the figshare archive, which itself contains mp.2019.04.01.json.zip."""
    temp_dir = os.path.join(path, "unzipped")

    os.makedirs(temp_dir, exist_ok=True)

    if not os.path.exists(os.path.join(temp_dir, "mp.2019.04.01.json.zip")):
        print("unzip mp.2019.04.01.json.zip")
        shutil.unpack_archive(os.path.join(path, zip_file), temp_dir)

    if not os.path.exists(os.path.join(temp_dir, "mp.2019.04.01.json")):
        print("unzip mp.2019.04.01.json")
        shutil.unpack_archive(
            os.path.join(temp_dir, "mp.2019.04.01.json.zip"),
            temp_dir,
        )


def gen_structure_from_json(filename: str, chunksize: int = 1 << 20):
    """Stream the huge top-level JSON array and yield one raw object string at a time.

    The file is too large for a single json.load, so the stream is split on the
    "},{" boundaries between top-level entries (assuming that pattern never
    occurs inside an entry).
    """
    stack = None
    with open(filename, "r") as fp:
        count = 0
        fp.seek(0, os.SEEK_END)
        total = int(math.ceil(fp.tell() / chunksize))
        fp.seek(0, os.SEEK_SET)

        while True:
            data = fp.read(chunksize)
            if len(data) == 0:
                break

            count += 1
            print(f"[{count}/{total}] processing {filename} ...")

            if stack is None:
                # Skip everything up to and including the array's first "{".
                stack = data[data.find("{") + 1 :]
            else:
                stack += data

            splited = re.split(r"}\s*,\s*{", stack)
            for elem in splited[:-1]:
                yield "{" + elem + "}"

            stack = splited[-1]

    if stack:
        # The final fragment still carries the closing "}]" of the array.
        stack = stack[: stack.rfind("}")]
        yield "{" + stack + "}"


def parse_structure(json_str: str) -> MaterialsProjectData:
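    """Convert one raw JSON entry into a MaterialsProjectData sample."""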
    data = json.loads(json_str)
    struct = read(io.StringIO(data["structure"]), format="cif")

    cell = torch.from_numpy(struct.cell.array).unsqueeze(0).float()
    x = torch.from_numpy(struct.get_scaled_positions()).float()
    z = torch.from_numpy(struct.get_atomic_numbers()).int()
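    # material_id strings look like "mp-1234"; keep only the integer part.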
    material_id = torch.tensor(
        [int(data["material_id"].split("-")[1])], dtype=torch.long
    )
    energy_pa = torch.tensor([data["formation_energy_per_atom"]], dtype=torch.float)

    return MaterialsProjectData(
        pos=x, z=z, cell=cell, material_id=material_id, energy_pa=energy_pa
    )


def process(path: str = "."):
    """Parse every structure, build the HDF5 dataset, and compute convex hulls."""
    mp_dir = os.path.join(path, "materials-project")
    processed_dir = os.path.join(mp_dir, "processed")

    os.makedirs(processed_dir, exist_ok=True)

    # Rebuild the dataset only if it is not already on disk.
    if not (
        os.path.exists(os.path.join(processed_dir, "batching.json"))
        and os.path.exists(os.path.join(processed_dir, "data.hdf5"))
    ):
        results = [
            parse_structure(elem)
            for elem in gen_structure_from_json(
                os.path.join(path, "unzipped/mp.2019.04.01.json")
            )
        ]

        HDF5Dataset.create_dataset(processed_dir, results)

    dataset = HDF5Dataset(mp_dir)
    dataset.compute_convex_hulls()


def compress(path: str = "."):
    """Pack the processed dataset into materials-project.tar.gz."""
    output_file = os.path.join(path, "materials-project.tar.gz")

    if os.path.exists(output_file):
        return

    print("compress into materials-project.tar.gz")
    with tarfile.open(output_file, "w:gz") as tar:
        tar.add(
            os.path.join(path, "materials-project/processed/batching.json"),
            arcname="batching.json",
        )
        tar.add(
            os.path.join(path, "materials-project/processed/data.hdf5"),
            arcname="data.hdf5",
        )


if __name__ == "__main__":
    download_raw_mp()
    unzip()
    process()
    compress()