Asteri2themoon committed on
Commit
fa6ee2d
1 Parent(s): 77bc438

first version of the dump

Browse files
Files changed (5) hide show
  1. .gitattributes +2 -0
  2. .gitignore +4 -0
  3. README.md +44 -0
  4. download-and-process.py +219 -0
  5. materials-project.tar.gz +3 -0
.gitattributes CHANGED
@@ -53,3 +53,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
56
+ materials-project.tar.gz filter=lfs diff=lfs merge=lfs -text
57
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ unzipped
2
+ *.hdf5
3
+ *.json
4
+ *.zip
README.md CHANGED
@@ -1,3 +1,47 @@
1
  ---
2
  license: mit
 
 
 
 
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: mit
3
+ tags:
4
+ - chemistry
5
+ pretty_name: Materials Project
6
+ size_categories:
7
+ - 100K<n<1M
8
  ---
9
+
10
+ # Dataset
11
+
12
+ Materials project (2019 dump)
13
+
14
+ This dataset contains 133420 materials with formation energy per atom.
15
+
16
+ Processed from [mp.2019.04.01.json](https://figshare.com/articles/dataset/Graphs_of_Materials_Project_20190401/8097992)
17
+
18
+ # Download
19
+
20
+ Download link: [materials-project.tar.gz](https://huggingface.co/datasets/materials-toolkits/materials-project/resolve/main/materials-project.tar.gz)
21
+
22
+ MD5 checksum `c132f3781f32cd17f3a92aa6501b9531`
23
+
24
+ # Content
25
+
26
+ Bundled in `materials-project.tar.gz`.
27
+
28
+ ## Index (`index.json`)
29
+
30
+ A list of dicts, each with:
31
+ * `index` (int) => index of the structure in data file.
32
+ * `id` (str) => id of Materials Project.
33
+ * `formula` (str) => formula.
34
+ * `natoms` (int) => number of atoms.
35
+ * `energy_pa` (float) => formation energy per atom.
36
+
37
+ ## Data (`data.hdf5`)
38
+
39
+ fields:
40
+ * `structures` => a group containing structure information.
41
+ * `structures/cell` (float32) => lattice of the material.
42
+ * `structures/natoms` (int32) => number of atoms.
43
+ * `structures/energy_pa` (float32) => formation energy per atom.
44
+ * `structures/atoms_ptr` (int64) => position of the first atom of the structures in the `atoms` group.
45
+ * `atoms` => a group containing information about atoms.
46
+ * `atoms/positions` (float32) => the positions of the atoms.
47
+ * `atoms/atomic_number` (uint8) => the atomic number of the atoms.
download-and-process.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python
2
+
3
+ import os
4
+ import io
5
+ import shutil
6
+ from typing import Optional
7
+ import requests
8
+ import hashlib
9
+ import math
10
+ import multiprocessing as mp
11
+ import json
12
+ import re
13
+ import tarfile
14
+
15
+ from ase.io import read
16
+ import numpy as np
17
+ import h5py
18
+
19
# File name of the raw Materials Project json dump (2019-04-01 release).
zip_file = "mp.2019.04.01.json.zip"
# Figshare download URL for the dump archive (version 2 of article 8097992).
url = "https://figshare.com/ndownloader/articles/8097992/versions/2"
21
+
22
+
23
def download_raw_mp(path: Optional[str] = "."):
    """Download the raw Materials Project dump from figshare.

    If the archive already exists under *path* it is not downloaded again;
    in either case the SHA-1 digest of the file on disk is returned so the
    caller can verify integrity.

    Args:
        path: directory where the zip archive is stored.

    Returns:
        SHA-1 hex digest of the archive.
    """
    filename = os.path.join(path, zip_file)

    sha1 = hashlib.sha1()

    # Cached: hash the existing file instead of re-downloading.
    if os.path.exists(filename):
        with open(filename, "rb") as f:
            while True:
                data = f.read(1 << 20)
                if not data:
                    break
                sha1.update(data)
        return sha1.hexdigest()

    r = requests.get(url, stream=True)
    # Fail loudly on HTTP errors instead of saving an error page as the dump.
    r.raise_for_status()

    # BUG FIX: write to `filename` (inside *path*), not bare `zip_file`
    # in the current working directory.
    with open(filename, "wb") as f:
        # The server may omit Content-Length; show "?" instead of crashing
        # on int(None).
        total_length = int(r.headers.get("content-length") or 0)
        total_chunks = int(math.ceil(total_length / (1 << 20))) if total_length else "?"
        for i, chunk in enumerate(r.iter_content(chunk_size=1 << 20)):
            if chunk:
                sha1.update(chunk)
                f.write(chunk)
                print(f"[{i+1}/{total_chunks}] downloading {zip_file} ...")

    # BUG FIX: the original returned None on the fresh-download branch;
    # now both branches return the digest.
    return sha1.hexdigest()
49
+
50
+
51
def unzip(path: Optional[str] = "."):
    """Extract the figshare archive, then the inner MP json zip, under *path*.

    Produces ``<path>/unzipped/mp.2019.04.01.json``. Each step is skipped
    when its output already exists, so the function is idempotent.

    Args:
        path: directory containing the downloaded archive; files are
            extracted into its ``unzipped`` sub-directory.
    """
    temp_dir = os.path.join(path, "unzipped")

    os.makedirs(temp_dir, exist_ok=True)

    # Step 1: the figshare download is itself a zip wrapping the MP zip.
    if not os.path.exists(os.path.join(temp_dir, "mp.2019.04.01.json.zip")):
        print("unzip mp.2019.04.01.json.zip")
        # BUG FIX: resolve the archive relative to *path*, not the cwd,
        # matching where download_raw_mp() checks for it.
        shutil.unpack_archive(os.path.join(path, zip_file), temp_dir)

    # Step 2: extract the actual json dump.
    if not os.path.exists(os.path.join(temp_dir, "mp.2019.04.01.json")):
        print("unzip mp.2019.04.01.json")
        shutil.unpack_archive(
            os.path.join(temp_dir, "mp.2019.04.01.json.zip"),
            temp_dir,
        )
66
+
67
+
68
def gen_structure_from_json(filename: str, chunksize: Optional[int] = 1 << 20):
    """Stream individual JSON records out of the (huge) MP dump file.

    The dump is one big JSON array of objects, too large to ``json.load``
    at once. The file is read in *chunksize*-byte pieces and split into
    records on the ``},{`` separators (optional whitespace allowed); each
    record is yielded back as a self-contained JSON object string.

    NOTE(review): the regex split assumes a ``},{`` sequence never occurs
    *inside* a record (i.e. records serialize with no nested objects) —
    confirm against the dump format. Also raises TypeError on an empty
    input file (``stack`` stays ``None`` at the final yield).

    Args:
        filename: path to the json dump.
        chunksize: read granularity in bytes.

    Yields:
        One JSON object string ("{...}") per material record.
    """
    stack = None  # unparsed tail carried over between chunks
    with open(filename, "r") as fp:
        count = 0
        # Size the progress counter by seeking to the end once.
        fp.seek(0, os.SEEK_END)
        total = int(math.ceil(fp.tell() / chunksize))
        fp.seek(0, os.SEEK_SET)

        while True:
            data = fp.read(chunksize)

            print(f"[{count}/{total}] processing (unknown) ...")
            count += 1

            if len(data) == 0:
                break

            if stack is None:
                # First chunk: drop everything up to and including the
                # opening "{" of the first record.
                stack = data[data.find("{") + 1 :]
            else:
                stack += data

            # Every complete record in the buffer ends at a "},{" boundary;
            # the last fragment may be incomplete and is kept for next round.
            splited = re.split(r"}\s*,\s*{", stack)
            for elem in splited[:-1]:
                # The split consumed the braces; restore them.
                yield "{" + elem + "}"

            stack = splited[-1]

        # Final record: trim the array's trailing "}]" before yielding.
        stack = stack[: stack.rfind("}")]
        yield "{" + stack + "}"
98
+
99
+
100
def parse_structure(json_str: str) -> tuple:
    """Parse one Materials Project record (a JSON string) into plain values.

    The record's ``structure`` field holds a CIF document, which is read
    with ASE.

    Args:
        json_str: JSON object string for a single material.

    Returns:
        ``(material_id, natoms, formula, cell, x, z, energy_pa)`` where
        ``cell`` is the lattice array, ``x`` the scaled (fractional)
        positions and ``z`` the atomic numbers.
    """
    record = json.loads(json_str)

    # The CIF payload is embedded as a string; hand it to ASE via StringIO.
    structure = read(io.StringIO(record["structure"]), format="cif")

    return (
        record["material_id"],
        len(structure),
        structure.get_chemical_formula(),
        structure.cell.array,
        structure.get_scaled_positions(),
        structure.get_atomic_numbers(),
        record["formation_energy_per_atom"],
    )
113
+
114
+
115
def process_job(input_queue, output_queue):
    """Worker loop: parse jobs from *input_queue* until a ``None`` sentinel.

    Each job is a JSON record string; the parsed tuple is pushed onto
    *output_queue*. Returns once the sentinel is received.
    """
    # iter(get, None) keeps calling input_queue.get() and stops as soon as
    # the None sentinel comes back — same loop as an explicit while/break.
    for job in iter(input_queue.get, None):
        output_queue.put(parse_structure(job))
123
+
124
+
125
def process(path: Optional[str] = ".", workers: Optional[int] = max(4, os.cpu_count())):
    """Parse the json dump in parallel and write ``index.json`` + ``data.hdf5``.

    Records are fanned out to *workers* subprocesses running
    ``process_job``; the collected tuples are columnized into flat arrays
    and stored in an HDF5 file plus a JSON index. A no-op when both output
    files already exist under *path*.

    Args:
        path: working directory (expects ``unzipped/mp.2019.04.01.json``).
        workers: number of parser subprocesses.
    """
    if os.path.exists(os.path.join(path, "index.json")) and os.path.exists(
        os.path.join(path, "data.hdf5")
    ):
        return

    input_queue = mp.Queue()
    output_queue = mp.Queue()
    results = []

    # BUG FIX: the original joined over a loop variable named `process`,
    # shadowing this function; workers are named `w` here.
    procs = []
    for _ in range(workers):
        w = mp.Process(target=process_job, args=(input_queue, output_queue))
        w.start()
        procs.append(w)

    # Feed jobs while bounding the number in flight, so the queues never
    # hold the whole dump at once. A plain counter replaces Queue.qsize(),
    # which raises NotImplementedError on some platforms (e.g. macOS).
    pending = 0
    for elem in gen_structure_from_json(
        os.path.join(path, "unzipped/mp.2019.04.01.json")
    ):
        input_queue.put(elem)
        pending += 1
        while pending > 2 * workers:
            results.append(output_queue.get())
            pending -= 1

    # One sentinel per worker terminates the worker loops.
    for _ in range(workers):
        input_queue.put(None)

    # BUG FIX: drain ALL outstanding results BEFORE join(). Draining with
    # Queue.empty() after join() is unreliable and can drop results, and a
    # multiprocessing.Queue must be emptied before joining its producers
    # (documented deadlock: workers block until their buffered items are
    # consumed). The exact `pending` count makes the drain lossless.
    for _ in range(pending):
        results.append(output_queue.get())

    for w in procs:
        w.join()

    # Columnize the (material_id, natoms, formula, cell, x, z, energy_pa)
    # tuples produced by parse_structure.
    material_id = [material_id for material_id, _, _, _, _, _, _ in results]
    formula = [formula for _, _, formula, _, _, _, _ in results]

    natoms = np.array([natoms for _, natoms, _, _, _, _, _ in results], dtype=np.int64)
    # atoms_ptr[i] is the index of the first atom of structure i (CSR-style
    # pointer into the flat per-atom arrays below).
    atoms_ptr = np.pad(natoms.cumsum(0), (1, 0)).astype(np.int64)
    idx = np.arange(len(results), dtype=np.int64)

    cell = np.stack([cell for _, _, _, cell, _, _, _ in results], axis=0).astype(
        np.float32
    )
    x = np.concatenate([x for _, _, _, _, x, _, _ in results], axis=0).astype(
        np.float32
    )
    z = np.concatenate([z for _, _, _, _, _, z, _ in results], axis=0).astype(np.int64)
    energy_pa = np.array(
        [energy_pa for _, _, _, _, _, _, energy_pa in results], dtype=np.float32
    )

    index = [
        {
            "index": int(i),
            "id": str(m_id),
            "formula": str(f),
            "natoms": int(n),
            "energy_pa": float(e),
        }
        for i, m_id, f, n, e in zip(idx, material_id, formula, natoms, energy_pa)
    ]
    with open(os.path.join(path, "index.json"), "w") as fp:
        json.dump(index, fp)

    # BUG FIX: write the HDF5 file under *path* — the idempotency check at
    # the top looks there, not in the cwd — and use a context manager so
    # the file is closed even on error.
    with h5py.File(os.path.join(path, "data.hdf5"), "w") as f:
        structures = f.create_group("structures")
        structures.create_dataset("cell", data=cell, dtype=np.float32)
        structures.create_dataset("natoms", data=natoms, dtype=np.int32)
        structures.create_dataset("energy_pa", data=energy_pa, dtype=np.float32)
        structures.create_dataset("atoms_ptr", data=atoms_ptr, dtype=np.int64)

        atoms = f.create_group("atoms")
        atoms.create_dataset("positions", data=x, dtype=np.float32)
        atoms.create_dataset("atomic_number", data=z, dtype=np.uint8)
202
+
203
+
204
def compress(path: Optional[str] = "."):
    """Bundle ``index.json`` and ``data.hdf5`` into ``materials-project.tar.gz``.

    Skipped when the archive already exists (idempotent). Members are
    stored under their bare names so the archive layout does not depend on
    *path*.

    Args:
        path: directory holding the two input files and receiving the
            output archive.
    """
    output_file = os.path.join(path, "materials-project.tar.gz")

    if os.path.exists(output_file):
        return

    print("compress into materials-project.tar.gz")
    with tarfile.open(output_file, "w:gz") as tar:
        # BUG FIX: without arcname, members are stored with the on-disk
        # path prefix (e.g. "<path>/index.json"), not the flat layout the
        # README documents.
        tar.add(os.path.join(path, "index.json"), arcname="index.json")
        tar.add(os.path.join(path, "data.hdf5"), arcname="data.hdf5")
214
+
215
+
216
if __name__ == "__main__":
    # Full pipeline: download the raw dump, extract it, convert it to
    # index.json + data.hdf5, then bundle the result. The __main__ guard
    # keeps importing this module free of network/filesystem side effects.
    download_raw_mp()
    unzip()
    process()
    compress()
materials-project.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bcf27bb6a544f3cb28ab686ca98ba0977b8ef0d4445e14a63990c8cb91a4158b
3
+ size 40789247