haydn-jones committed
Commit
548dcee
1 Parent(s): 050702c

Upload generate_ds.ipynb

Files changed (1)
  1. utils/generate_ds.ipynb +156 -0
utils/generate_ds.ipynb ADDED
@@ -0,0 +1,156 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from datasets import load_dataset"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "data_files = {\n",
+ "    \"train\": \"./train.txt\",\n",
+ "    \"val\": \"./val.txt\",\n",
+ "    \"test\": \"./test.txt\",\n",
+ "}\n",
+ "ds = load_dataset(\"text\", data_files=data_files)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ds['train'] = ds['train'].rename_column('text', 'SMILE')\n",
+ "ds['val'] = ds['val'].rename_column('text', 'SMILE')\n",
+ "ds['test'] = ds['test'].rename_column('text', 'SMILE')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import selfies as sf\n",
+ "\n",
+ "def try_convert(row):\n",
+ "    selfie = None\n",
+ "    try:\n",
+ "        selfie = sf.encoder(row['SMILE'])\n",
+ "    except Exception:\n",
+ "        pass\n",
+ "\n",
+ "    return {'SELFIE': selfie}\n",
+ "\n",
+ "# Alongside the SMILES, we also need to convert them to SELFIES\n",
+ "# ds['train'] = ds['train'].add_column('SELFIE', ds['train'].map(try_convert, num_proc=8))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ds['train'] = ds['train'].map(try_convert, num_proc=8)\n",
+ "ds['val'] = ds['val'].map(try_convert, num_proc=8)\n",
+ "ds['test'] = ds['test'].map(try_convert, num_proc=8)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Drop the rows where the conversion failed\n",
+ "ds['train'] = ds['train'].filter(lambda row: row['SELFIE'] is not None)\n",
+ "ds['val'] = ds['val'].filter(lambda row: row['SELFIE'] is not None)\n",
+ "ds['test'] = ds['test'].filter(lambda row: row['SELFIE'] is not None)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from tokenizers import Tokenizer\n",
+ "\n",
+ "tokenizer = Tokenizer.from_pretrained(\"haydn-jones/GuacamolSELFIETokenizer\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "unk_id = tokenizer.token_to_id('<UNK>')\n",
+ "\n",
+ "# Drop any rows where the tokenization has an <UNK> token\n",
+ "ds['train'] = ds['train'].filter(lambda row: unk_id not in tokenizer.encode(row['SELFIE']).ids, num_proc=8)\n",
+ "ds['val'] = ds['val'].filter(lambda row: unk_id not in tokenizer.encode(row['SELFIE']).ids, num_proc=8)\n",
+ "ds['test'] = ds['test'].filter(lambda row: unk_id not in tokenizer.encode(row['SELFIE']).ids, num_proc=8)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ds.save_to_disk('./guacamol')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "repo_id = \"haydn-jones/Guacamol\"\n",
+ "\n",
+ "# Push the dataset to the repo\n",
+ "ds.push_to_hub(repo_id)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "ddpm",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.6"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
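
For reference, a minimal sketch of how the processed dataset could be loaded back and spot-checked, assuming the save path, dataset repo id, and tokenizer repo id used in the notebook above; the SMILE and SELFIE column names come from the cells in the diff.

from datasets import load_dataset, load_from_disk
from tokenizers import Tokenizer

# Reload the processed splits from the local copy written by save_to_disk ...
ds = load_from_disk("./guacamol")

# ... or, equivalently, pull the copy pushed to the Hub (assumes the push succeeded)
# ds = load_dataset("haydn-jones/Guacamol")

# Each retained row keeps the original SMILES string alongside its SELFIES encoding
row = ds["train"][0]
print(row["SMILE"], row["SELFIE"])

# Every retained row should tokenize without <UNK>, since such rows were filtered out above
tokenizer = Tokenizer.from_pretrained("haydn-jones/GuacamolSELFIETokenizer")
assert tokenizer.token_to_id("<UNK>") not in tokenizer.encode(row["SELFIE"]).ids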