{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from datasets import load_dataset" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "data_files = {\n", " \"train\": \"./train.txt\",\n", " \"val\": \"./val.txt\",\n", " \"test\": \"./test.txt\",\n", "}\n", "ds = load_dataset(\"text\", data_files=data_files)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "ds['train'] = ds['train'].rename_column('text', 'SMILE')\n", "ds['val'] = ds['val'].rename_column('text', 'SMILE')\n", "ds['test'] = ds['test'].rename_column('text', 'SMILE')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import selfies as sf\n", "\n", "def try_convert(row):\n", " selfie = None\n", " try:\n", " selfie = sf.encoder(row['SMILE'])\n", " except:\n", " pass\n", "\n", " return {'SELFIE': selfie}\n", "\n", "# Alongside the SMILES, we also need to convert them to SELFIES\n", "# ds['train'] = ds['train'].add_column('SELFIE', ds['train'].map(try_convert, num_proc=8))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "ds['train'] = ds['train'].map(try_convert, num_proc=8)\n", "ds['val'] = ds['val'].map(try_convert, num_proc=8)\n", "ds['test'] = ds['test'].map(try_convert, num_proc=8)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Drop the rows where the conversion failed\n", "ds['train'] = ds['train'].filter(lambda row: row['SELFIE'] is not None)\n", "ds['val'] = ds['val'].filter(lambda row: row['SELFIE'] is not None)\n", "ds['test'] = ds['test'].filter(lambda row: row['SELFIE'] is not None)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from tokenizers import Tokenizer\n", "\n", "tokenizer = Tokenizer.from_pretrained(\"haydn-jones/GuacamolSELFIETokenizer\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "unk_id = tokenizer.token_to_id('')\n", "\n", "# Drop any rows where the tokenization has an token\n", "ds['train'] = ds['train'].filter(lambda row: unk_id not in tokenizer.encode(row['SELFIE']).ids, num_proc=8)\n", "ds['val'] = ds['val'].filter(lambda row: unk_id not in tokenizer.encode(row['SELFIE']).ids, num_proc=8)\n", "ds['test'] = ds['test'].filter(lambda row: unk_id not in tokenizer.encode(row['SELFIE']).ids, num_proc=8)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "ds.save_to_disk('./guacamol')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "repo_id = \"haydn-jones/Guacamol\"\n", "\n", "# Push the dataset to the repo\n", "ds.push_to_hub(repo_id)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "ddpm", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.6" } }, "nbformat": 4, "nbformat_minor": 2 }