Laureηt committed on
Commit
ee41569
0 Parent(s):

Initial commit

Files changed (8)
  1. .envrc +1 -0
  2. .gitattributes +54 -0
  3. .gitignore +163 -0
  4. README.md +34 -0
  5. RxRx1.py +152 -0
  6. flake.lock +61 -0
  7. flake.nix +21 -0
  8. test.py +5 -0
.envrc ADDED
@@ -0,0 +1 @@
+ use flake
.gitattributes ADDED
@@ -0,0 +1,54 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Audio files - uncompressed
+ *.pcm filter=lfs diff=lfs merge=lfs -text
+ *.sam filter=lfs diff=lfs merge=lfs -text
+ *.raw filter=lfs diff=lfs merge=lfs -text
+ # Audio files - compressed
+ *.aac filter=lfs diff=lfs merge=lfs -text
+ *.flac filter=lfs diff=lfs merge=lfs -text
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
+ *.ogg filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ # Image files - uncompressed
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.tiff filter=lfs diff=lfs merge=lfs -text
+ # Image files - compressed
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,163 @@
+ .direnv
+
+ # https://github.com/github/gitignore/blob/main/Python.gitignore
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
README.md ADDED
@@ -0,0 +1,34 @@
+ ---
+ license: cc-by-nc-sa-4.0
+ task_categories:
+ - image-classification
+ size_categories:
+ - 10M<n<100M
+ tags:
+ - biology
+ - drug
+ - cells
+ ---
+
+ [![DOI](https://zenodo.org/badge/DOI/10.48550/arXiv.2301.05768.svg)](https://doi.org/10.48550/arXiv.2301.05768)
+
+ # RxRx1: A Dataset for Evaluating Experimental Batch Correction Methods
+
+ **Homepage**: https://www.rxrx.ai/rxrx1 \
+ **Publication Date**: 2019-06 \
+ **License**: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode) \
+ **Citation**:
+ ```bibtex
+ @misc{sypetkowski2023rxrx1,
+ title = {RxRx1: A Dataset for Evaluating Experimental Batch Correction Methods},
+ author = {Maciej Sypetkowski and Morteza Rezanejad and Saber Saberian and Oren Kraus and John Urbanik and James Taylor and Ben Mabey and Mason Victors and Jason Yosinski and Alborz Rezazadeh Sereshkeh and Imran Haque and Berton Earnshaw},
+ year = {2023},
+ eprint = {2301.05768},
+ archiveprefix = {arXiv},
+ primaryclass = {cs.CV}
+ }
+ ```
+
+ ## Description
+
+ High-throughput screening techniques are commonly used to obtain large quantities of data in many fields of biology. It is well known that artifacts arising from variability in the technical execution of different experimental batches within such screens confound these observations and can lead to invalid biological conclusions. It is therefore necessary to account for these batch effects when analyzing outcomes. In this paper we describe RxRx1, a biological dataset designed specifically for the systematic study of batch effect correction methods. The dataset consists of 125,510 high-resolution fluorescence microscopy images of human cells under 1,138 genetic perturbations in 51 experimental batches across 4 cell types. Visual inspection of the images alone clearly demonstrates significant batch effects. We propose a classification task designed to evaluate the effectiveness of experimental batch correction methods on these images and examine the performance of a number of correction methods on this task. Our goal in releasing RxRx1 is to encourage the development of effective experimental batch correction methods that generalize well to unseen experimental batches.
RxRx1.py ADDED
@@ -0,0 +1,152 @@
+ import pathlib
+ import datasets
+ import pandas as pd
+ import numpy as np
+ from PIL import Image
+
+ _DL_URL = "https://storage.googleapis.com/rxrx/rxrx1"
+
+ _BASE_URLS = {
+     "metadata": f"{_DL_URL}/rxrx1-metadata.zip",
+     "embeddings": f"{_DL_URL}/rxrx1-dl-embeddings.zip",
+     "images": f"{_DL_URL}/rxrx1-images.zip",
+ }
+
+ _HOMEPAGE = "https://www.rxrx.ai/rxrx1"
+
+ _DESCRIPTION = """
+ RxRx1 is the first dataset released by Recursion in the RxRx.ai series and was the topic of the NeurIPS 2019 CellSignal competition. It contains 125,510 images of 6-channel fluorescent cellular microscopy, taken from four kinds of cells perturbed by 1,138 siRNAs. The goal of the competition was to train models that could identify which siRNA was used in a given image taken from an experimental batch not seen in the training data. For more information about RxRx1 please visit RxRx.ai.
+ RxRx1 is part of a larger set of Recursion datasets that can be found at RxRx.ai and on GitHub. For questions about this dataset and others please email info@rxrx.ai.
+ """
+
+ _LICENSE = "CC BY NC SA 4.0"
+
+ _VERSION = datasets.Version("0.1.0")
+
+ _CITATION = """
+ @misc{sypetkowski2023rxrx1,
+ title = {RxRx1: A Dataset for Evaluating Experimental Batch Correction Methods},
+ author = {Maciej Sypetkowski and Morteza Rezanejad and Saber Saberian and Oren Kraus and John Urbanik and James Taylor and Ben Mabey and Mason Victors and Jason Yosinski and Alborz Rezazadeh Sereshkeh and Imran Haque and Berton Earnshaw},
+ year = {2023},
+ eprint = {2301.05768},
+ archiveprefix = {arXiv},
+ primaryclass = {cs.CV}
+ }
+ """
+
+ _WELL_TYPES = [
+     "treatment",
+     "positive_control",
+     "negative_control",
+ ]
+
+ _N_FEATURES = 128
+ _N_CHANNELS = 6
+
+ def get_image_path(image_dir, row, channel):
+     """Returns path to image."""
+     experiment = row["experiment"]
+     plate = row["plate"]
+     well = row["well"]
+     site = row["site"]
+     return image_dir / experiment / f"Plate{plate}" / f"{well}_s{site}_w{channel}.png"
+
+ class RxRx1(datasets.GeneratorBasedBuilder):
+     """RxRx1 dataset."""
+
+     DEFAULT_WRITER_BATCH_SIZE = 50
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             homepage=_HOMEPAGE,
+             description=_DESCRIPTION,
+             citation=_CITATION,
+             license=_LICENSE,
+             version=_VERSION,
+             features=datasets.Features(
+                 {
+                     "image": datasets.Array3D(shape=(512, 512, _N_CHANNELS), dtype="uint8"),
+                     "site_id": datasets.Value("string"),
+                     "well_id": datasets.Value("string"),
+                     "cell_type": datasets.Value("string"),
+                     "experiment": datasets.Value("string"),
+                     "plate": datasets.Value("int32"),
+                     "well": datasets.Value("string"),
+                     "site": datasets.Value("int32"),
+                     "well_type": datasets.ClassLabel(names=_WELL_TYPES),
+                     "sirna": datasets.Value("string"),
+                     "sirna_id": datasets.Value("int32"),
+                     "embeddings": datasets.Sequence(feature=datasets.Value("float32"), length=_N_FEATURES),
+                 }
+             ),
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # download and extract archives
+         archives = {
+             name: pathlib.Path(dl_manager.download_and_extract(url)) / "rxrx1"
+             for name, url in _BASE_URLS.items()
+         }
+
+         # load dataframes
+         df_metadata = pd.read_csv(archives["metadata"] / "metadata.csv")
+         df_embeddings = pd.read_csv(archives["embeddings"] / "embeddings.csv")
+
+         # merge dataframes
+         df = pd.merge(df_metadata, df_embeddings, on="site_id")
+
+         # split dataframes
+         df_train = df[df["dataset"] == "train"].drop("dataset", axis=1)
+         df_test = df[df["dataset"] == "test"].drop("dataset", axis=1)
+
+         # get image path
+         image_dir = archives["images"] / "images"
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "dataframe_rows": list(df_train.iterrows()),
+                     "image_dir": image_dir,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "dataframe_rows": list(df_test.iterrows()),
+                     "image_dir": image_dir,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, dataframe_rows, image_dir):
+         """Generate images and labels for splits."""
+         # loop over rows in dataframe
+         for (i, row) in dataframe_rows:
+             # retrieve image from 6 grayscale images
+             image = np.stack([
+                 Image.open(get_image_path(image_dir, row, channel))
+                 for channel in range(1, _N_CHANNELS + 1)
+             ], axis=-1)
+
+             embeddings = np.array([
+                 row[f"feature_{i}"]
+                 for i in range(_N_FEATURES)
+             ])
+
+             # yield example: image + embeddings + metadata
+             yield i, {
+                 "image": image,
+                 "embeddings": embeddings,
+                 "site_id": row["site_id"],
+                 "well_id": row["well_id"],
+                 "cell_type": row["cell_type"],
+                 "experiment": row["experiment"],
+                 "plate": row["plate"],
+                 "well": row["well"],
+                 "site": row["site"],
+                 "well_type": row["well_type"],
+                 "sirna": row["sirna"],
+                 "sirna_id": row["sirna_id"],
+             }
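
The loader above reconstructs each example by opening the six per-channel grayscale PNGs at `<experiment>/Plate<plate>/<well>_s<site>_w<channel>.png` and stacking them along the last axis. Below is a minimal standalone sketch of that step; it is not part of this commit, and the `image_dir` path and the experiment/plate/well/site values are hypothetical placeholders for a locally extracted `rxrx1/images` tree.

```python
import pathlib

import numpy as np
from PIL import Image

image_dir = pathlib.Path("rxrx1/images")  # hypothetical local path to the extracted images archive
row = {"experiment": "HEPG2-01", "plate": 1, "well": "B02", "site": 1}  # hypothetical site

# One grayscale PNG per fluorescence channel, named <well>_s<site>_w<channel>.png
channels = [
    Image.open(
        image_dir / row["experiment"] / f"Plate{row['plate']}" / f"{row['well']}_s{row['site']}_w{c}.png"
    )
    for c in range(1, 7)
]

# Stack channels last, mirroring _generate_examples
image = np.stack(channels, axis=-1).astype(np.uint8)
print(image.shape, image.dtype)  # expected (512, 512, 6) uint8, matching the Array3D feature in _info
```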
flake.lock ADDED
@@ -0,0 +1,61 @@
+ {
+   "nodes": {
+     "flake-utils": {
+       "inputs": {
+         "systems": "systems"
+       },
+       "locked": {
+         "lastModified": 1694529238,
+         "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
+         "owner": "numtide",
+         "repo": "flake-utils",
+         "rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
+         "type": "github"
+       },
+       "original": {
+         "owner": "numtide",
+         "repo": "flake-utils",
+         "type": "github"
+       }
+     },
+     "nixpkgs": {
+       "locked": {
+         "lastModified": 1694767346,
+         "narHash": "sha256-5uH27SiVFUwsTsqC5rs3kS7pBoNhtoy9QfTP9BmknGk=",
+         "owner": "NixOS",
+         "repo": "nixpkgs",
+         "rev": "ace5093e36ab1e95cb9463863491bee90d5a4183",
+         "type": "github"
+       },
+       "original": {
+         "owner": "NixOS",
+         "ref": "nixos-unstable",
+         "repo": "nixpkgs",
+         "type": "github"
+       }
+     },
+     "root": {
+       "inputs": {
+         "flake-utils": "flake-utils",
+         "nixpkgs": "nixpkgs"
+       }
+     },
+     "systems": {
+       "locked": {
+         "lastModified": 1681028828,
+         "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+         "owner": "nix-systems",
+         "repo": "default",
+         "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+         "type": "github"
+       },
+       "original": {
+         "owner": "nix-systems",
+         "repo": "default",
+         "type": "github"
+       }
+     }
+   },
+   "root": "root",
+   "version": 7
+ }
flake.nix ADDED
@@ -0,0 +1,21 @@
+ {
+   inputs = {
+     nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
+     flake-utils.url = "github:numtide/flake-utils";
+   };
+
+   outputs = { self, nixpkgs, flake-utils }:
+     flake-utils.lib.eachDefaultSystem (system:
+       let pkgs = nixpkgs.legacyPackages.${system};
+       in {
+         devShell = pkgs.mkShell {
+           buildInputs = with pkgs; [
+             python311
+             python311Packages.datasets
+             python311Packages.pillow
+             python311Packages.pandas
+             python311Packages.numpy
+           ];
+         };
+       });
+ }
test.py ADDED
@@ -0,0 +1,5 @@
+ import datasets
+
+ ds = datasets.load_dataset('./RxRx1.py', num_proc=16)
+
+ print(ds)
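
test.py only loads the dataset and prints the resulting `DatasetDict`. As a rough follow-on sketch, assuming the archives referenced in RxRx1.py download successfully (nothing below is part of the commit), a single example could be inspected against the features declared in `_info` like this:

```python
import numpy as np
import datasets

# Load only the train split through the same loader script used in test.py
ds_train = datasets.load_dataset("./RxRx1.py", split="train")

example = ds_train[0]
image = np.asarray(example["image"], dtype=np.uint8)  # (512, 512, 6), per the Array3D feature
print(image.shape)
print(example["cell_type"], example["experiment"], example["sirna_id"])
print(len(example["embeddings"]))  # 128 floats, per _N_FEATURES
```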