import os
import json
import re
import pandas as pd
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@article{jolma2010multiplexed,
  title={Multiplexed massively parallel SELEX for characterization of human transcription factor binding specificities},
  author={Jolma, Arttu and Kivioja, Teemu and Toivonen, Jarkko and Cheng, Lu and Wei, Gonghong and Enge, Martin and
          Taipale, Mikko and Vaquerizas, Juan M and Yan, Jian and Sillanp{\"a}{\"a}, Mikko J and others},
  journal={Genome research},
  volume={20},
  number={6},
  pages={861--873},
  year={2010},
  publisher={Cold Spring Harbor Lab}
}
"""
_DESCRIPTION = """\
PRJEB3289
https://www.ebi.ac.uk/ena/browser/view/PRJEB3289
Data generated by HT-SELEX experiments (see Jolma et al. 2010, PMID: 20378718, for a description of the method) \
that has since been used to generate transcription factor binding specificity models for most high-confidence \
human transcription factors. The sequence data consists of reads generated with Illumina Genome Analyzer IIX and \
HiSeq2000 instruments. Samples are single-read sequencing of synthetic DNA fragments with a fixed-length \
randomized region, or samples derived from such an initial library by selection with a sequence-specific DNA binding \
protein. Originally, multiple samples with different "barcode" tag sequences were run on the same Illumina sequencing \
lane, but the released files have already been de-multiplexed, and the constant regions and "barcodes" of each sequence \
have been cut out of the sequencing reads to facilitate use of the data. Some of the files contain reads from \
multiple sequencing lanes, and because of this the name of each individual read has been edited to show \
the flowcell and lane used to generate it. Barcodes and oligonucleotide designs are indicated in the names of \
individual entries. Depending on the selection ligand design, the sequences in each of these fastq files are either \
14, 20, 30 or 40 bases long and had different flanking regions on both sides of the sequence. Each run entry is named \
in one of the following ways: Example 1) "BCL6B_DBD_AC_TGCGGG20NGA_1", where the name is composed of the fields \
ProteinName_CloneType_Batch_BarcodeDesign_SelectionCycle. This experiment used barcode ligand TGCGGG20NGA, where both \
of the variable flanking constant regions are indicated as they were on the original sequence reads. This ligand was \
selected for one round of HT-SELEX using a recombinant protein containing the DNA binding domain of the \
human transcription factor BCL6B, and the experiment was performed in the batch of experiments named "AC". \
Example 2) "0_TGCGGG20NGA_0", where the name is composed of (zero)_BarcodeDesign_(zero). These sequences were generated \
by sequencing the initial non-selected pool. The same initial pools have been used in multiple experiments run in \
different batches; for example, this background sequence pool is the shared background for all of the following \
samples: BCL6B_DBD_AC_TGCGGG20NGA_1, ZNF784_full_AE_TGCGGG20NGA_3, DLX6_DBD_Y_TGCGGG20NGA_4 and MSX2_DBD_W_TGCGGG20NGA_2
"""
_URL = "ftp://ftp.sra.ebi.ac.uk/vol1/run/"
"ftp://ftp.sra.ebi.ac.uk/vol1/run/ERR173/ERR173154/CTCF_full_AJ_TAGCGA20NGCT_1.fastq.gz"
# _FORWARD_PRIMER = "TAATACGACTCACTATAGGGAGCAGGAGAGAGGTCAGATG"
# _REVERSE_PRIMER = "CCTATGCGTGCTAGTGTGA"
# _DESIGN_LENGTH = 30
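# Per-run metadata (fastq file name and ligand design) is read from the "selex" spreadsheet
# shipped with the thewall/deepbindweight dataset; its index column holds the SRA run ids.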
config = datasets.load_dataset(path="thewall/deepbindweight", split="all")
info = pd.read_excel(config['selex'][0], index_col=0)
_URLS = {}
_DESIGN_LENGTH = {}
pattern = re.compile(r"(\d+)")
for idx, row in info.iterrows():
    sra_id = idx
    file = row["file"]
    _URLS[sra_id] = "/".join([_URL, sra_id[:6], sra_id, file])
    # The randomized-region length is the number embedded in the ligand design, e.g. "TGCGGG20NGA" -> 20.
    _DESIGN_LENGTH[sra_id] = int(pattern.search(row["Ligand"]).group(0))


class JolmaConfig(datasets.BuilderConfig):
    """BuilderConfig for a single HT-SELEX run, identified by its SRA run id."""

    def __init__(self, url, sra_id="ERR173157", length_match=True, filter_N=True, design_length=None, file=None, **kwargs):
        super(JolmaConfig, self).__init__(**kwargs)
        self.url = url
        self.sra_id = sra_id
        self.length_match = length_match
        self.design_length = design_length
        self.filter_N = filter_N
        self.file = file


class Jolma(datasets.GeneratorBasedBuilder):
    """HT-SELEX reads from Jolma et al. (PRJEB3289), one config per SRA run."""

    BUILDER_CONFIGS = [
        JolmaConfig(name=key, url=_URLS[key], design_length=_DESIGN_LENGTH[key], file=info.loc[key]['file'])
        for key in _URLS
    ]
    DEFAULT_CONFIG_NAME = "ERR173157"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "identifier": datasets.Value("string"),
                    "seq": datasets.Value("string"),
                    "quality": datasets.Value("string"),
                }
            ),
            homepage="https://www.ebi.ac.uk/ena/browser/view/PRJEB3289",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = None
        if getattr(self.config, "data_dir", None) is not None:
            # downloaded_files = os.path.join(self.config.data_dir, self.config.sra_id, self.config.file)
            downloaded_files = dl_manager.extract(os.path.join(self.config.data_dir, self.config.sra_id, self.config.file))
            logger.info(f"Load from {downloaded_files}")
        if downloaded_files is None or not os.path.exists(downloaded_files):
            logger.info(f"Download from {self.config.url}")
            downloaded_files = dl_manager.download_and_extract(self.config.url)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        key = 0
        with open(filepath, encoding="utf-8") as f:
            ans = {"id": key}
            for i, line in enumerate(f):
                # FASTQ records span four lines: @identifier, sequence, "+", quality.
                if line.startswith("@") and i % 4 == 0:
                    ans["identifier"] = line[1:].strip()
                elif i % 4 == 1:
                    ans["seq"] = line.strip()
                elif i % 4 == 3:
                    ans["quality"] = line.strip()
                    if self.filter_fn(ans):
                        yield key, ans
                        key += 1
                    ans = {"id": key}

    def filter_fn(self, example):
        seq = example["seq"]
        if self.config.length_match and len(seq) != self.config.design_length:
            return False
        if self.config.filter_N and "N" in seq:
            return False
        return True
if __name__=="__main__":
from datasets import load_dataset
dataset = load_dataset("jolma.py", name="ERR173157", split="all")
# dataset = load_dataset("jolma.py", name="ERR173157", split="all", data_dir="/root/autodl-fs/ERP001824-461")
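    # Quick sanity check (illustrative): each record carries the fields declared in _info,
    # e.g. {'id': 0, 'identifier': '...', 'seq': '...', 'quality': '...'}
    print(dataset[0])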