import os
import re
import pandas as pd
import datasets
from functools import cached_property

logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@article{jolma2010multiplexed,
  title={Multiplexed massively parallel SELEX for characterization of human transcription factor binding specificities},
  author={Jolma, Arttu and Kivioja, Teemu and Toivonen, Jarkko and Cheng, Lu and Wei, Gonghong and Enge, Martin and \
  Taipale, Mikko and Vaquerizas, Juan M and Yan, Jian and Sillanp{\\"a}{\\"a}, Mikko J and others},
  journal={Genome research},
  volume={20},
  number={6},
  pages={861--873},
  year={2010},
  publisher={Cold Spring Harbor Lab}
}
"""

_DESCRIPTION = """\
PRJEB3289
https://www.ebi.ac.uk/ena/browser/view/PRJEB3289
Data that has been generated by HT-SELEX experiments (see Jolma et al. 2010. PMID: 20378718 for description of method) \
that has been now used to generate transcription factor binding specificity models for most of the high confidence \
human transcription factors. Sequence data is composed of reads generated with Illumina Genome Analyzer IIX and \
HiSeq2000 instruments. Samples are composed of single read sequencing of synthetic DNA fragments with a fixed length \
randomized region or samples derived from such a initial library by selection with a sequence specific DNA binding \
protein. Originally multiple samples with different "barcode" tag sequences were run on the same Illumina sequencing \
lane but the released files have been already de-multiplexed, and the constant regions and "barcodes" of each sequence \
have been cut out of the sequencing reads to facilitate the use of data. Some of the files are composed of reads from \
multiple different sequencing lanes and due to this each of the names of the individual reads have been edited to show \
the flowcell and lane that was used to generate it. Barcodes and oligonucleotide designs are indicated in the names of \
individual entries. Depending of the selection ligand design, the sequences in each of these fastq-files are either \
14, 20, 30 or 40 bases long and had different flanking regions in both sides of the sequence. Each run entry is named \
in either of the following ways: Example 1) "BCL6B_DBD_AC_TGCGGG20NGA_1", where name is composed of following fields \
ProteinName_CloneType_Batch_BarcodeDesign_SelectionCycle. This experiment used barcode ligand TGCGGG20NGA, where both \
of the variable flanking constant regions are indicated as they were on the original sequence-reads. This ligand has \
been selected for one round of HT-SELEX using recombinant protein that contained the DNA binding domain of \
human transcription factor BCL6B. It also tells that the experiment was performed on batch of experiments named as "AC".\
 Example 2) 0_TGCGGG20NGA_0 where name is composed of (zero)_BarcodeDesign_(zero) These sequences have been generated \
 from sequencing of the initial non-selected pool. Same initial pools have been used in multiple experiments that were \
 on different batches, thus for example this background sequence pool is the shared background for all of the following \
 samples. BCL6B_DBD_AC_TGCGGG20NGA_1, ZNF784_full_AE_TGCGGG20NGA_3, DLX6_DBD_Y_TGCGGG20NGA_4 and MSX2_DBD_W_TGCGGG20NGA_2
"""

_URL = "ftp://ftp.sra.ebi.ac.uk/vol1/run/"
"ftp://ftp.sra.ebi.ac.uk/vol1/run/ERR173/ERR173154/CTCF_full_AJ_TAGCGA20NGCT_1.fastq.gz"
# _FORWARD_PRIMER = "TAATACGACTCACTATAGGGAGCAGGAGAGAGGTCAGATG"
# _REVERSE_PRIMER = "CCTATGCGTGCTAGTGTGA"
# _DESIGN_LENGTH = 30

# The SELEX run metadata (one row per SRA run accession) is downloaded at import time
# so that BUILDER_CONFIGS can be enumerated from it.
_DOWNLOAD_MANAGER = datasets.DownloadManager()
_RESOURCE_URL = "https://huggingface.co/datasets/thewall/DeepBindWeight/resolve/main"
SELEX_INFO_FILE = _DOWNLOAD_MANAGER.download(f"{_RESOURCE_URL}/ERP001824-deepbind.xlsx")

# Extracts the digits embedded in a barcode design string, e.g. "TGCGGG20NGA" -> 20.
pattern = re.compile(r"(\d+)")


class JolmaConfig(datasets.BuilderConfig):
    """BuilderConfig for one HT-SELEX run; `length_match` keeps only reads of the design
    length, `filter_N` drops reads containing ambiguous "N" bases."""

    def __init__(self, length_match=True, filter_N=True, **kwargs):
        super(JolmaConfig, self).__init__(**kwargs)
        self.length_match = length_match
        self.filter_N = filter_N


class Jolma(datasets.GeneratorBasedBuilder):
    """HT-SELEX reads from ENA study PRJEB3289; one config per SRA run accession."""

    # Run metadata indexed by run accession; one builder config is created per run.
    SELEX_INFO = pd.read_excel(SELEX_INFO_FILE, index_col=0)

    BUILDER_CONFIGS = [
        JolmaConfig(name=index) for index in SELEX_INFO.index
    ]

    DEFAULT_CONFIG_NAME = "ERR173157"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "identifier": datasets.Value("string"),
                    "seq": datasets.Value("string"),
                    "quality": datasets.Value("string"),
                }
            ),
            homepage="https://www.ebi.ac.uk/ena/browser/view/PRJEB3289",
            citation=_CITATION,
        )

    @cached_property
    def selex_info(self):
        # Metadata row for the selected run (config name == SRA run accession).
        return self.SELEX_INFO.loc[self.config.name]

    @cached_property
    def design_length(self):
        # Length of the randomized region, parsed from the ligand design, e.g. "TGCGGG20NGA" -> 20.
        selex_info = self.selex_info
        return int(pattern.search(selex_info["Ligand"]).group(0))

    def _split_generators(self, dl_manager):
        downloaded_files = None
        sra_id = self.config.name
        selex_info = self.selex_info
        file = selex_info["file"]

        # Prefer a local copy under data_dir/<run accession>/ if one was provided.
        if getattr(self.config, "data_dir", None) is not None:
            path = os.path.join(self.config.data_dir, sra_id, file)
            downloaded_files = dl_manager.extract(path)
            logger.info(f"Load from {downloaded_files}")
        # Otherwise fetch the run from the ENA FTP mirror (runs are grouped by accession prefix).
        if downloaded_files is None or not os.path.exists(downloaded_files):
            downloaded_url = _URL + "/".join([sra_id[:6], sra_id, file])
            logger.info(f"Download from {downloaded_url}")
            downloaded_files = dl_manager.download_and_extract(downloaded_url)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        key = 0
        with open(filepath, encoding="utf-8") as f:
            ans = {"id": key}
            # FASTQ records span four lines: @identifier, sequence, "+" separator, quality.
            for i, line in enumerate(f):
                if i % 4 == 0 and line.startswith("@"):
                    ans["identifier"] = line[1:].strip()
                elif i % 4 == 1:
                    ans["seq"] = line.strip()
                elif i % 4 == 3:
                    ans["quality"] = line.strip()
                    # Only emit reads that pass the length / ambiguous-base filters.
                    if self.filter_fn(ans):
                        yield key, ans
                        key += 1
                    ans = {"id": key}

    def filter_fn(self, example):
        seq = example["seq"]
        if self.config.length_match and len(seq) != self.design_length:
            return False
        if self.config.filter_N and "N" in seq:
            return False
        return True


if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("jolma.py", name="ERR173157", split="all")
    # dataset = load_dataset("jolma.py", name="ERR173157", split="all", data_dir="/root/autodl-fs/ERP001824-461")
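    # Illustrative check (an assumption added for clarity, not part of the original script):
    # the loaded examples expose the fields declared in _info, i.e. "id", "identifier",
    # "seq" and "quality".
    print(dataset)
    print(dataset[0])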