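# Hugging Face `datasets` loading script for binary contact map prediction with
# retrieval-augmented inputs: each example carries the protein sequence, contact
# labels, retrieved MSA sequences, and per-residue structure embeddings.
# See _DESCRIPTION and _HOMEPAGE below for details.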
import sys
import os
import pickle
import re
import torch
import random
import gzip
from os.path import exists, join, getsize, isfile, isdir, abspath, basename
from typing import Dict, Union, Optional, List, Tuple, Mapping
import numpy as np
import pandas as pd
from tqdm.auto import trange, tqdm
from concurrent.futures import ThreadPoolExecutor, as_completed

import datasets


def get_md5(aa_str):
    """
    Calculate the MD5 hash of a protein sequence (case-insensitive).
    """
    import hashlib
    assert isinstance(aa_str, str), aa_str

    aa_str = aa_str.upper()
    return hashlib.md5(aa_str.encode('utf-8')).hexdigest()

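# Note: the MD5 of the upper-cased sequence is used below as the key that links
# each example to its MSA file (<md5>.txt.gz under msa/) and to its
# structure-token string in md5_to_str.fasta.

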
def load_fasta(seqFn, rem_tVersion=False, load_annotation=False, full_line_as_id=False):
    """
    seqFn            -- FASTA file or input handle (with a readline implementation)
    rem_tVersion     -- Remove version information: ENST000000022311.2 => ENST000000022311
    load_annotation  -- Also load the sequence annotations
    full_line_as_id  -- Use the full header line (starting with >) as the sequence ID.
                        Cannot be specified simultaneously with load_annotation

    Return:
        {tid1: seq1, ...}                        if load_annotation == False
        {tid1: seq1, ...}, {tid1: annot1, ...}   if load_annotation == True
    """
    if load_annotation and full_line_as_id:
        raise RuntimeError("Error: load_annotation and full_line_as_id cannot be specified simultaneously")
    if rem_tVersion and full_line_as_id:
        raise RuntimeError("Error: rem_tVersion and full_line_as_id cannot be specified simultaneously")

    fasta = {}
    annotation = {}
    cur_tid = ''
    cur_seq = ''

    if isinstance(seqFn, str):
        IN = open(seqFn)
    elif hasattr(seqFn, 'readline'):
        IN = seqFn
    else:
        raise RuntimeError(f"Unexpected type for seqFn: {type(seqFn)}")
    for line in IN:
        if line[0] == '>':
            # Save the previous record before starting a new one
            if cur_tid != '':
                fasta[cur_tid] = re.sub(r"\s", "", cur_seq)
                cur_seq = ''
            data = line[1:-1].split(None, 1)
            cur_tid = line[1:-1] if full_line_as_id else data[0]
            annotation[cur_tid] = data[1] if len(data) == 2 else ""
            if rem_tVersion and '.' in cur_tid:
                cur_tid = ".".join(cur_tid.split(".")[:-1])
        elif cur_tid != '':
            cur_seq += line.rstrip()

    if isinstance(seqFn, str):
        IN.close()

    if cur_seq != '':
        fasta[cur_tid] = re.sub(r"\s", "", cur_seq)

    if load_annotation:
        return fasta, annotation
    else:
        return fasta

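# Example usage (sketch, hypothetical file name):
#   fasta = load_fasta("proteins.fa")                              # {seq_id: sequence}
#   fasta, annot = load_fasta("proteins.fa", load_annotation=True)

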
def load_msa_txt(file_or_stream, load_id=False, load_annot=False, sort=False):
    """
    Read an MSA txt file

    Parameters
    --------------
    file_or_stream: file name or stream to read (anything with a read method)
    load_id:        also return the identity of each MSA sequence to the query
    load_annot:     also return the annotation of each MSA sequence
    sort:           sort MSA sequences by identity (descending); the query stays first

    Return
    --------------
    msa:         list of MSA sequences; the first sequence is the query sequence
    id_arr:      identity of each MSA sequence (only if load_id=True)
    annotations: annotation of each MSA sequence (only if load_annot=True)
    """
    msa = []
    id_arr = []
    annotations = []

    if hasattr(file_or_stream, 'read'):
        lines = file_or_stream.read().strip().split('\n')
    elif file_or_stream.endswith('.gz'):
        with gzip.open(file_or_stream) as IN:
            lines = IN.read().decode().strip().split('\n')
    else:
        with open(file_or_stream) as IN:
            lines = IN.read().strip().split('\n')

    for idx, line in enumerate(lines):
        data = line.strip().split()
        if idx == 0:
            # The first line contains only the query sequence
            assert len(data) == 1, f"Expect 1 element for the 1st line, but got {data} in {file_or_stream}"
            q_seq = data[0]
        else:
            if len(data) >= 2:
                id_arr.append(float(data[1]))
            else:
                # No identity column: compute the identity to the query on the fly
                assert len(q_seq) == len(data[0])
                id_ = round(np.mean([r1 == r2 for r1, r2 in zip(q_seq, data[0])]), 3)
                id_arr.append(id_)
            msa.append(data[0])
            if len(data) >= 3:
                annot = " ".join(data[2:])
                annotations.append(annot)
            else:
                annotations.append(None)

    id_arr = np.array(id_arr, dtype=np.float64)
    if sort:
        id_order = np.argsort(id_arr)[::-1]
        msa = [msa[i] for i in id_order]
        id_arr = id_arr[id_order]
        annotations = [annotations[i] for i in id_order]
    msa = [q_seq] + msa

    outputs = [msa]
    if load_id:
        outputs.append(id_arr)
    if load_annot:
        outputs.append(annotations)
    if len(outputs) == 1:
        return outputs[0]
    return outputs

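# Example usage (sketch, hypothetical path):
#   msa, id_arr = load_msa_txt("msa/<md5>.txt.gz", load_id=True)
#   query = msa[0]   # the first sequence is the query

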
_CITATION = """
"""


_DESCRIPTION = """
Contact map prediction aims to determine whether two residues, $i$ and $j$, are in contact or not,
based on whether their distance falls below a certain threshold ($<$ 8 Angstrom). This task was an
important component of early versions of AlphaFold for structure prediction.
"""


_HOMEPAGE = "https://huggingface.co/datasets/genbio-ai/contact_prediction_binary_rag"


_LICENSE = "Apache license 2.0"


class DownStreamConfig(datasets.BuilderConfig):
    """BuilderConfig for the downstream task dataset."""

    def __init__(self, *args, **kwargs):
        """BuilderConfig for the downstream task dataset.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(*args, name="downstream", **kwargs)


class DownStreamTasks(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIG_CLASS = DownStreamConfig
    BUILDER_CONFIGS = [DownStreamConfig()]
    DEFAULT_CONFIG_NAME = None

    def _info(self):
        features = datasets.Features(
            {
                "seq": datasets.Value("string"),
                "label": datasets.Array2D(shape=(None, 2), dtype='int32'),
                # Retrieved MSA sequences; the first entry is the query sequence itself
                "msa": datasets.Sequence(datasets.Value("string")),
                # Per-residue structure embeddings (one 384-dim codebook vector per residue)
                "str_emb": datasets.Array2D(shape=(None, 384), dtype='float32'),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        train_parquet_file = dl_manager.download("data/train-00000-of-00001.parquet")
        valid_parquet_file = dl_manager.download("data/valid-00000-of-00001.parquet")
        test_parquet_file = dl_manager.download("data/test-00000-of-00001.parquet")
        # Archive of per-sequence MSA files, one <md5>.txt.gz per query sequence
        msa_path = dl_manager.download_and_extract("msa.tar")

        # Mapping from sequence MD5 to structure-token string, plus the codebook
        # that turns those tokens into 384-dim embeddings
        str_file = dl_manager.download("md5_to_str.fasta")
        codebook_file = dl_manager.download("codebook.pt")

        assert os.path.exists(join(msa_path, 'msa'))
        msa_path = join(msa_path, 'msa')

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "parquet_file": train_parquet_file,
                    "msa_path": msa_path,
                    "str_file": str_file,
                    "codebook_file": codebook_file
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "parquet_file": valid_parquet_file,
                    "msa_path": msa_path,
                    "str_file": str_file,
                    "codebook_file": codebook_file
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "parquet_file": test_parquet_file,
                    "msa_path": msa_path,
                    "str_file": str_file,
                    "codebook_file": codebook_file
                }
            ),
        ]

    def _generate_examples(self, parquet_file, msa_path, str_file, codebook_file):
        dataset = datasets.Dataset.from_parquet(parquet_file)
        md5_to_str = load_fasta(str_file)
        codebook = torch.load(codebook_file, 'cpu', weights_only=True).numpy()

        for key, item in enumerate(dataset):
            seq = item['seq']
            label = item['label']
            md5_val = get_md5(seq)
            if md5_val not in md5_to_str or md5_to_str[md5_val] == "":
                # No structure tokens for this sequence: fall back to all-zero embeddings
                str_emb = np.zeros([len(seq), 384], dtype=np.float32)
            else:
                # Structure tokens are stored as '-'-separated indices into the codebook
                str_toks = np.array([int(x) for x in md5_to_str[md5_val].split('-')])
                str_emb = codebook[str_toks]

            msa = load_msa_txt(join(msa_path, md5_val + '.txt.gz'))
            assert len(msa[0]) == len(seq), f"Error: {len(msa[0])} != {len(seq)}"
            assert len(msa[0]) == str_emb.shape[0], f"Error: {len(msa[0])} != {str_emb.shape[0]}"

            yield key, {
                "seq": seq,
                "label": label,
                "msa": msa,
                "str_emb": str_emb
            }

    def _as_dataset(
        self,
        split: Optional[datasets.Split] = None,
        **kwargs
    ) -> datasets.Dataset:
        dataset = super()._as_dataset(split=split, **kwargs)
        # Return "label" and "str_emb" as numpy arrays; keep all other columns as-is
        dataset.set_format(
            type="numpy",
            columns=["label", "str_emb"],
            output_all_columns=True
        )
        return dataset

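# Example usage (sketch): the repository id below is taken from _HOMEPAGE;
# trust_remote_code is needed because this dataset is built by a loading script.
#   ds = datasets.load_dataset("genbio-ai/contact_prediction_binary_rag", trust_remote_code=True)
#   train = ds["train"]   # columns: seq, label, msa, str_emb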