# FireProtDB2 / src / 03_export_fasta.py
# Author: drake463 — "final pipeline and updated subsets" (commit ae74a72)
#!/usr/bin/env python3
"""
Generate FASTA file with all protein sequences from previous step (for mmseqs2 clustering)
Outputs:
- A FASTA file of all proteins with UniProt sequence that was able to be pulled
Usage:
python 03_export_fasta.py \
    --input ../data/fireprotdb_with_sequences.parquet \
    --output_fasta ../data/proteins.fasta
Notes:
- With --only_length_match, this script keeps only rows whose fetched UniProt sequence length matches the sequence length recorded in the original CSV (the "length_match" column).
"""
import argparse
import pandas as pd
def build_protein_table(df: pd.DataFrame, only_length_match: bool = False) -> pd.DataFrame:
    """Reduce the per-mutation table to one (protein_id, sequence) row per protein.

    Rows without a fetched sequence are dropped. The protein id is the
    UniProtKB accession when present; otherwise it falls back to
    "seqid:<sequence_id>", and to "unknown" when both identifiers are empty.
    Deduplication keeps the first occurrence of each protein_id.

    Args:
        df: input table; must contain "sequence", "uniprotkb" and
            "sequence_id" columns.
        only_length_match: if True and a "length_match" column exists,
            keep only rows where it is True.

    Returns:
        DataFrame with exactly the columns ["protein_id", "sequence"],
        one row per unique protein_id.
    """
    # Keep only rows where a sequence could be pulled from UniProt.
    df = df[df["sequence"].notna()].copy()
    if only_length_match and "length_match" in df.columns:
        # `== True` (rather than truthiness) deliberately excludes NA values.
        df = df[df["length_match"] == True].copy()

    # Normalize both id columns to clean strings so comparisons below are safe.
    df["uniprotkb"] = df["uniprotkb"].astype("string").fillna("").str.strip()
    df["sequence_id"] = df["sequence_id"].astype("string").fillna("").str.strip()

    # Prefer the UniProtKB accession as the FASTA id; fall back to sequence_id.
    df["protein_id"] = df["uniprotkb"]
    missing = df["protein_id"] == ""
    df.loc[missing, "protein_id"] = "seqid:" + df.loc[missing, "sequence_id"]
    # Both ids empty -> the fallback produced the bare prefix "seqid:".
    df.loc[df["protein_id"] == "seqid:", "protein_id"] = "unknown"

    # Dedupe by protein_id (keep first occurrence).
    return df.drop_duplicates(subset=["protein_id"])[["protein_id", "sequence"]]


def write_fasta(prot: pd.DataFrame, path: str) -> int:
    """Write a (protein_id, sequence) table to `path` in FASTA format.

    Rows whose sequence is not a non-empty str are skipped.

    Args:
        prot: DataFrame with "protein_id" and "sequence" columns.
        path: output FASTA file path (overwritten).

    Returns:
        Number of records actually written.
    """
    written = 0
    with open(path, "w") as f:
        # zip over the columns directly: much faster than DataFrame.iterrows.
        for pid, seq in zip(prot["protein_id"], prot["sequence"]):
            if not isinstance(seq, str) or not seq:
                continue  # no usable sequence for this protein
            f.write(f">{pid}\n{seq}\n")
            written += 1
    return written


def main():
    """CLI entry point: parquet table in, deduplicated FASTA out."""
    ap = argparse.ArgumentParser()
    ap.add_argument("--input", default="../data/fireprotdb_with_sequences.parquet")
    ap.add_argument("--output_fasta", default="../data/proteins.fasta")
    ap.add_argument("--only_length_match", action="store_true")
    args = ap.parse_args()

    df = pd.read_parquet(args.input)
    prot = build_protein_table(df, only_length_match=args.only_length_match)
    # Report the count of records actually written, not len(prot): rows with
    # empty/non-str sequences are skipped by write_fasta, and the old
    # `len(prot)` report could overstate the file's contents.
    n_written = write_fasta(prot, args.output_fasta)
    print(f"Wrote {n_written:,} proteins to {args.output_fasta}")
if __name__ == "__main__":
main()