Upload 1791 files
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +1 -0
- benchmarks/FoldBench/.gitignore +9 -0
- benchmarks/FoldBench/LICENSE +21 -0
- benchmarks/FoldBench/README.md +238 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/LICENSE +208 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/configs/__init__.py +0 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/configs/configs_base.py +353 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/configs/configs_data.py +199 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/configs/configs_inference.py +35 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/__init__.py +0 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/config/__init__.py +14 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/config/config.py +288 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/config/extend_types.py +55 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/__init__.py +0 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/ccd.py +450 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/constants.py +964 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/data_pipeline.py +310 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/dataloader.py +372 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/dataset.py +1100 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/featurizer.py +802 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/filter.py +82 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/infer_data_pipeline.py +220 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/json_maker.py +296 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/json_parser.py +608 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/json_to_feature.py +310 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/msa_featurizer.py +1162 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/msa_utils.py +1416 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/parser.py +1173 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/substructure_perms.py +257 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/tokenizer.py +196 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/utils.py +675 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/metrics/__init__.py +0 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/metrics/clash.py +272 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/metrics/lddt_metrics.py +277 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/metrics/rmsd.py +260 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/__init__.py +0 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/generator.py +332 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/layer_norm/__init__.py +16 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/layer_norm/kernel/compat.h +24 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/layer_norm/kernel/layer_norm_cuda.cpp +138 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/layer_norm/kernel/layer_norm_cuda_kernel.cu +409 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/layer_norm/kernel/type_shim.h +246 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/layer_norm/layer_norm.py +134 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/layer_norm/torch_ext_compile.py +55 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/loss.py +1812 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/modules/__init__.py +0 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/modules/confidence.py +321 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/modules/diffusion.py +541 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/modules/embedders.py +256 -0
- benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/modules/frames.py +108 -0
.gitattributes
CHANGED
|
@@ -58,3 +58,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 58 |
# Video files - compressed
|
| 59 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 60 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 58 |
# Video files - compressed
|
| 59 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 60 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
| 61 |
+
benchmarks/FoldBench/evaluation/DockQv2/operations.cp310-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
benchmarks/FoldBench/.gitignore
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.pyc
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.sif
|
| 4 |
+
*.ninja_deps
|
| 5 |
+
*.ninja
|
| 6 |
+
*.ninja_deps
|
| 7 |
+
*.ninja_deps.d
|
| 8 |
+
*.ninja_deps.d.tmp
|
| 9 |
+
*.ninja_deps.d.tmp.d
|
benchmarks/FoldBench/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2025 FoldBench Contributors
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
benchmarks/FoldBench/README.md
ADDED
|
@@ -0,0 +1,238 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Benchmarking all-atom biomolecular structure prediction with FoldBench
|
| 2 |
+
|
| 3 |
+

|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
FoldBench is a low-homology benchmark spanning proteins, nucleic acids, ligands, and six major interaction types, enabling assessments that were previously infeasible with task-specific datasets.
|
| 7 |
+
|
| 8 |
+
## 📢 Updates
|
| 9 |
+
+ **2025-12-31**: The evaluation results for RosettaFold3 (latest) have been updated.
|
| 10 |
+
+ **2025-12-05**: The evaluation results for Boltz-2 and OpenFold3-preview have been updated.
|
| 11 |
+
+ **2025-12-04**: FoldBench has been published in [Nature Communications](https://www.nature.com/articles/s41467-025-67127-3).
|
| 12 |
+
|
| 13 |
+
## 🎯 FoldBench Targets
|
| 14 |
+
The FoldBench benchmark targets are open source. This comprehensive dataset, located in the `targets` directory, is organized into two primary collections:
|
| 15 |
+
|
| 16 |
+
### **Interfaces**
|
| 17 |
+
|
| 18 |
+
* **Protein–Protein:** 279 interfaces
|
| 19 |
+
* **Antibody–Antigen:** 172 interfaces
|
| 20 |
+
* **Protein–Ligand:** 558 interfaces
|
| 21 |
+
* **Protein–Peptide:** 51 interfaces
|
| 22 |
+
* **Protein–RNA:** 70 interfaces
|
| 23 |
+
* **Protein–DNA:** 330 interfaces
|
| 24 |
+
|
| 25 |
+
### **Monomeric Structures**
|
| 26 |
+
|
| 27 |
+
* **Protein Monomers:** 330 structures
|
| 28 |
+
* **RNA Monomers:** 15 structures
|
| 29 |
+
* **DNA Monomers:** 14 structures
|
| 30 |
+
|
| 31 |
+
## 🏆 Leaderboard
|
| 32 |
+
|
| 33 |
+
**Evaluation Metrics:** Interface prediction tasks are evaluated by success rate, while monomer prediction tasks use LDDT (Local Distance Difference Test) scores. All results are based on comprehensive evaluations across our low-homology benchmark dataset.
|
| 34 |
+
|
| 35 |
+
### Results on targets released after 2023-01 (full set)
|
| 36 |
+
#### Protein Interactions
|
| 37 |
+
|
| 38 |
+
| Model | Protein-Protein | Antibody–Antigen | Protein-Ligand |
|
| 39 |
+
|:--------------:|:--------------:|:-----:|:--------------:|
|
| 40 |
+
| AlphaFold 3 | 72.93% | 47.90% | 64.90% |
|
| 41 |
+
| Boltz-1 | 68.25% | 33.54% | 55.04% |
|
| 42 |
+
| Chai-1 | 68.53% | 23.64% | 51.23% |
|
| 43 |
+
| HelixFold 3 | 66.27% | 28.40% | 51.82% |
|
| 44 |
+
| Protenix | 68.18% | 34.13% | 50.70% |
|
| 45 |
+
| OpenFold 3 (preview) | 69.96% | 28.83% | 44.49% |
|
| 46 |
+
|
| 47 |
+
#### Nucleic acids
|
| 48 |
+
|
| 49 |
+
| Model | Protein-RNA | Protein-DNA | RNA Monomer | DNA Monomer |
|
| 50 |
+
|:--------------:|:-----------:|:-----------:|:-----------:|:-----------:|
|
| 51 |
+
| AlphaFold 3 | 62.32% | 79.18% | 0.61 | 0.53 |
|
| 52 |
+
| Boltz-1 | 56.90% | 70.97% | 0.44 | 0.34 |
|
| 53 |
+
| Chai-1 | 50.91% | 69.97% | 0.49 | 0.46 |
|
| 54 |
+
| HelixFold 3 | 48.28% | 50.00% | 0.55 | 0.29 |
|
| 55 |
+
| Protenix | 44.78% | 68.39% | 0.59 | 0.44 |
|
| 56 |
+
| OpenFold 3 (preview) | 18.84% | 5.88% | 0.63 | 0.51 |
|
| 57 |
+
|
| 58 |
+
### Results on targets released after 2024-01
|
| 59 |
+
#### Protein Interactions
|
| 60 |
+
| Model | Protein-Protein | Antibody–Antigen | Protein-Ligand |
|
| 61 |
+
|:--------------:|:--------------:|:-----:|:--------------:|
|
| 62 |
+
| AlphaFold 3 | 70.87% | 47.95% | 67.59% |
|
| 63 |
+
| Boltz-1 | 64.10% | 31.43% | 51.33% |
|
| 64 |
+
| Chai-1 | 66.95% | 18.31% | 49.28% |
|
| 65 |
+
| HelixFold 3 | 66.67% | 28.17% | 50.68% |
|
| 66 |
+
| Protenix | 64.80% | 38.36% | 53.25% |
|
| 67 |
+
| OpenFold 3 (preview) | 68.22% | 34.29% | 40.85% |
|
| 68 |
+
| Boltz-2* | 70.54% | 25.00% | 53.90% |
|
| 69 |
+
| RosettaFold3* | 72.44% | 37.50% | 57.28% |
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
#### Nucleic acids
|
| 73 |
+
|
| 74 |
+
| Model | Protein-RNA | Protein-DNA |
|
| 75 |
+
|:--------------:|:-----------:|:-----------:|
|
| 76 |
+
| AlphaFold 3 | 72.50% | 80.45% |
|
| 77 |
+
| Boltz-1 | 70.00% | 69.77% |
|
| 78 |
+
| Chai-1 | 55.56% | 69.14% |
|
| 79 |
+
| HelixFold 3 | 54.29% | 61.18% |
|
| 80 |
+
| Protenix | 56.41% | 67.63% |
|
| 81 |
+
| OpenFold 3 (preview) | 25.00% | 5.81% |
|
| 82 |
+
| Boltz-2* | 76.92% | 73.84% |
|
| 83 |
+
| RosettaFold3*^ | - | 66.07% |
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
*Models marked with * have a training cutoff later than FoldBench's reference date (2023-01-13). FoldBench targets are constructed to ensure **low homology specifically against the PDB data prior to 2023-01-13**. Consequently, models trained on data released after this date may have observed these targets or their close homologs during training (potential data leakage), compromising the low-homology evaluation condition. Results for these models are provided for reference only and should not be directly compared with strictly valid models.
|
| 89 |
+
|
| 90 |
+
**Nucleic acid monomer results are omitted due to insufficient target availability.
|
| 91 |
+
|
| 92 |
+
^Results are not shown due to insufficient targets caused by errors during inference or evaluation stages.
|
| 93 |
+
|
| 94 |
+
**Note:**
|
| 95 |
+
- Interface prediction is evaluated by success rate.
|
| 96 |
+
- Monomer prediction is evaluated by LDDT.
|
| 97 |
+
- Success is defined as:
|
| 98 |
+
- For protein–ligand interfaces: LRMSD < 2 Å and LDDT-PLI > 0.8
|
| 99 |
+
- For all other interfaces: DockQ ≥ 0.23
|
| 100 |
+
- We developed an algorithm to identify and prevent overfitting of models on FoldBench, ensuring fair and reliable evaluation.
|
| 101 |
+
|
| 102 |
+
## 📈 Detailed Performance Analysis
|
| 103 |
+
|
| 104 |
+
### Results on targets released after 2023-01 (full set)
|
| 105 |
+

|
| 106 |
+
|
| 107 |
+
### Results on targets released after 2024-01
|
| 108 |
+

|
| 109 |
+
|
| 110 |
+
## 🚀 Getting Started
|
| 111 |
+
|
| 112 |
+
To get started with FoldBench, clone the repository and set up the Conda environment.
|
| 113 |
+
|
| 114 |
+
```bash
|
| 115 |
+
# 1. Clone the repository
|
| 116 |
+
git clone https://github.com/BEAM-Labs/FoldBench.git
|
| 117 |
+
cd FoldBench
|
| 118 |
+
|
| 119 |
+
# 2. Create and activate the Conda environment for evaluation
|
| 120 |
+
conda env create -f environment.yml
|
| 121 |
+
conda activate foldbench
|
| 122 |
+
```
|
| 123 |
+
|
| 124 |
+
## ⚙️ Evaluation
|
| 125 |
+
You can use our provided evaluation samples to reproduce the evaluation workflow. The final results will be generated in `examples/summary_table.csv`.
|
| 126 |
+
|
| 127 |
+
```bash
|
| 128 |
+
# Ensure you are in the FoldBench root directory and the conda environment is active
|
| 129 |
+
|
| 130 |
+
# Step 1: Calculate per-target scores from prediction files
|
| 131 |
+
# This uses OpenStructure (ost) and DockQ to score each prediction against its ground truth
|
| 132 |
+
python evaluate.py \
|
| 133 |
+
--targets_dir ./examples/targets \
|
| 134 |
+
--evaluation_dir ./examples/outputs/evaluation \
|
| 135 |
+
--algorithm_name Protenix \
|
| 136 |
+
--ground_truth_dir ./examples/ground_truths
|
| 137 |
+
|
| 138 |
+
# Step 2: Aggregate scores and calculate the final success rates/LDDT
|
| 139 |
+
# This summarizes the results for specified models and tasks into a final table
|
| 140 |
+
python task_score_summary.py \
|
| 141 |
+
--evaluation_dir ./examples/outputs/evaluation \
|
| 142 |
+
--target_dir ./examples/targets \
|
| 143 |
+
--output_path ./examples/summary_table.csv \
|
| 144 |
+
--algorithm_names Protenix \
|
| 145 |
+
--targets interface_protein_ligand interface_protein_dna monomer_protein \
|
| 146 |
+
--metric_type rank
|
| 147 |
+
```
|
| 148 |
+
|
| 149 |
+
### Evaluate more structures
|
| 150 |
+
To evaluate more structures in FoldBench, you'll need to follow these steps:
|
| 151 |
+
|
| 152 |
+
#### **1. Prepare Your Data**
|
| 153 |
+
|
| 154 |
+
* **Edit the target CSV files:** Modify the CSV files located in the `examples/targets` directory. These files should contain information about the structures you want to evaluate.
|
| 155 |
+
* **Download ground truth CIF files:** A package containing the specific original CIF files referenced during the benchmark's creation is available for download here: [FoldBench Referenced CIFs](https://drive.google.com/file/d/17KdWDXKATaeHF6inPxhPHIRuIzeqiJxS/view?usp=sharing). Save these files in the `examples/ground_truths` directory. Ensure the filenames correspond to your data in the CSV files.
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
#### **2. Update Evaluation Outputs**
|
| 159 |
+
|
| 160 |
+
* **Modify `prediction_reference.csv`:** After preparing your data, you'll need to adjust the `./outputs/evaluation/{algorithm_name}/prediction_reference.csv` file to specify the model's ranking scores and the paths to the predicted structures. Please refer to the **[Integrating a New Model into FoldBench](./algorithms/README.md)**.
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
## ✨ Integrating a New Model into FoldBench
|
| 164 |
+
|
| 165 |
+
We enthusiastically welcome community submissions!
|
| 166 |
+
|
| 167 |
+
You can submit your algorithm for us to run the tests.
|
| 168 |
+
|
| 169 |
+
For detailed instructions on how to package your model for submission, please see the contributor's guide:
|
| 170 |
+
**[Integrating a New Model into FoldBench](./algorithms/README.md)**.
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
## 🏠 Repository Structure
|
| 174 |
+
|
| 175 |
+
The FoldBench repository is organized to separate benchmark data, evaluation code, and evaluation samples.
|
| 176 |
+
|
| 177 |
+
```
|
| 178 |
+
FoldBench/
|
| 179 |
+
├── targets/ # FoldBench targets csv files
|
| 180 |
+
│ ├── interface_antibody_antigen.csv
|
| 181 |
+
│ └── ...
|
| 182 |
+
├── algorithms/
|
| 183 |
+
│ ├── algorithm_name/ # Custom model's code and definition files go here
|
| 184 |
+
│ └── ...
|
| 185 |
+
├── examples/
|
| 186 |
+
│ ├── outputs/
|
| 187 |
+
│ │ ├── input/ # Preprocessed inputs for each algorithm
|
| 188 |
+
│ │ │ └── algorithm_name/
|
| 189 |
+
│ │ ├── prediction/ # Model predictions (e.g., .cif files)
|
| 190 |
+
│ │ │ └── algorithm_name/
|
| 191 |
+
│ │ └── evaluation/ # Final scores and summaries
|
| 192 |
+
│ │ └── algorithm_name/
|
| 193 |
+
│ ├── targets/ # Target definitions
|
| 194 |
+
│ ├── ground_truths/ # Ground truth cif files
|
| 195 |
+
│ └── alphafold3_inputs.json # Alphafold3 input json
|
| 196 |
+
├── build_apptainer_images.sh # Script to build all algorithm containers
|
| 197 |
+
├── environment.yml # Conda environment for evaluation scripts
|
| 198 |
+
├── run.sh # Master script to run inference and evaluation
|
| 199 |
+
├── evaluate.py # Prediction evaluation
|
| 200 |
+
├── task_score_summary.py # Benchmark score summary
|
| 201 |
+
└── ...
|
| 202 |
+
```
|
| 203 |
+
|
| 204 |
+
## 🙏 Acknowledgements
|
| 205 |
+
|
| 206 |
+
We gratefully acknowledge the developers of the following projects, which are essential to FoldBench:
|
| 207 |
+
|
| 208 |
+
+ [AlphaFold 3](https://github.com/google-deepmind/alphafold3)
|
| 209 |
+
+ [Protenix](https://github.com/bytedance/Protenix)
|
| 210 |
+
+ [Chai-1](https://github.com/chaidiscovery/chai-lab)
|
| 211 |
+
+ [Boltz-1/2](https://github.com/jwohlwend/boltz)
|
| 212 |
+
+ [HelixFold 3](https://github.com/PaddlePaddle/PaddleHelix/tree/dev/apps/protein_folding/helixfold3)
|
| 213 |
+
+ [OpenFold 3](https://github.com/aqlaboratory/openfold-3)
|
| 214 |
+
+ [OpenStructure](https://git.scicore.unibas.ch/schwede/openstructure)
|
| 215 |
+
+ [DockQ](https://github.com/bjornwallner/DockQ)
|
| 216 |
+
|
| 217 |
+
## 📄 License
|
| 218 |
+
|
| 219 |
+
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
|
| 220 |
+
|
| 221 |
+
The MIT License is a permissive open source license that allows for commercial and non-commercial use, modification, distribution, and private use of the software, provided that the original copyright notice and license terms are included.
|
| 222 |
+
|
| 223 |
+
## ✍️ How to Cite
|
| 224 |
+
|
| 225 |
+
If you use FoldBench in your research, please cite our paper:
|
| 226 |
+
|
| 227 |
+
```bibtex
|
| 228 |
+
@article{xu_benchmarking_2025,
|
| 229 |
+
title = {Benchmarking all-atom biomolecular structure prediction with {FoldBench}},
|
| 230 |
+
issn = {2041-1723},
|
| 231 |
+
url = {https://doi.org/10.1038/s41467-025-67127-3},
|
| 232 |
+
doi = {10.1038/s41467-025-67127-3},
|
| 233 |
+
journal = {Nature Communications},
|
| 234 |
+
author = {Xu, Sheng and Feng, Qiantai and Qiao, Lifeng and Wu, Hao and Shen, Tao and Cheng, Yu and Zheng, Shuangjia and Sun, Siqi},
|
| 235 |
+
month = dec,
|
| 236 |
+
year = {2025},
|
| 237 |
+
}
|
| 238 |
+
```
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/LICENSE
ADDED
|
@@ -0,0 +1,208 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
Apache License
|
| 3 |
+
Version 2.0, January 2004
|
| 4 |
+
http://www.apache.org/licenses/
|
| 5 |
+
|
| 6 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 7 |
+
|
| 8 |
+
1. Definitions.
|
| 9 |
+
|
| 10 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 11 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 12 |
+
|
| 13 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 14 |
+
the copyright owner that is granting the License.
|
| 15 |
+
|
| 16 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 17 |
+
other entities that control, are controlled by, or are under common
|
| 18 |
+
control with that entity. For the purposes of this definition,
|
| 19 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 20 |
+
direction or management of such entity, whether by contract or
|
| 21 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 22 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 23 |
+
|
| 24 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 25 |
+
exercising permissions granted by this License.
|
| 26 |
+
|
| 27 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 28 |
+
including but not limited to software source code, documentation
|
| 29 |
+
source, and configuration files.
|
| 30 |
+
|
| 31 |
+
"Object" form shall mean any form resulting from mechanical
|
| 32 |
+
transformation or translation of a Source form, including but
|
| 33 |
+
not limited to compiled object code, generated documentation,
|
| 34 |
+
and conversions to other media types.
|
| 35 |
+
|
| 36 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 37 |
+
Object form, made available under the License, as indicated by a
|
| 38 |
+
copyright notice that is included in or attached to the work
|
| 39 |
+
(an example is provided in the Appendix below).
|
| 40 |
+
|
| 41 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 42 |
+
form, that is based on (or derived from) the Work and for which the
|
| 43 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 44 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 45 |
+
of this License, Derivative Works shall not include works that remain
|
| 46 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 47 |
+
the Work and Derivative Works thereof.
|
| 48 |
+
|
| 49 |
+
"Contribution" shall mean any work of authorship, including
|
| 50 |
+
the original version of the Work and any modifications or additions
|
| 51 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 52 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 53 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 54 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 55 |
+
means any form of electronic, verbal, or written communication sent
|
| 56 |
+
to the Licensor or its representatives, including but not limited to
|
| 57 |
+
communication on electronic mailing lists, source code control systems,
|
| 58 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 59 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 60 |
+
excluding communication that is conspicuously marked or otherwise
|
| 61 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 62 |
+
|
| 63 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 64 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 65 |
+
subsequently incorporated within the Work.
|
| 66 |
+
|
| 67 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 68 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 69 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 70 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 71 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 72 |
+
Work and such Derivative Works in Source or Object form.
|
| 73 |
+
|
| 74 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 75 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 76 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 77 |
+
(except as stated in this section) patent license to make, have made,
|
| 78 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 79 |
+
where such license applies only to those patent claims licensable
|
| 80 |
+
by such Contributor that are necessarily infringed by their
|
| 81 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 82 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 83 |
+
institute patent litigation against any entity (including a
|
| 84 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 85 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 86 |
+
or contributory patent infringement, then any patent licenses
|
| 87 |
+
granted to You under this License for that Work shall terminate
|
| 88 |
+
as of the date such litigation is filed.
|
| 89 |
+
|
| 90 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 91 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 92 |
+
modifications, and in Source or Object form, provided that You
|
| 93 |
+
meet the following conditions:
|
| 94 |
+
|
| 95 |
+
(a) You must give any other recipients of the Work or
|
| 96 |
+
Derivative Works a copy of this License; and
|
| 97 |
+
|
| 98 |
+
(b) You must cause any modified files to carry prominent notices
|
| 99 |
+
stating that You changed the files; and
|
| 100 |
+
|
| 101 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 102 |
+
that You distribute, all copyright, patent, trademark, and
|
| 103 |
+
attribution notices from the Source form of the Work,
|
| 104 |
+
excluding those notices that do not pertain to any part of
|
| 105 |
+
the Derivative Works; and
|
| 106 |
+
|
| 107 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 108 |
+
distribution, then any Derivative Works that You distribute must
|
| 109 |
+
include a readable copy of the attribution notices contained
|
| 110 |
+
within such NOTICE file, excluding those notices that do not
|
| 111 |
+
pertain to any part of the Derivative Works, in at least one
|
| 112 |
+
of the following places: within a NOTICE text file distributed
|
| 113 |
+
as part of the Derivative Works; within the Source form or
|
| 114 |
+
documentation, if provided along with the Derivative Works; or,
|
| 115 |
+
within a display generated by the Derivative Works, if and
|
| 116 |
+
wherever such third-party notices normally appear. The contents
|
| 117 |
+
of the NOTICE file are for informational purposes only and
|
| 118 |
+
do not modify the License. You may add Your own attribution
|
| 119 |
+
notices within Derivative Works that You distribute, alongside
|
| 120 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 121 |
+
that such additional attribution notices cannot be construed
|
| 122 |
+
as modifying the License.
|
| 123 |
+
|
| 124 |
+
You may add Your own copyright statement to Your modifications and
|
| 125 |
+
may provide additional or different license terms and conditions
|
| 126 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 127 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 128 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 129 |
+
the conditions stated in this License.
|
| 130 |
+
|
| 131 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 132 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 133 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 134 |
+
this License, without any additional terms or conditions.
|
| 135 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 136 |
+
the terms of any separate license agreement you may have executed
|
| 137 |
+
with Licensor regarding such Contributions.
|
| 138 |
+
|
| 139 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 140 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 141 |
+
except as required for reasonable and customary use in describing the
|
| 142 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 143 |
+
|
| 144 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 145 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 146 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 147 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 148 |
+
implied, including, without limitation, any warranties or conditions
|
| 149 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 150 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 151 |
+
appropriateness of using or redistributing the Work and assume any
|
| 152 |
+
risks associated with Your exercise of permissions under this License.
|
| 153 |
+
|
| 154 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 155 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 156 |
+
unless required by applicable law (such as deliberate and grossly
|
| 157 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 158 |
+
liable to You for damages, including any direct, indirect, special,
|
| 159 |
+
incidental, or consequential damages of any character arising as a
|
| 160 |
+
result of this License or out of the use or inability to use the
|
| 161 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 162 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 163 |
+
other commercial damages or losses), even if such Contributor
|
| 164 |
+
has been advised of the possibility of such damages.
|
| 165 |
+
|
| 166 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 167 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 168 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 169 |
+
or other liability obligations and/or rights consistent with this
|
| 170 |
+
License. However, in accepting such obligations, You may act only
|
| 171 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 172 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 173 |
+
defend, and hold each Contributor harmless for any liability
|
| 174 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 175 |
+
of your accepting any such warranty or additional liability.
|
| 176 |
+
|
| 177 |
+
END OF TERMS AND CONDITIONS
|
| 178 |
+
|
| 179 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 180 |
+
|
| 181 |
+
To apply the Apache License to your work, attach the following
|
| 182 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 183 |
+
replaced with your own identifying information. (Don't include
|
| 184 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 185 |
+
comment syntax for the file format. We also recommend that a
|
| 186 |
+
file or class name and description of purpose be included on the
|
| 187 |
+
same "printed page" as the copyright notice for easier
|
| 188 |
+
identification within third-party archives.
|
| 189 |
+
|
| 190 |
+
Copyright 2024 ByteDance and/or its affiliates.
|
| 191 |
+
|
| 192 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 193 |
+
you may not use this file except in compliance with the License.
|
| 194 |
+
You may obtain a copy of the License at
|
| 195 |
+
|
| 196 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 197 |
+
|
| 198 |
+
Unless required by applicable law or agreed to in writing, software
|
| 199 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 200 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 201 |
+
See the License for the specific language governing permissions and
|
| 202 |
+
limitations under the License.
|
| 203 |
+
|
| 204 |
+
Implementation of the `LayerNorm` operators (in protenix/model/layer_norm/) referred to [OneFlow]
|
| 205 |
+
(https://github.com/Oneflow-Inc/oneflow) and [FastFold](https://github.com/hpcaitech/FastFold).
|
| 206 |
+
We used [OpenFold](https://github.com/aqlaboratory/openfold) for some
|
| 207 |
+
(in protenix/openfold_local/) implementations, except the `LayerNorm` part. The works OneFlow,
|
| 208 |
+
FastFold and OpenFold are licensed under Apache License 2.0.
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/configs/__init__.py
ADDED
|
File without changes
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/configs/configs_base.py
ADDED
|
@@ -0,0 +1,353 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
# pylint: disable=C0114,C0301
|
| 16 |
+
from protenix.config.extend_types import (
|
| 17 |
+
GlobalConfigValue,
|
| 18 |
+
ListValue,
|
| 19 |
+
RequiredValue,
|
| 20 |
+
ValueMaybeNone,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
basic_configs = {
|
| 24 |
+
"project": RequiredValue(str),
|
| 25 |
+
"run_name": RequiredValue(str),
|
| 26 |
+
"base_dir": RequiredValue(str),
|
| 27 |
+
# training
|
| 28 |
+
"eval_interval": RequiredValue(int),
|
| 29 |
+
"log_interval": RequiredValue(int),
|
| 30 |
+
"checkpoint_interval": -1,
|
| 31 |
+
"eval_first": False, # run evaluate() before training steps
|
| 32 |
+
"iters_to_accumulate": 1,
|
| 33 |
+
"eval_only": False,
|
| 34 |
+
"load_checkpoint_path": "",
|
| 35 |
+
"load_ema_checkpoint_path": "",
|
| 36 |
+
"load_strict": False,
|
| 37 |
+
"load_params_only": True,
|
| 38 |
+
"skip_load_step": False,
|
| 39 |
+
"skip_load_optimizer": False,
|
| 40 |
+
"skip_load_scheduler": False,
|
| 41 |
+
"train_confidence_only": False,
|
| 42 |
+
"use_wandb": True,
|
| 43 |
+
"wandb_id": "",
|
| 44 |
+
"seed": 42,
|
| 45 |
+
"deterministic": False,
|
| 46 |
+
"ema_decay": -1.0,
|
| 47 |
+
"eval_ema_only": False, # whether wandb only tracking ema checkpoint metrics
|
| 48 |
+
"ema_mutable_param_keywords": [""],
|
| 49 |
+
}
|
| 50 |
+
data_configs = {
|
| 51 |
+
# Data
|
| 52 |
+
"train_crop_size": 256,
|
| 53 |
+
"test_max_n_token": -1,
|
| 54 |
+
"train_lig_atom_rename": False,
|
| 55 |
+
"train_shuffle_mols": False,
|
| 56 |
+
"train_shuffle_sym_ids": False,
|
| 57 |
+
"test_lig_atom_rename": False,
|
| 58 |
+
"test_shuffle_mols": False,
|
| 59 |
+
"test_shuffle_sym_ids": False,
|
| 60 |
+
}
|
| 61 |
+
optim_configs = {
|
| 62 |
+
# Optim
|
| 63 |
+
"lr": 0.0018,
|
| 64 |
+
"lr_scheduler": "af3",
|
| 65 |
+
"warmup_steps": 10,
|
| 66 |
+
"max_steps": RequiredValue(int),
|
| 67 |
+
"min_lr_ratio": 0.1,
|
| 68 |
+
"decay_every_n_steps": 50000,
|
| 69 |
+
"grad_clip_norm": 10,
|
| 70 |
+
# Optim - Adam
|
| 71 |
+
"adam": {
|
| 72 |
+
"beta1": 0.9,
|
| 73 |
+
"beta2": 0.95,
|
| 74 |
+
"weight_decay": 1e-8,
|
| 75 |
+
"lr": GlobalConfigValue("lr"),
|
| 76 |
+
"use_adamw": False,
|
| 77 |
+
},
|
| 78 |
+
# Optim - LRScheduler
|
| 79 |
+
"af3_lr_scheduler": {
|
| 80 |
+
"warmup_steps": GlobalConfigValue("warmup_steps"),
|
| 81 |
+
"decay_every_n_steps": GlobalConfigValue("decay_every_n_steps"),
|
| 82 |
+
"decay_factor": 0.95,
|
| 83 |
+
"lr": GlobalConfigValue("lr"),
|
| 84 |
+
},
|
| 85 |
+
}
|
| 86 |
+
model_configs = {
|
| 87 |
+
# Model
|
| 88 |
+
"c_s": 384,
|
| 89 |
+
"c_z": 128,
|
| 90 |
+
"c_s_inputs": 449, # c_s_inputs == c_token + 32 + 32 + 1
|
| 91 |
+
"c_atom": 128,
|
| 92 |
+
"c_atompair": 16,
|
| 93 |
+
"c_token": 384,
|
| 94 |
+
"n_blocks": 48,
|
| 95 |
+
"max_atoms_per_token": 24, # DNA G max_atoms = 23
|
| 96 |
+
"no_bins": 64,
|
| 97 |
+
"sigma_data": 16.0,
|
| 98 |
+
"diffusion_batch_size": 48,
|
| 99 |
+
"diffusion_chunk_size": ValueMaybeNone(4), # chunksize of diffusion_batch_size
|
| 100 |
+
"blocks_per_ckpt": ValueMaybeNone(
|
| 101 |
+
1
|
| 102 |
+
), # NOTE: Number of blocks in each activation checkpoint, if None, no checkpointing is performed.
|
| 103 |
+
# switch of kernels
|
| 104 |
+
"use_memory_efficient_kernel": False,
|
| 105 |
+
"use_deepspeed_evo_attention": True,
|
| 106 |
+
"use_flash": False,
|
| 107 |
+
"use_lma": False,
|
| 108 |
+
"use_xformer": False,
|
| 109 |
+
"find_unused_parameters": False,
|
| 110 |
+
"dtype": "bf16", # default training dtype: bf16
|
| 111 |
+
"loss_metrics_sparse_enable": True, # the swicth for both sparse lddt metrics and sparse bond/smooth lddt loss
|
| 112 |
+
"skip_amp": {
|
| 113 |
+
"sample_diffusion": True,
|
| 114 |
+
"confidence_head": True,
|
| 115 |
+
"sample_diffusion_training": True,
|
| 116 |
+
"loss": True,
|
| 117 |
+
},
|
| 118 |
+
"infer_setting": {
|
| 119 |
+
"chunk_size": ValueMaybeNone(
|
| 120 |
+
64
|
| 121 |
+
), # should set to null for normal training and small dataset eval [for efficiency]
|
| 122 |
+
"sample_diffusion_chunk_size": ValueMaybeNone(
|
| 123 |
+
1
|
| 124 |
+
), # should set to null for normal training and small dataset eval [for efficiency]
|
| 125 |
+
"lddt_metrics_sparse_enable": GlobalConfigValue("loss_metrics_sparse_enable"),
|
| 126 |
+
"lddt_metrics_chunk_size": ValueMaybeNone(
|
| 127 |
+
1
|
| 128 |
+
), # only works if loss_metrics_sparse_enable, can set as default 1
|
| 129 |
+
},
|
| 130 |
+
"train_noise_sampler": {
|
| 131 |
+
"p_mean": -1.2,
|
| 132 |
+
"p_std": 1.5,
|
| 133 |
+
"sigma_data": 16.0, # NOTE: in EDM, this is 1.0
|
| 134 |
+
},
|
| 135 |
+
"inference_noise_scheduler": {
|
| 136 |
+
"s_max": 160.0,
|
| 137 |
+
"s_min": 4e-4,
|
| 138 |
+
"rho": 7,
|
| 139 |
+
"sigma_data": 16.0, # NOTE: in EDM, this is 1.0
|
| 140 |
+
},
|
| 141 |
+
"sample_diffusion": {
|
| 142 |
+
"gamma0": 0.8,
|
| 143 |
+
"gamma_min": 1.0,
|
| 144 |
+
"noise_scale_lambda": 1.003,
|
| 145 |
+
"step_scale_eta": 1.5,
|
| 146 |
+
"N_step": 200,
|
| 147 |
+
"N_sample": 5,
|
| 148 |
+
"N_step_mini_rollout": 20,
|
| 149 |
+
"N_sample_mini_rollout": 1,
|
| 150 |
+
},
|
| 151 |
+
"model": {
|
| 152 |
+
"N_model_seed": 1, # for inference
|
| 153 |
+
"N_cycle": 4,
|
| 154 |
+
"input_embedder": {
|
| 155 |
+
"c_atom": GlobalConfigValue("c_atom"),
|
| 156 |
+
"c_atompair": GlobalConfigValue("c_atompair"),
|
| 157 |
+
"c_token": GlobalConfigValue("c_token"),
|
| 158 |
+
},
|
| 159 |
+
"relative_position_encoding": {
|
| 160 |
+
"r_max": 32,
|
| 161 |
+
"s_max": 2,
|
| 162 |
+
"c_z": GlobalConfigValue("c_z"),
|
| 163 |
+
},
|
| 164 |
+
"template_embedder": {
|
| 165 |
+
"c": 64,
|
| 166 |
+
"c_z": GlobalConfigValue("c_z"),
|
| 167 |
+
"n_blocks": 0,
|
| 168 |
+
"dropout": 0.25,
|
| 169 |
+
"blocks_per_ckpt": GlobalConfigValue("blocks_per_ckpt"),
|
| 170 |
+
},
|
| 171 |
+
"msa_module": {
|
| 172 |
+
"c_m": 64,
|
| 173 |
+
"c_z": GlobalConfigValue("c_z"),
|
| 174 |
+
"c_s_inputs": GlobalConfigValue("c_s_inputs"),
|
| 175 |
+
"n_blocks": 4,
|
| 176 |
+
"msa_dropout": 0.15,
|
| 177 |
+
"pair_dropout": 0.25,
|
| 178 |
+
"blocks_per_ckpt": GlobalConfigValue("blocks_per_ckpt"),
|
| 179 |
+
},
|
| 180 |
+
"pairformer": {
|
| 181 |
+
"n_blocks": GlobalConfigValue("n_blocks"),
|
| 182 |
+
"c_z": GlobalConfigValue("c_z"),
|
| 183 |
+
"c_s": GlobalConfigValue("c_s"),
|
| 184 |
+
"n_heads": 16,
|
| 185 |
+
"dropout": 0.25,
|
| 186 |
+
"blocks_per_ckpt": GlobalConfigValue("blocks_per_ckpt"),
|
| 187 |
+
},
|
| 188 |
+
"diffusion_module": {
|
| 189 |
+
"use_fine_grained_checkpoint": True,
|
| 190 |
+
"sigma_data": GlobalConfigValue("sigma_data"),
|
| 191 |
+
"c_token": 768,
|
| 192 |
+
"c_atom": GlobalConfigValue("c_atom"),
|
| 193 |
+
"c_atompair": GlobalConfigValue("c_atompair"),
|
| 194 |
+
"c_z": GlobalConfigValue("c_z"),
|
| 195 |
+
"c_s": GlobalConfigValue("c_s"),
|
| 196 |
+
"c_s_inputs": GlobalConfigValue("c_s_inputs"),
|
| 197 |
+
"initialization": {
|
| 198 |
+
"zero_init_condition_transition": False,
|
| 199 |
+
"zero_init_atom_encoder_residual_linear": False,
|
| 200 |
+
"he_normal_init_atom_encoder_small_mlp": False,
|
| 201 |
+
"he_normal_init_atom_encoder_output": False,
|
| 202 |
+
"glorot_init_self_attention": False,
|
| 203 |
+
"zero_init_adaln": True,
|
| 204 |
+
"zero_init_residual_condition_transition": False,
|
| 205 |
+
"zero_init_dit_output": True,
|
| 206 |
+
"zero_init_atom_decoder_linear": False,
|
| 207 |
+
},
|
| 208 |
+
"atom_encoder": {
|
| 209 |
+
"n_blocks": 3,
|
| 210 |
+
"n_heads": 4,
|
| 211 |
+
},
|
| 212 |
+
"transformer": {
|
| 213 |
+
"n_blocks": 24,
|
| 214 |
+
"n_heads": 16,
|
| 215 |
+
},
|
| 216 |
+
"atom_decoder": {
|
| 217 |
+
"n_blocks": 3,
|
| 218 |
+
"n_heads": 4,
|
| 219 |
+
},
|
| 220 |
+
"blocks_per_ckpt": GlobalConfigValue("blocks_per_ckpt"),
|
| 221 |
+
},
|
| 222 |
+
"confidence_head": {
|
| 223 |
+
"c_z": GlobalConfigValue("c_z"),
|
| 224 |
+
"c_s": GlobalConfigValue("c_s"),
|
| 225 |
+
"c_s_inputs": GlobalConfigValue("c_s_inputs"),
|
| 226 |
+
"n_blocks": 4,
|
| 227 |
+
"max_atoms_per_token": GlobalConfigValue("max_atoms_per_token"),
|
| 228 |
+
"pairformer_dropout": 0.0,
|
| 229 |
+
"blocks_per_ckpt": GlobalConfigValue("blocks_per_ckpt"),
|
| 230 |
+
"distance_bin_start": 3.25,
|
| 231 |
+
"distance_bin_end": 52.0,
|
| 232 |
+
"distance_bin_step": 1.25,
|
| 233 |
+
"stop_gradient": True,
|
| 234 |
+
},
|
| 235 |
+
"distogram_head": {
|
| 236 |
+
"c_z": GlobalConfigValue("c_z"),
|
| 237 |
+
"no_bins": GlobalConfigValue("no_bins"),
|
| 238 |
+
},
|
| 239 |
+
},
|
| 240 |
+
}
|
| 241 |
+
perm_configs = {
|
| 242 |
+
# Chain and Atom Permutation
|
| 243 |
+
"chain_permutation": {
|
| 244 |
+
"train": {
|
| 245 |
+
"mini_rollout": True,
|
| 246 |
+
"diffusion_sample": False,
|
| 247 |
+
},
|
| 248 |
+
"test": {
|
| 249 |
+
"diffusion_sample": True,
|
| 250 |
+
},
|
| 251 |
+
"permute_by_pocket": True,
|
| 252 |
+
"configs": {
|
| 253 |
+
"use_center_rmsd": False,
|
| 254 |
+
"find_gt_anchor_first": False,
|
| 255 |
+
"accept_it_as_it_is": False,
|
| 256 |
+
"enumerate_all_anchor_pairs": False,
|
| 257 |
+
"selection_metric": "aligned_rmsd",
|
| 258 |
+
},
|
| 259 |
+
},
|
| 260 |
+
"atom_permutation": {
|
| 261 |
+
"train": {
|
| 262 |
+
"mini_rollout": True,
|
| 263 |
+
"diffusion_sample": False,
|
| 264 |
+
},
|
| 265 |
+
"test": {
|
| 266 |
+
"diffusion_sample": True,
|
| 267 |
+
},
|
| 268 |
+
"permute_by_pocket": True,
|
| 269 |
+
"global_align_wo_symmetric_atom": False,
|
| 270 |
+
},
|
| 271 |
+
}
|
| 272 |
+
loss_configs = {
|
| 273 |
+
"loss": {
|
| 274 |
+
"diffusion_lddt_chunk_size": ValueMaybeNone(1),
|
| 275 |
+
"diffusion_bond_chunk_size": ValueMaybeNone(1),
|
| 276 |
+
"diffusion_chunk_size_outer": ValueMaybeNone(-1),
|
| 277 |
+
"diffusion_sparse_loss_enable": GlobalConfigValue("loss_metrics_sparse_enable"),
|
| 278 |
+
"diffusion_lddt_loss_dense": True, # only set true in initial training for training speed
|
| 279 |
+
"resolution": {"min": 0.1, "max": 4.0},
|
| 280 |
+
"weight": {
|
| 281 |
+
"alpha_confidence": 1e-4,
|
| 282 |
+
"alpha_pae": 0.0, # or 1 in finetuning stage 3
|
| 283 |
+
"alpha_except_pae": 1.0,
|
| 284 |
+
"alpha_diffusion": 4.0,
|
| 285 |
+
"alpha_distogram": 3e-2,
|
| 286 |
+
"alpha_bond": 0.0, # or 1 in finetuning stages
|
| 287 |
+
"smooth_lddt": 1.0, # or 0 in finetuning stages
|
| 288 |
+
},
|
| 289 |
+
"plddt": {
|
| 290 |
+
"min_bin": 0,
|
| 291 |
+
"max_bin": 1.0,
|
| 292 |
+
"no_bins": 50,
|
| 293 |
+
"normalize": True,
|
| 294 |
+
"eps": 1e-6,
|
| 295 |
+
},
|
| 296 |
+
"pde": {
|
| 297 |
+
"min_bin": 0,
|
| 298 |
+
"max_bin": 32,
|
| 299 |
+
"no_bins": 64,
|
| 300 |
+
"eps": 1e-6,
|
| 301 |
+
},
|
| 302 |
+
"resolved": {
|
| 303 |
+
"eps": 1e-6,
|
| 304 |
+
},
|
| 305 |
+
"pae": {
|
| 306 |
+
"min_bin": 0,
|
| 307 |
+
"max_bin": 32,
|
| 308 |
+
"no_bins": 64,
|
| 309 |
+
"eps": 1e-6,
|
| 310 |
+
},
|
| 311 |
+
"diffusion": {
|
| 312 |
+
"mse": {
|
| 313 |
+
"weight_mse": 1 / 3,
|
| 314 |
+
"weight_dna": 5.0,
|
| 315 |
+
"weight_rna": 5.0,
|
| 316 |
+
"weight_ligand": 10.0,
|
| 317 |
+
"eps": 1e-6,
|
| 318 |
+
},
|
| 319 |
+
"bond": {
|
| 320 |
+
"eps": 1e-6,
|
| 321 |
+
},
|
| 322 |
+
"smooth_lddt": {
|
| 323 |
+
"eps": 1e-6,
|
| 324 |
+
},
|
| 325 |
+
},
|
| 326 |
+
"distogram": {
|
| 327 |
+
"min_bin": 2.3125,
|
| 328 |
+
"max_bin": 21.6875,
|
| 329 |
+
"no_bins": 64,
|
| 330 |
+
"eps": 1e-6,
|
| 331 |
+
},
|
| 332 |
+
},
|
| 333 |
+
"metrics": {
|
| 334 |
+
"lddt": {
|
| 335 |
+
"eps": 1e-6,
|
| 336 |
+
},
|
| 337 |
+
"complex_ranker_keys": ListValue(["plddt", "gpde", "ranking_score"]),
|
| 338 |
+
"chain_ranker_keys": ListValue(["chain_ptm", "chain_plddt"]),
|
| 339 |
+
"interface_ranker_keys": ListValue(
|
| 340 |
+
["chain_pair_iptm", "chain_pair_iptm_global", "chain_pair_plddt"]
|
| 341 |
+
),
|
| 342 |
+
"clash": {"af3_clash_threshold": 1.1, "vdw_clash_threshold": 0.75},
|
| 343 |
+
},
|
| 344 |
+
}
|
| 345 |
+
|
| 346 |
+
configs = {
|
| 347 |
+
**basic_configs,
|
| 348 |
+
**data_configs,
|
| 349 |
+
**optim_configs,
|
| 350 |
+
**model_configs,
|
| 351 |
+
**perm_configs,
|
| 352 |
+
**loss_configs,
|
| 353 |
+
}
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/configs/configs_data.py
ADDED
|
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
# pylint: disable=C0114,C0301
|
| 16 |
+
import os
|
| 17 |
+
from copy import deepcopy
|
| 18 |
+
|
| 19 |
+
from protenix.config.extend_types import GlobalConfigValue, ListValue
|
| 20 |
+
|
| 21 |
+
default_test_configs = {
|
| 22 |
+
"sampler_configs": {
|
| 23 |
+
"sampler_type": "uniform",
|
| 24 |
+
},
|
| 25 |
+
"cropping_configs": {
|
| 26 |
+
"method_weights": [
|
| 27 |
+
0.0, # ContiguousCropping
|
| 28 |
+
0.0, # SpatialCropping
|
| 29 |
+
1.0, # SpatialInterfaceCropping
|
| 30 |
+
],
|
| 31 |
+
"crop_size": -1,
|
| 32 |
+
},
|
| 33 |
+
"lig_atom_rename": GlobalConfigValue("test_lig_atom_rename"),
|
| 34 |
+
"shuffle_mols": GlobalConfigValue("test_shuffle_mols"),
|
| 35 |
+
"shuffle_sym_ids": GlobalConfigValue("test_shuffle_sym_ids"),
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
default_weighted_pdb_configs = {
|
| 39 |
+
"sampler_configs": {
|
| 40 |
+
"sampler_type": "weighted",
|
| 41 |
+
"beta_dict": {
|
| 42 |
+
"chain": 0.5,
|
| 43 |
+
"interface": 1,
|
| 44 |
+
},
|
| 45 |
+
"alpha_dict": {
|
| 46 |
+
"prot": 3,
|
| 47 |
+
"nuc": 3,
|
| 48 |
+
"ligand": 1,
|
| 49 |
+
},
|
| 50 |
+
"force_recompute_weight": True,
|
| 51 |
+
},
|
| 52 |
+
"cropping_configs": {
|
| 53 |
+
"method_weights": ListValue([0.2, 0.4, 0.4]),
|
| 54 |
+
"crop_size": GlobalConfigValue("train_crop_size"),
|
| 55 |
+
},
|
| 56 |
+
"sample_weight": 0.5,
|
| 57 |
+
"limits": -1,
|
| 58 |
+
"lig_atom_rename": GlobalConfigValue("train_lig_atom_rename"),
|
| 59 |
+
"shuffle_mols": GlobalConfigValue("train_shuffle_mols"),
|
| 60 |
+
"shuffle_sym_ids": GlobalConfigValue("train_shuffle_sym_ids"),
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
# DATA_ROOT_DIR = "/af3-dev/release_data/"
|
| 64 |
+
DATA_ROOT_DIR = "/af3-dev/release_data/"
|
| 65 |
+
# CCD_COMPONENTS_FILE_PATH = '/data1/qtfeng/dev/af3_benchmark/data/components.cif'
|
| 66 |
+
# CCD_COMPONENTS_RDKIT_MOL_FILE_PATH='/data1/qtfeng/dev/af3_benchmark/data/components.cif.rdkit_mol.pkl'
|
| 67 |
+
|
| 68 |
+
CCD_COMPONENTS_FILE_PATH = os.path.join(DATA_ROOT_DIR, "components.v20240608.cif")
|
| 69 |
+
CCD_COMPONENTS_RDKIT_MOL_FILE_PATH = os.path.join(
|
| 70 |
+
DATA_ROOT_DIR, "components.v20240608.cif.rdkit_mol.pkl"
|
| 71 |
+
)
|
| 72 |
+
|
| 73 |
+
# This is a patch in inference stage for users that do not have root permission.
|
| 74 |
+
# If you run
|
| 75 |
+
# ```
|
| 76 |
+
# bash inference_demo.sh
|
| 77 |
+
# ```
|
| 78 |
+
# or
|
| 79 |
+
# ```
|
| 80 |
+
# protenix predict --input examples/example.json --out_dir ./output
|
| 81 |
+
# ````
|
| 82 |
+
# The checkpoint and the data cache will be downloaded to the current code directory.
|
| 83 |
+
if (not os.path.exists(CCD_COMPONENTS_FILE_PATH)) or (
|
| 84 |
+
not os.path.exists(CCD_COMPONENTS_RDKIT_MOL_FILE_PATH)
|
| 85 |
+
):
|
| 86 |
+
print("Try to find the ccd cache data in the code directory for inference.")
|
| 87 |
+
current_file_path = os.path.abspath(__file__)
|
| 88 |
+
current_directory = os.path.dirname(current_file_path)
|
| 89 |
+
code_directory = os.path.dirname(current_directory)
|
| 90 |
+
|
| 91 |
+
data_cache_dir = os.path.join(code_directory, "release_data/ccd_cache")
|
| 92 |
+
CCD_COMPONENTS_FILE_PATH = os.path.join(data_cache_dir, "components.v20240608.cif")
|
| 93 |
+
CCD_COMPONENTS_RDKIT_MOL_FILE_PATH = os.path.join(
|
| 94 |
+
data_cache_dir, "components.v20240608.cif.rdkit_mol.pkl"
|
| 95 |
+
)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
data_configs = {
|
| 99 |
+
"num_dl_workers": 16,
|
| 100 |
+
"epoch_size": 10000,
|
| 101 |
+
"train_ref_pos_augment": True,
|
| 102 |
+
"test_ref_pos_augment": True,
|
| 103 |
+
"train_sets": ListValue(["weightedPDB_before2109_wopb_nometalc_0925"]),
|
| 104 |
+
"train_sampler": {
|
| 105 |
+
"train_sample_weights": ListValue([1.0]),
|
| 106 |
+
"sampler_type": "weighted",
|
| 107 |
+
},
|
| 108 |
+
"test_sets": ListValue(["recentPDB_1536_sample384_0925"]),
|
| 109 |
+
"weightedPDB_before2109_wopb_nometalc_0925": {
|
| 110 |
+
"base_info": {
|
| 111 |
+
"mmcif_dir": os.path.join(DATA_ROOT_DIR, "mmcif"),
|
| 112 |
+
"bioassembly_dict_dir": os.path.join(DATA_ROOT_DIR, "mmcif_bioassembly"),
|
| 113 |
+
"indices_fpath": os.path.join(
|
| 114 |
+
DATA_ROOT_DIR,
|
| 115 |
+
"indices/weightedPDB_indices_before_2021-09-30_wo_posebusters_resolution_below_9.csv.gz",
|
| 116 |
+
),
|
| 117 |
+
"pdb_list": "",
|
| 118 |
+
"random_sample_if_failed": True,
|
| 119 |
+
"max_n_token": -1, # can be used for removing data with too many tokens.
|
| 120 |
+
"use_reference_chains_only": False,
|
| 121 |
+
"exclusion": { # do not sample the data based on ions.
|
| 122 |
+
"mol_1_type": ListValue(["ions"]),
|
| 123 |
+
"mol_2_type": ListValue(["ions"]),
|
| 124 |
+
},
|
| 125 |
+
},
|
| 126 |
+
**deepcopy(default_weighted_pdb_configs),
|
| 127 |
+
},
|
| 128 |
+
"recentPDB_1536_sample384_0925": {
|
| 129 |
+
"base_info": {
|
| 130 |
+
"mmcif_dir": os.path.join(DATA_ROOT_DIR, "mmcif"),
|
| 131 |
+
"bioassembly_dict_dir": os.path.join(
|
| 132 |
+
DATA_ROOT_DIR, "recentPDB_bioassembly"
|
| 133 |
+
),
|
| 134 |
+
"indices_fpath": os.path.join(
|
| 135 |
+
DATA_ROOT_DIR, "indices/recentPDB_low_homology_maxtoken1536.csv"
|
| 136 |
+
),
|
| 137 |
+
"pdb_list": os.path.join(
|
| 138 |
+
DATA_ROOT_DIR,
|
| 139 |
+
"indices/recentPDB_low_homology_maxtoken1024_sample384_pdb_id.txt",
|
| 140 |
+
),
|
| 141 |
+
"max_n_token": GlobalConfigValue("test_max_n_token"), # filter data
|
| 142 |
+
"sort_by_n_token": False,
|
| 143 |
+
"group_by_pdb_id": True,
|
| 144 |
+
"find_eval_chain_interface": True,
|
| 145 |
+
},
|
| 146 |
+
**deepcopy(default_test_configs),
|
| 147 |
+
},
|
| 148 |
+
"posebusters_0925": {
|
| 149 |
+
"base_info": {
|
| 150 |
+
"mmcif_dir": os.path.join(DATA_ROOT_DIR, "posebusters_mmcif"),
|
| 151 |
+
"bioassembly_dict_dir": os.path.join(
|
| 152 |
+
DATA_ROOT_DIR, "posebusters_bioassembly"
|
| 153 |
+
),
|
| 154 |
+
"indices_fpath": os.path.join(
|
| 155 |
+
DATA_ROOT_DIR, "indices/posebusters_indices_mainchain_interface.csv"
|
| 156 |
+
),
|
| 157 |
+
"pdb_list": "",
|
| 158 |
+
"find_pocket": True,
|
| 159 |
+
"find_all_pockets": False,
|
| 160 |
+
"max_n_token": GlobalConfigValue("test_max_n_token"), # filter data
|
| 161 |
+
},
|
| 162 |
+
**deepcopy(default_test_configs),
|
| 163 |
+
},
|
| 164 |
+
"msa": {
|
| 165 |
+
"enable": True,
|
| 166 |
+
"enable_rna_msa": False,
|
| 167 |
+
"prot": {
|
| 168 |
+
"pairing_db": "uniref100",
|
| 169 |
+
"non_pairing_db": "mmseqs_other",
|
| 170 |
+
"pdb_mmseqs_dir": os.path.join(DATA_ROOT_DIR, "mmcif_msa"),
|
| 171 |
+
"seq_to_pdb_idx_path": os.path.join(DATA_ROOT_DIR, "seq_to_pdb_index.json"),
|
| 172 |
+
"indexing_method": "sequence",
|
| 173 |
+
},
|
| 174 |
+
"rna": {
|
| 175 |
+
"seq_to_pdb_idx_path": "",
|
| 176 |
+
"rna_msa_dir": "",
|
| 177 |
+
"indexing_method": "sequence",
|
| 178 |
+
},
|
| 179 |
+
"strategy": "random",
|
| 180 |
+
"merge_method": "dense_max",
|
| 181 |
+
"min_size": {
|
| 182 |
+
"train": 1,
|
| 183 |
+
"test": 2048,
|
| 184 |
+
},
|
| 185 |
+
"max_size": {
|
| 186 |
+
"train": 16384,
|
| 187 |
+
"test": 16384,
|
| 188 |
+
},
|
| 189 |
+
"sample_cutoff": {
|
| 190 |
+
"train": 2048,
|
| 191 |
+
"test": 2048,
|
| 192 |
+
},
|
| 193 |
+
},
|
| 194 |
+
"template": {
|
| 195 |
+
"enable": False,
|
| 196 |
+
},
|
| 197 |
+
"ccd_components_file": CCD_COMPONENTS_FILE_PATH,
|
| 198 |
+
"ccd_components_rdkit_mol_file": CCD_COMPONENTS_RDKIT_MOL_FILE_PATH,
|
| 199 |
+
}
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/configs/configs_inference.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
# pylint: disable=C0114
|
| 16 |
+
import os
|
| 17 |
+
|
| 18 |
+
from protenix.config.extend_types import ListValue, RequiredValue
|
| 19 |
+
|
| 20 |
+
current_file_path = os.path.abspath(__file__)
|
| 21 |
+
current_directory = os.path.dirname(current_file_path)
|
| 22 |
+
code_directory = os.path.dirname(current_directory)
|
| 23 |
+
# The model will be download to the following dir if not exists:
|
| 24 |
+
# "./release_data/checkpoint/model_v0.2.0.pt"
|
| 25 |
+
inference_configs = {
|
| 26 |
+
"seeds": ListValue([101]),
|
| 27 |
+
"dump_dir": "./output",
|
| 28 |
+
"need_atom_confidence": False,
|
| 29 |
+
"input_json_path": RequiredValue(str),
|
| 30 |
+
"load_checkpoint_path": os.path.join(
|
| 31 |
+
code_directory, "./release_data/checkpoint/model_v0.2.0.pt"
|
| 32 |
+
),
|
| 33 |
+
"num_workers": 16,
|
| 34 |
+
"use_msa": True,
|
| 35 |
+
}
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/__init__.py
ADDED
|
File without changes
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/config/__init__.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
from .config import load_config, parse_configs, parse_sys_args, save_config
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/config/config.py
ADDED
|
@@ -0,0 +1,288 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import argparse
|
| 16 |
+
import copy
|
| 17 |
+
import sys
|
| 18 |
+
from typing import Any, Optional, Union
|
| 19 |
+
|
| 20 |
+
import yaml
|
| 21 |
+
from ml_collections.config_dict import ConfigDict
|
| 22 |
+
|
| 23 |
+
from protenix.config.extend_types import (
|
| 24 |
+
DefaultNoneWithType,
|
| 25 |
+
GlobalConfigValue,
|
| 26 |
+
ListValue,
|
| 27 |
+
RequiredValue,
|
| 28 |
+
ValueMaybeNone,
|
| 29 |
+
get_bool_value,
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class ArgumentNotSet(object):
|
| 34 |
+
pass
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class ConfigManager(object):
    """Resolve a hierarchical config spec into concrete defaults and merge
    user overrides addressed by dotted keys (e.g. ``model.evoformer.c_z``).

    The spec may contain typed placeholders from ``extend_types``
    (``RequiredValue``, ``ValueMaybeNone``, ``GlobalConfigValue``, ``ListValue``,
    ``DefaultNoneWithType``) which are flattened into per-key metadata tuples
    ``(dtype, default, allow_none, required)`` by :meth:`get_config_infos`.
    """

    def __init__(self, global_configs: dict, fill_required_with_null: bool = False):
        """
        Initialize the ConfigManager instance.

        Args:
            global_configs (dict): A dictionary containing global configuration settings.
            fill_required_with_null (bool, optional):
                A boolean flag indicating whether required values should be filled with `None` if not provided. Defaults to False.
        """
        self.global_configs = global_configs
        self.fill_required_with_null = fill_required_with_null
        # config_infos: flat {dotted_key: (dtype, default, allow_none, required)}
        # default_configs: nested dict of defaults mirroring global_configs
        self.config_infos, self.default_configs = self.get_config_infos()

    def get_value_info(
        self, value
    ) -> tuple[Any, Optional[Any], Optional[bool], Optional[bool]]:
        """
        Return the type, default value, whether it allows None, and whether it is required for a given value.

        Args:
            value: The value to determine the information for.

        Returns:
            tuple: A tuple containing the following elements:
                - dtype: The type of the value.
                - default_value: The default value for the value.
                - allow_none: A boolean indicating whether the value can be None.
                - required: A boolean indicating whether the value is required.
        """
        if isinstance(value, DefaultNoneWithType):
            return value.dtype, None, True, False
        elif isinstance(value, ValueMaybeNone):
            return value.dtype, value.value, True, False
        elif isinstance(value, RequiredValue):
            # When fill_required_with_null is set, required values degrade to
            # optional Nones (useful when building a config skeleton).
            if self.fill_required_with_null:
                return value.dtype, None, True, False
            else:
                return value.dtype, None, False, True
        elif isinstance(value, GlobalConfigValue):
            # Indirection: resolve against the referenced top-level key.
            return self.get_value_info(self.global_configs[value.global_key])
        elif isinstance(value, ListValue):
            return (value.dtype, value.value, False, False)
        elif isinstance(value, list):
            # Plain list literal: element type inferred from the first item.
            return (type(value[0]), value, False, False)
        else:
            return type(value), value, False, False

    def _get_config_infos(self, config_dict: dict) -> dict:
        """
        Recursively extracts configuration information from a given dictionary.

        Args:
            config_dict (dict): The dictionary containing configuration settings.

        Returns:
            tuple: A tuple containing two dictionaries:
                - all_keys: A dictionary mapping keys to their corresponding configuration information.
                - default_configs: A dictionary mapping keys to their default configuration values.

        Raises:
            AssertionError: If a key contains a period (.), which is not allowed.
        """
        all_keys = {}
        default_configs = {}
        for key, value in config_dict.items():
            # '.' is reserved as the hierarchy separator in flattened keys.
            assert "." not in key
            if isinstance(value, (dict)):
                children_keys, children_configs = self._get_config_infos(value)
                all_keys.update(
                    {
                        f"{key}.{child_key}": child_value_type
                        for child_key, child_value_type in children_keys.items()
                    }
                )
                default_configs[key] = children_configs
            else:
                value_info = self.get_value_info(value)
                all_keys[key] = value_info
                default_configs[key] = value_info[1]
        return all_keys, default_configs

    def get_config_infos(self):
        """Flatten the full global spec; see :meth:`_get_config_infos`."""
        return self._get_config_infos(self.global_configs)

    def _merge_configs(
        self,
        new_configs: dict,
        global_configs: dict,
        local_configs: dict,
        prefix="",
    ) -> ConfigDict:
        """Overwrite default configs with new configs recursively.
        Args:
            new_configs: global flattern config dict with all hierarchical config keys joined by '.', i.e.
                {
                    'c_z': 32,
                    'model.evoformer.c_z': 16,
                    ...
                }
            global_configs: global hierarchical merging configs, i.e.
                {
                    'c_z' 32,
                    'c_m': 128,
                    'model': {
                        'evoformer': {
                            ...
                        }
                    }
                }
            local_configs: hierarchical merging config dict in current level, i.e. for 'model' level, this maybe
                {
                    'evoformer': {
                        'c_z': GlobalConfigValue("c_z"),
                    },
                    'embedder': {
                        ...
                    }
                }
            prefix (str, optional): A prefix string to prepend to keys during recursion. Defaults to an empty string.

        Returns:
            ConfigDict: The merged configuration dictionary.

        Raises:
            Exception: If a required config value is not allowed to be None.
        """
        # Merge configs in current level first, since these configs maybe referenced by lower level
        for key, value in local_configs.items():
            if isinstance(value, dict):
                continue
            full_key = f"{prefix}.{key}" if prefix else key
            dtype, default_value, allow_none, required = self.config_infos[full_key]
            if full_key in new_configs and not isinstance(
                new_configs[full_key], ArgumentNotSet
            ):
                # CLI values arrive as strings; convert by declared dtype.
                if allow_none and new_configs[full_key] in [
                    "None",
                    "none",
                    "null",
                ]:
                    local_configs[key] = None
                elif dtype == bool:
                    local_configs[key] = get_bool_value(new_configs[full_key])
                elif isinstance(value, (ListValue, list)):
                    # Comma-separated list; empty string means empty list.
                    local_configs[key] = (
                        [dtype(s) for s in new_configs[full_key].strip().split(",")]
                        if new_configs[full_key].strip()
                        else []
                    )
                else:
                    local_configs[key] = dtype(new_configs[full_key])
            elif isinstance(value, GlobalConfigValue):
                # No override given: inherit the (already merged) global value.
                local_configs[key] = global_configs[value.global_key]
            else:
                if not allow_none and default_value is None:
                    raise Exception(f"config {full_key} not allowed to be none")
                local_configs[key] = default_value
        # Recurse into sub-dicts after the current level is fully resolved.
        for key, value in local_configs.items():
            if not isinstance(value, dict):
                continue
            self._merge_configs(
                new_configs, global_configs, value, f"{prefix}.{key}" if prefix else key
            )

    def merge_configs(self, new_configs: dict) -> ConfigDict:
        """Merge flattened overrides into a deep copy of the global spec.

        Args:
            new_configs (dict): Flat {dotted_key: string_value} overrides.

        Returns:
            ConfigDict: Fully resolved configuration.
        """
        configs = copy.deepcopy(self.global_configs)
        self._merge_configs(new_configs, configs, configs)
        return ConfigDict(configs)
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
def parse_configs(
    configs: dict, arg_str: str = None, fill_required_with_null: bool = False
) -> ConfigDict:
    """
    Parses and merges configuration settings from a dictionary and command-line arguments.

    Args:
        configs (dict): A dictionary containing initial configuration settings.
        arg_str (str, optional): A string representing command-line arguments. Defaults to None.
        fill_required_with_null (bool, optional):
            A boolean flag indicating whether required values should be filled with `None` if not provided. Defaults to False.

    Returns:
        ConfigDict: The merged configuration dictionary.
    """
    manager = ConfigManager(configs, fill_required_with_null=fill_required_with_null)
    parser = argparse.ArgumentParser()
    # Register every flattened config key as a string-valued option; strings
    # are converted to their real dtypes later, during merging.
    for key, (_dtype, _default, _allow_none, required) in manager.config_infos.items():
        parser.add_argument(
            "--" + key, type=str, default=ArgumentNotSet(), required=required
        )
    # Merge user command-line overrides with the defaults.
    overrides = vars(parser.parse_args(arg_str.split())) if arg_str else {}
    return manager.merge_configs(overrides)
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
def parse_sys_args() -> str:
    """
    Check whether command-line arguments are valid.
    Each argument is expected to be in the format `--key value`.

    Returns:
        str: A string formatted as command-line arguments
            (trailing space included, e.g. "--a 1 --b 2 ").

    Raises:
        AssertionError: If any key does not start with `--`, or if the
            number of arguments is odd (a key without a value).
    """
    args = sys.argv[1:]
    # An odd count means some `--key` has no value. The previous zip-based
    # pairing silently dropped the trailing token, hiding the user's mistake.
    assert len(args) % 2 == 0, f"arguments must come in `--key value` pairs, got: {args}"
    arg_str = ""
    for k, v in zip(args[::2], args[1::2]):
        assert k.startswith("--")
        arg_str += f"{k} {v} "
    return arg_str
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def load_config(path: str) -> dict:
    """
    Loads a configuration from a YAML file.

    Args:
        path (str): The path to the YAML file containing the configuration.

    Returns:
        dict: A dictionary containing the configuration loaded from the YAML file.
    """
    with open(path, "r") as stream:
        return yaml.safe_load(stream)
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
def save_config(config: Union[ConfigDict, dict], path: str) -> None:
    """
    Saves a configuration to a YAML file.

    Args:
        config (ConfigDict or dict): The configuration to be saved.
            A ConfigDict is converted to a plain dictionary first.
        path (str): The path to the YAML file where the configuration will be saved.
    """
    with open(path, "w") as stream:
        payload = config.to_dict() if isinstance(config, ConfigDict) else config
        yaml.safe_dump(payload, stream)
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/config/extend_types.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class DefaultNoneWithType(object):
    """Config placeholder whose default is None but which records the
    real dtype so CLI overrides can be converted correctly."""

    def __init__(self, dtype):
        self.dtype = dtype
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class ValueMaybeNone(object):
    """A typed config default that callers may explicitly override with None."""

    def __init__(self, value):
        # A concrete (non-None) default is needed so the dtype can be inferred.
        assert value is not None
        self.value = value
        self.dtype = type(value)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class GlobalConfigValue(object):
    """Reference to a value defined under a top-level (global) config key;
    resolved during config merging."""

    def __init__(self, global_key):
        self.global_key = global_key
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class RequiredValue(object):
    """Placeholder for a config value the user must supply;
    only the expected dtype is known up front."""

    def __init__(self, dtype):
        self.dtype = dtype
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class ListValue(object):
    """A list-typed config default.

    Args:
        value: The default list, or None when there is no default.
        dtype: Element type; used when ``value`` is None or empty,
            otherwise inferred from the first element.
    """

    def __init__(self, value, dtype=None):
        if value:
            # Non-empty list: infer the element type from the first item.
            self.value = value
            self.dtype = type(value[0])
        else:
            # None or empty list: fall back to the explicit dtype.
            # (Previously an empty list raised IndexError on value[0].)
            self.value = value
            self.dtype = dtype
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def get_bool_value(bool_str: str):
    """Interpret a string as a boolean.

    Accepts common spellings (case-insensitive): true/t/yes/y/1 and
    false/f/no/n/0.

    Raises:
        ValueError: If the string matches neither set.
    """
    lowered = bool_str.lower()
    if lowered in ("true", "t", "yes", "y", "1"):
        return True
    if lowered in ("false", "f", "no", "n", "0"):
        return False
    raise ValueError(f"Cannot interpret {bool_str} as bool")
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/__init__.py
ADDED
|
File without changes
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/ccd.py
ADDED
|
@@ -0,0 +1,450 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import functools
|
| 16 |
+
import logging
|
| 17 |
+
import pickle
|
| 18 |
+
from collections import defaultdict
|
| 19 |
+
from pathlib import Path
|
| 20 |
+
from typing import Any, Optional, Union
|
| 21 |
+
|
| 22 |
+
import biotite
|
| 23 |
+
import biotite.structure as struc
|
| 24 |
+
import biotite.structure.io.pdbx as pdbx
|
| 25 |
+
import numpy as np
|
| 26 |
+
from biotite.structure import AtomArray
|
| 27 |
+
from rdkit import Chem
|
| 28 |
+
|
| 29 |
+
from configs.configs_data import data_configs
|
| 30 |
+
from protenix.data.substructure_perms import get_substructure_perms
|
| 31 |
+
|
| 32 |
+
logger = logging.getLogger(__name__)
|
| 33 |
+
|
| 34 |
+
COMPONENTS_FILE = data_configs["ccd_components_file"]
|
| 35 |
+
RKDIT_MOL_PKL = Path(data_configs["ccd_components_rdkit_mol_file"])
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
@functools.lru_cache
def biotite_load_ccd_cif() -> pdbx.CIFFile:
    """biotite load CCD components file

    Cached so the (large) components CIF file is parsed from disk only once
    per process.

    Returns:
        pdbx.CIFFile: ccd components file
    """
    return pdbx.CIFFile.read(COMPONENTS_FILE)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def _map_central_to_leaving_groups(component) -> Optional[dict[str, list[list[str]]]]:
    """map each central atom (bonded atom) index to leaving atom groups in component (atom_array).

    A "leaving group" is a connected set of atoms flagged via
    ``leaving_atom_flag``; each group is expected to hang off exactly one
    non-leaving ("central") atom.

    Returns:
        dict[str, list[list[str]]]: central atom name to leaving atom groups (atom names).
            None if any leaving group is bonded to more than one central atom
            (ambiguous ownership).
    """
    comp = component.copy()
    # Eg: ions
    if comp.bonds is None:
        return {}
    central_to_leaving_groups = defaultdict(list)
    # Iterate over central (non-leaving) atoms only.
    for c_idx in np.flatnonzero(~comp.leaving_atom_flag):
        bonds, _ = comp.bonds.get_bonds(c_idx)
        for l_idx in bonds:
            if comp.leaving_atom_flag[l_idx]:
                # Cut the bond to the central atom, then collect everything
                # still connected to l_idx -- that is the leaving group.
                comp.bonds.remove_bond(c_idx, l_idx)
                group_idx = struc.find_connected(comp.bonds, l_idx)
                if not np.all(comp.leaving_atom_flag[group_idx]):
                    # Group reaches a non-leaving atom, i.e. it is attached to
                    # more than one central atom -> give up.
                    return None
                central_to_leaving_groups[comp.atom_name[c_idx]].append(
                    comp.atom_name[group_idx].tolist()
                )
    return central_to_leaving_groups
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
@functools.lru_cache
def get_component_atom_array(
    ccd_code: str, keep_leaving_atoms: bool = False, keep_hydrogens=False
) -> AtomArray:
    """get component atom array

    Args:
        ccd_code (str): ccd code
        keep_leaving_atoms (bool, optional): keep leaving atoms. Defaults to False.
        keep_hydrogens (bool, optional): keep hydrogens. Defaults to False.

    Returns:
        AtomArray: Biotite AtomArray of CCD component
                   with additional attribute: leaving_atom_flag (bool).
                   None if the code is unknown or the entry cannot be parsed.
    """
    ccd_cif = biotite_load_ccd_cif()
    if ccd_code not in ccd_cif:
        logger.warning(f"Warning: get_component_atom_array() can not parse {ccd_code}")
        return None
    try:
        comp = pdbx.get_component(ccd_cif, data_block=ccd_code, use_ideal_coord=True)
    except biotite.InvalidFileError as e:
        # Eg: UNL without atom.
        logger.warning(
            f"Warning: get_component_atom_array() can not parse {ccd_code} for {e}"
        )
        return None
    # Annotate each atom with whether the CCD marks it as a leaving atom.
    atom_category = ccd_cif[ccd_code]["chem_comp_atom"]
    leaving_atom_flag = atom_category["pdbx_leaving_atom_flag"].as_array()
    comp.set_annotation("leaving_atom_flag", leaving_atom_flag == "Y")

    # Carry over alternative atom naming columns from the CIF.
    for atom_id in ["alt_atom_id", "pdbx_component_atom_id"]:
        comp.set_annotation(atom_id, atom_category[atom_id].as_array())
    if not keep_leaving_atoms:
        comp = comp[~comp.leaving_atom_flag]
    if not keep_hydrogens:
        # EG: ND4
        comp = comp[~np.isin(comp.element, ["H", "D"])]

    # Map central atom index to leaving group (atom_indices) in component (atom_array).
    comp.central_to_leaving_groups = _map_central_to_leaving_groups(comp)
    if comp.central_to_leaving_groups is None:
        logger.warning(
            f"Warning: ccd {ccd_code} has leaving atom group bond to more than one central atom, central_to_leaving_groups is None."
        )
    return comp
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
@functools.lru_cache(maxsize=None)
def get_one_letter_code(ccd_code: str) -> Union[str, None]:
    """get one_letter_code from CCD components file.

    normal return is one letter: ALA --> A, DT --> T
    unknown protein: X
    unknown DNA or RNA: N
    other unknown: None
    some ccd_code will return more than one letter:
    eg: XXY --> THG

    Args:
        ccd_code (str): CCD component code.

    Returns:
        str: one letter code, or None when unknown ("?" or absent entry).
    """
    ccd_cif = biotite_load_ccd_cif()
    if ccd_code not in ccd_cif:
        return None
    letter = ccd_cif[ccd_code]["chem_comp"]["one_letter_code"].as_item()
    return None if letter == "?" else letter
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
@functools.lru_cache(maxsize=None)
def get_mol_type(ccd_code: str) -> str:
    """get mol_type from CCD components file.

    based on _chem_comp.type
    http://mmcif.rcsb.org/dictionaries/mmcif_pdbx_v50.dic/Items/_chem_comp.type.html

    not use _chem_comp.pdbx_type, because it is not consistent with _chem_comp.type
    e.g. ccd 000 --> _chem_comp.type="NON-POLYMER" _chem_comp.pdbx_type="ATOMP"
    https://mmcif.wwpdb.org/dictionaries/mmcif_pdbx_v5_next.dic/Items/_struct_asym.pdbx_type.html

    Args:
        ccd_code (str): ccd code

    Returns:
        str: mol_type, one of {"protein", "rna", "dna", "ligand"}
    """
    ccd_cif = biotite_load_ccd_cif()
    # Unknown codes default to the catch-all "ligand" type.
    if ccd_code not in ccd_cif:
        return "ligand"

    link_type = ccd_cif[ccd_code]["chem_comp"]["type"].as_item().upper()

    # "PEPTIDE-LIKE" is explicitly excluded from the protein class.
    if "PEPTIDE" in link_type and link_type != "PEPTIDE-LIKE":
        return "protein"
    for marker, mol_type in (("DNA", "dna"), ("RNA", "rna")):
        if marker in link_type:
            return mol_type
    return "ligand"
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def get_all_ccd_code() -> list:
    """Return every CCD code present in the components file."""
    return list(biotite_load_ccd_cif().keys())
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
# Lazily-populated cache: CCD code -> precomputed RDKit Mol; loaded from the
# pickle file on the first call to get_component_rdkit_mol().
_ccd_rdkit_mols: dict[str, Chem.Mol] = {}
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def get_component_rdkit_mol(ccd_code: str) -> Union[Chem.Mol, None]:
    """get rdkit mol by PDBeCCDUtils
    https://github.com/PDBeurope/ccdutils

    preprocessing all ccd components in _components_file at first time run.

    Args:
        ccd_code (str): ccd code

    Returns
        rdkit.Chem.Mol: rdkit mol with ref coord, or None for unknown codes.

    Raises:
        FileNotFoundError: If the precomputed pickle of RDKit mols is missing.
    """
    global _ccd_rdkit_mols
    # _ccd_rdkit_mols is not empty
    if _ccd_rdkit_mols:
        return _ccd_rdkit_mols.get(ccd_code, None)

    rdkit_mol_pkl = RKDIT_MOL_PKL
    if rdkit_mol_pkl.exists():
        # One-time load of the full code->Mol mapping; subsequent calls hit
        # the in-memory cache above.
        # NOTE(review): pickle.load on a local data file - trusted input only.
        with open(rdkit_mol_pkl, "rb") as f:
            _ccd_rdkit_mols = pickle.load(f)
        return _ccd_rdkit_mols.get(ccd_code, None)
    else:
        raise FileNotFoundError(
            f"CCD components file {rdkit_mol_pkl} not found, please download it to your DATA_ROOT_DIR before running."
            "See https://github.com/bytedance/Protenix"
        )
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
@functools.lru_cache
def get_ccd_ref_info(ccd_code: str, return_perm: bool = True) -> dict[str, Any]:
    """
    Ref: AlphaFold3 SI Chapter 2.8
    Reference features. Features derived from a residue, nucleotide or ligand's reference conformer.
    Given an input CCD code or SMILES string, the conformer is typically generated
    with RDKit v.2023_03_3 [25] using ETKDGv3 [26]. On error, we fall back to using the CCD ideal coordinates,
    or finally the representative coordinates
    if they are from before our training date cut-off (2021-09-30 unless otherwise stated).
    At the end, any atom coordinates still missing are set to zeros.

    Get reference atom mapping and coordinates.

    Args:
        ccd_code (str): CCD code.
        return_perm (bool): return atom permutations.

    Returns:
        Dict:
            ccd: ccd code
            atom_map: atom name to atom index
            coord: atom coordinates
            mask: atom mask
            charge: atom formal charge
            perm: atom permutation (only when return_perm is True)
    """
    mol = get_component_rdkit_mol(ccd_code)
    if mol is None:
        return {}
    if mol.GetNumAtoms() == 0:  # eg: "UNL"
        logger.warning(
            f"Warning: mol {ccd_code} from get_component_rdkit_mol() has no atoms,"
            "get_ccd_ref_info() return empty dict"
        )
        return {}
    conf = mol.GetConformer(mol.ref_conf_id)
    coord = conf.GetPositions()
    charge = np.array([atom.GetFormalCharge() for atom in mol.GetAtoms()])

    results = {
        "ccd": ccd_code,  # str
        "atom_map": mol.atom_map,  # dict[str,int]: atom name to atom index
        "coord": coord,  # np.ndarray[float]: atom coordinates, shape:(n_atom,3)
        "mask": mol.ref_mask,  # np.ndarray[bool]: atom mask, shape:(n_atom,)
        "charge": charge,  # np.ndarray[int]: atom formal charge, shape:(n_atom,)
    }

    if return_perm:
        try:
            Chem.SanitizeMol(mol)
            perm = get_substructure_perms(mol, MaxMatches=1000)

        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; only RDKit sanitize/perm failures should fall back.
        except Exception:
            # Sanitize failed, permutation is unavailable: fall back to the
            # identity permutation over heavy (non-hydrogen) atoms.
            perm = np.array(
                [
                    [
                        i
                        for i, atom in enumerate(mol.GetAtoms())
                        if atom.GetAtomicNum() != 1
                    ]
                ]
            )
        # np.ndarray[int]: atom permutation, shape:(n_atom_wo_h, n_perm)
        results["perm"] = perm.T

    return results
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
# Modified from biotite to use consistent ccd components file
|
| 287 |
+
def _connect_inter_residue(
    atoms: AtomArray, residue_starts: np.ndarray
) -> struc.BondList:
    """
    Create a :class:`BondList` containing the bonds between adjacent
    amino acid or nucleotide residues.

    Parameters
    ----------
    atoms : AtomArray or AtomArrayStack
        The structure to create the :class:`BondList` for.
    residue_starts : ndarray, dtype=int
        Return value of
        ``get_residue_starts(atoms, add_exclusive_stop=True)``.

    Returns
    -------
    BondList
        A bond list containing all inter residue bonds.
    """

    bonds = []

    atom_names = atoms.atom_name
    res_names = atoms.res_name
    res_ids = atoms.res_id
    chain_ids = atoms.chain_id

    # Iterate over all starts excluding:
    #   - the last residue and
    #   - exclusive end index of 'atoms'
    for i in range(len(residue_starts) - 2):
        curr_start_i = residue_starts[i]
        next_start_i = residue_starts[i + 1]
        after_next_start_i = residue_starts[i + 2]

        # Check if the current and next residue is in the same chain
        if chain_ids[next_start_i] != chain_ids[curr_start_i]:
            continue
        # Check if the current and next residue
        # have consecutive residue IDs
        # (Same residue ID is also possible if insertion code is used)
        if res_ids[next_start_i] - res_ids[curr_start_i] > 1:
            continue

        # Get link type for this residue from RCSB components.cif
        curr_link = get_mol_type(res_names[curr_start_i])
        next_link = get_mol_type(res_names[next_start_i])

        # NOTE(review): `next_link in "protein"` is a substring test, not an
        # equality test; it behaves like `== "protein"` for the values
        # get_mol_type() can return ("protein"/"rna"/"dna"/"ligand") but is
        # fragile if new mol types are added.
        if curr_link == "protein" and next_link in "protein":
            # Peptide bond: backbone C of residue i to backbone N of i+1.
            curr_connect_atom_name = "C"
            next_connect_atom_name = "N"
        elif curr_link in ["dna", "rna"] and next_link in ["dna", "rna"]:
            # Phosphodiester bond: O3' of residue i to P of i+1.
            curr_connect_atom_name = "O3'"
            next_connect_atom_name = "P"
        else:
            # Create no bond if the connection types of consecutive
            # residues are not compatible
            continue

        # Index in atom array for atom name in current residue
        # Addition of 'curr_start_i' is necessary, as only a slice of
        # 'atom_names' is taken, beginning at 'curr_start_i'
        curr_connect_indices = np.where(
            atom_names[curr_start_i:next_start_i] == curr_connect_atom_name
        )[0]
        curr_connect_indices += curr_start_i

        # Index in atom array for atom name in next residue
        next_connect_indices = np.where(
            atom_names[next_start_i:after_next_start_i] == next_connect_atom_name
        )[0]
        next_connect_indices += next_start_i

        if len(curr_connect_indices) == 0 or len(next_connect_indices) == 0:
            # The connector atoms are not found in the adjacent residues
            # -> skip this bond
            continue

        bonds.append(
            (curr_connect_indices[0], next_connect_indices[0], struc.BondType.SINGLE)
        )

    return struc.BondList(atoms.array_length(), np.array(bonds, dtype=np.uint32))
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
def add_inter_residue_bonds(
    atom_array: AtomArray,
    exclude_struct_conn_pairs: bool = False,
    remove_far_inter_chain_pairs: bool = False,
) -> AtomArray:
    """
    add polymer bonds (C-N or O3'-P) between adjacent residues based on auth_seq_id.

    exclude_struct_conn_pairs: if True, do not add bond between adjacent residues already has non-standard polymer bonds
                               on atom C or N or O3' or P.

    remove_far_inter_chain_pairs: if True, remove inter chain (based on label_asym_id) bonds that are far away from each other.

    returns:
        AtomArray: Biotite AtomArray merged inter residue bonds into atom_array.bonds
    """
    res_starts = struc.get_residue_starts(atom_array, add_exclusive_stop=True)
    inter_bonds = _connect_inter_residue(atom_array, res_starts)

    if atom_array.bonds is None:
        atom_array.bonds = inter_bonds
        return atom_array

    select_mask = np.ones(len(inter_bonds._bonds), dtype=bool)
    if exclude_struct_conn_pairs:
        # Drop a candidate polymer bond when its N/O3' end already has a
        # non-standard bond to the adjacent residue in the same chain.
        for b_idx, (atom_i, atom_j, b_type) in enumerate(inter_bonds._bonds):
            atom_k = atom_i if atom_array.atom_name[atom_i] in ("N", "O3'") else atom_j
            bonds, types = atom_array.bonds.get_bonds(atom_k)
            if len(bonds) == 0:
                continue
            for b in bonds:
                if (
                    # adjacent residues
                    abs((res_starts <= b).sum() - (res_starts <= atom_k).sum()) == 1
                    and atom_array.chain_id[b] == atom_array.chain_id[atom_k]
                    and atom_array.atom_name[b] not in ("C", "P")
                ):
                    select_mask[b_idx] = False
                    break

    if remove_far_inter_chain_pairs:
        if not hasattr(atom_array, "label_asym_id"):
            logging.warning(
                "label_asym_id not found, far inter chain bonds will not be removed"
            )
        else:
            # Fix: the original fell through into this loop even after the
            # warning above, raising AttributeError on atom_array.label_asym_id.
            # The loop now runs only when the annotation exists.
            for b_idx, (atom_i, atom_j, b_type) in enumerate(inter_bonds._bonds):
                if atom_array.label_asym_id[atom_i] != atom_array.label_asym_id[atom_j]:
                    coord_i = atom_array.coord[atom_i]
                    coord_j = atom_array.coord[atom_j]
                    # 2.5 A is well beyond a covalent C-N / O3'-P bond length.
                    if np.linalg.norm(coord_i - coord_j) > 2.5:
                        select_mask[b_idx] = False

    # filter out removed_inter_bonds from atom_array.bonds
    remove_bonds = inter_bonds._bonds[~select_mask]
    remove_mask = np.isin(atom_array.bonds._bonds[:, 0], remove_bonds[:, 0]) & np.isin(
        atom_array.bonds._bonds[:, 1], remove_bonds[:, 1]
    )
    atom_array.bonds._bonds = atom_array.bonds._bonds[~remove_mask]

    # merged normal inter_bonds into atom_array.bonds
    inter_bonds._bonds = inter_bonds._bonds[select_mask]
    atom_array.bonds = atom_array.bonds.merge(inter_bonds)
    return atom_array
|
| 436 |
+
|
| 437 |
+
|
| 438 |
+
def res_names_to_sequence(res_names: list[str]) -> str:
    """Convert CCD residue names to a one-letter canonical sequence.

    Each residue name is mapped through the CCD one-letter code; residues
    with no code, or with a multi-letter code, are rendered as "X".

    Args:
        res_names: residue names (CCD component IDs), in chain order.

    Returns:
        str: canonical one-letter sequence, same length as ``res_names``.
    """
    letters = []
    for res_name in res_names:
        one = get_one_letter_code(res_name)
        # Fall back to "X" for unknown residues or multi-letter codes.
        letters.append(one if one is not None and len(one) == 1 else "X")
    return "".join(letters)
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/constants.py
ADDED
|
@@ -0,0 +1,964 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
# Chain / interface categories reported by the evaluation pipeline.
EvaluationChainInterface = [
    "intra_ligand", "intra_dna", "intra_rna", "intra_prot",
    "ligand_prot", "rna_prot", "dna_prot", "prot_prot",
    "antibody_antigen", "antibody",
]
|
| 27 |
+
|
| 28 |
+
# Coarse polymer class -> the mmCIF _entity_poly.type strings it covers.
EntityPolyTypeDict = {
    "nuc": [
        "peptide nucleic acid",
        "polydeoxyribonucleotide",
        "polydeoxyribonucleotide/polyribonucleotide hybrid",
        "polyribonucleotide",
    ],
    "protein": ["polypeptide(D)", "polypeptide(L)"],
    "ligand": ["cyclic-pseudo-peptide", "other"],
}
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
### Protein Constants ###
|
| 41 |
+
# https://mmcif.wwpdb.org/dictionaries/mmcif_pdbx_v40.dic/Items/_entity_poly.pdbx_seq_one_letter_code_can.html
|
| 42 |
+
from rdkit.Chem import GetPeriodicTable
|
| 43 |
+
|
| 44 |
+
# One-letter -> three-letter residue codes used for
# _entity_poly.pdbx_seq_one_letter_code_can, plus the B/Z ambiguity codes.
mmcif_restype_1to3 = {
    "A": "ALA", "R": "ARG", "N": "ASN", "D": "ASP", "C": "CYS",
    "Q": "GLN", "E": "GLU", "G": "GLY", "H": "HIS", "I": "ILE",
    "L": "LEU", "K": "LYS", "M": "MET", "F": "PHE", "P": "PRO",
    "S": "SER", "T": "THR", "W": "TRP", "Y": "TYR", "V": "VAL",
    "B": "ASX",  # additional: Asn/Asp ambiguity
    "Z": "GLX",  # additional: Gln/Glu ambiguity
    # "X": "UNK",
}

# Inverse mapping: three-letter -> one-letter.
mmcif_restype_3to1 = {three: one for one, three in mmcif_restype_1to3.items()}
|
| 71 |
+
|
| 72 |
+
"""
|
| 73 |
+
vdw table from rdkit; indices match those of the ligand atom_types.
|
| 74 |
+
https://github.com/rdkit/rdkit/blob/master/Code/GraphMol/atomic_data.cpp#L46
|
| 75 |
+
"""
|
| 76 |
+
|
| 77 |
+
# Van der Waals radii (angstrom) copied from RDKit's periodic table;
# presumably indexed by atomic number - 1 (H at index 0) -- indices match the
# ligand atom_types (see note above).
rdkit_vdws = [
    1.2, 1.4, 2.2, 1.9, 1.8, 1.7, 1.6, 1.55, 1.5, 1.54,
    2.4, 2.2, 2.1, 2.1, 1.95, 1.8, 1.8, 1.88, 2.8, 2.4,
    2.3, 2.15, 2.05, 2.05, 2.05, 2.05, 2.0, 2.0, 2.0, 2.1,
    2.1, 2.1, 2.05, 1.9, 1.9, 2.02, 2.9, 2.55, 2.4, 2.3,
    2.15, 2.1, 2.05, 2.05, 2.0, 2.05, 2.1, 2.2, 2.2, 2.25,
    2.2, 2.1, 2.1, 2.16, 3.0, 2.7, 2.5, 2.48, 2.47, 2.45,
    2.43, 2.42, 2.4, 2.38, 2.37, 2.35, 2.33, 2.32, 2.3, 2.28,
    2.27, 2.25, 2.2, 2.1, 2.05, 2.0, 2.0, 2.05, 2.1, 2.05,
    2.2, 2.3, 2.3, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.4,
    2.0, 2.3, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
    2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
    2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
]
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
"""
|
| 200 |
+
atom37 vdw table. Orders match atom37 indices. Note: the vdw's for N and O are different from rdkit_van_der_waals in this file.
|
| 201 |
+
We used the rdkit values for consistency.
|
| 202 |
+
Ref to https://github.com/aqlaboratory/openfold/blob/80c85b54e1a81d9a66df3f1b6c257ff97f10acd3/openfold/utils/loss.py#L1208C5-L1211C6
|
| 203 |
+
rdkit_van_der_waals_radius = {
|
| 204 |
+
"C": 1.7,
|
| 205 |
+
"N": 1.6,
|
| 206 |
+
"O": 1.55,
|
| 207 |
+
"S": 1.8,
|
| 208 |
+
}
|
| 209 |
+
|
| 210 |
+
atom37_vdw = [
|
| 211 |
+
rdkit_van_der_waals_radius[name[0]]
|
| 212 |
+
for name in residue_constants.atom_types
|
| 213 |
+
]
|
| 214 |
+
|
| 215 |
+
"""
|
| 216 |
+
atom37_vdw = [
|
| 217 |
+
1.55,
|
| 218 |
+
1.7,
|
| 219 |
+
1.7,
|
| 220 |
+
1.7,
|
| 221 |
+
1.52,
|
| 222 |
+
1.7,
|
| 223 |
+
1.7,
|
| 224 |
+
1.7,
|
| 225 |
+
1.52,
|
| 226 |
+
1.52,
|
| 227 |
+
1.8,
|
| 228 |
+
1.7,
|
| 229 |
+
1.7,
|
| 230 |
+
1.7,
|
| 231 |
+
1.55,
|
| 232 |
+
1.55,
|
| 233 |
+
1.52,
|
| 234 |
+
1.52,
|
| 235 |
+
1.8,
|
| 236 |
+
1.7,
|
| 237 |
+
1.7,
|
| 238 |
+
1.7,
|
| 239 |
+
1.7,
|
| 240 |
+
1.55,
|
| 241 |
+
1.55,
|
| 242 |
+
1.55,
|
| 243 |
+
1.52,
|
| 244 |
+
1.52,
|
| 245 |
+
1.7,
|
| 246 |
+
1.55,
|
| 247 |
+
1.55,
|
| 248 |
+
1.52,
|
| 249 |
+
1.7,
|
| 250 |
+
1.7,
|
| 251 |
+
1.7,
|
| 252 |
+
1.55,
|
| 253 |
+
1.52,
|
| 254 |
+
]
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
# Standard residues (AlphaFold3 SI Table 13)
|
| 258 |
+
# Protein residue name -> token id (AlphaFold3 SI Table 13).
# The 20 canonical amino acids take ids 0-19 in this order; UNK is 20.
PRO_STD_RESIDUES = {
    name: idx
    for idx, name in enumerate(
        [
            "ALA", "ARG", "ASN", "ASP", "CYS", "GLN", "GLU", "GLY", "HIS", "ILE",
            "LEU", "LYS", "MET", "PHE", "PRO", "SER", "THR", "TRP", "TYR", "VAL",
            "UNK",
        ]
    )
}
|
| 281 |
+
|
| 282 |
+
# RNA residues occupy token ids 21-24; unknown RNA ("N") is 29.
RNA_STD_RESIDUES = dict(zip(("A", "G", "C", "U", "N"), (21, 22, 23, 24, 29)))

# DNA residues occupy token ids 25-28; unknown DNA ("DN") is 30.
DNA_STD_RESIDUES = dict(zip(("DA", "DG", "DC", "DT", "DN"), (25, 26, 27, 28, 30)))
|
| 297 |
+
|
| 298 |
+
# Unified residue-name -> token id vocabulary (protein 0-20, RNA 21-24 & 29,
# DNA 25-28 & 30). The three key sets are disjoint, so the dict-merge order
# does not change any entry.
STD_RESIDUES = PRO_STD_RESIDUES | RNA_STD_RESIDUES | DNA_STD_RESIDUES
|
| 299 |
+
|
| 300 |
+
# RNA nucleotide -> local index, with "N" (unknown base) as the fifth class.
rna_order_with_x = dict(zip("AGCUN", range(5)))
|
| 307 |
+
|
| 308 |
+
# RNA sequence character -> id. The four canonical bases get unique ids;
# every IUPAC ambiguity code (R/Y/S/W/K/M/B/D/H/V), plus X, I and T, collapses
# onto the "N" id (4); the gap character "-" is 5.
RNA_NT_TO_ID = {"A": 0, "G": 1, "C": 2, "U": 3}
RNA_NT_TO_ID.update({code: 4 for code in "NRYSWKMBDHVXIT"})
RNA_NT_TO_ID["-"] = 5
|
| 329 |
+
|
| 330 |
+
# Partial inverse of RNA_NT_TO_ID: id -> representative character.
# Id 4 stands for "N" and for every ambiguity code that collapsed onto it.
RNA_ID_TO_NT = {idx: nt for idx, nt in enumerate("AGCUN-")}
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
def get_all_elems():
    """Return the 128 element symbols used for the ref_element feature.

    The first 118 entries are the uppercased RDKit periodic-table symbols
    for atomic numbers 1-118; the remaining 10 are placeholder names
    "UNK_ELEM_119".."UNK_ELEM_128" so the list length matches the 128
    ref_element classes of AlphaFold3 SI Table 5.

    Returns:
        list: 128 element symbol strings.
    """
    pt = GetPeriodicTable()
    elem_list = [pt.GetElementSymbol(i).upper() for i in range(1, 119)]
    # Pad to the 128 elements expected by AlphaFold3 SI Table 5 ref_element.
    elem_list += [f"UNK_ELEM_{i}" for i in range(119, 129)]
    return elem_list
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
# Element symbol (uppercase) -> feature index, offset past the residue-token
# ids: len(STD_RESIDUES) + atomic numbers up to 118 + 10 UNK_ELEM slots.
ELEMS = {elem: len(STD_RESIDUES) + idx for idx, elem in enumerate(get_all_elems())}
|
| 360 |
+
|
| 361 |
+
# Per-residue atom-name -> atom index tables for the standard protein, DNA
# and RNA residues. The index of each atom is simply its position in the
# canonical atom ordering, so the nested dicts are generated from ordered
# atom-name strings instead of being written out index by index.
RES_ATOMS_DICT = {
    res_name: {atom: idx for idx, atom in enumerate(atom_names.split())}
    for res_name, atom_names in {
        "ALA": "N CA C O CB OXT",
        "ARG": "N CA C O CB CG CD NE CZ NH1 NH2 OXT",
        "ASN": "N CA C O CB CG OD1 ND2 OXT",
        "ASP": "N CA C O CB CG OD1 OD2 OXT",
        "CYS": "N CA C O CB SG OXT",
        "GLN": "N CA C O CB CG CD OE1 NE2 OXT",
        "GLU": "N CA C O CB CG CD OE1 OE2 OXT",
        "GLY": "N CA C O OXT",
        "HIS": "N CA C O CB CG ND1 CD2 CE1 NE2 OXT",
        "ILE": "N CA C O CB CG1 CG2 CD1 OXT",
        "LEU": "N CA C O CB CG CD1 CD2 OXT",
        "LYS": "N CA C O CB CG CD CE NZ OXT",
        "MET": "N CA C O CB CG SD CE OXT",
        "PHE": "N CA C O CB CG CD1 CD2 CE1 CE2 CZ OXT",
        "PRO": "N CA C O CB CG CD OXT",
        "SER": "N CA C O CB OG OXT",
        "THR": "N CA C O CB OG1 CG2 OXT",
        "TRP": "N CA C O CB CG CD1 CD2 NE1 CE2 CE3 CZ2 CZ3 CH2 OXT",
        "TYR": "N CA C O CB CG CD1 CD2 CE1 CE2 CZ OH OXT",
        "VAL": "N CA C O CB CG1 CG2 OXT",
        "UNK": "N CA C O CB CG OXT",
        # DNA residues (no O2' on the sugar).
        "DA": "OP3 P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' C1' N9 C8 N7 C5 C6 N6 N1 C2 N3 C4",
        "DC": "OP3 P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' C1' N1 C2 O2 N3 C4 N4 C5 C6",
        "DG": "OP3 P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' C1' N9 C8 N7 C5 C6 O6 N1 C2 N2 N3 C4",
        "DT": "OP3 P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' C1' N1 C2 O2 N3 C4 O4 C5 C7 C6",
        "DN": "OP3 P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' C1'",
        # RNA residues (O2' present on the sugar).
        "A": "OP3 P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1' N9 C8 N7 C5 C6 N6 N1 C2 N3 C4",
        "C": "OP3 P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1' N1 C2 O2 N3 C4 N4 C5 C6",
        "G": "OP3 P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1' N9 C8 N7 C5 C6 O6 N1 C2 N2 N3 C4",
        "U": "OP3 P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1' N1 C2 O2 N3 C4 O4 C5 C6",
        "N": "OP3 P OP1 OP2 O5' C5' C4' O4' C3' O3' C2' O2' C1'",
    }.items()
}
|
| 755 |
+
|
| 756 |
+
# CCD codes for common crystallization additives (buffers, cryoprotectants,
# precipitants, ions) that are not biologically relevant ligands.
CRYSTALLIZATION_AIDS = (
    "SO4", "GOL", "EDO", "PO4", "ACT", "PEG", "DMS", "TRS",
    "PGE", "PG4", "FMT", "EPE", "MPD", "MES", "CD", "IOD",
)
|
| 774 |
+
|
| 775 |
+
# One-letter -> three-letter codes for the 20 standard amino acids, with the
# wildcard "X" mapping to UNK.
PROT_STD_RESIDUES_ONE_TO_THREE = {
    "A": "ALA", "R": "ARG", "N": "ASN", "D": "ASP", "C": "CYS",
    "Q": "GLN", "E": "GLU", "G": "GLY", "H": "HIS", "I": "ILE",
    "L": "LEU", "K": "LYS", "M": "MET", "F": "PHE", "P": "PRO",
    "S": "SER", "T": "THR", "W": "TRP", "Y": "TYR", "V": "VAL",
    "X": "UNK",
}
|
| 798 |
+
|
| 799 |
+
# NOTE(review): this re-definition is byte-identical to the
# CRYSTALLIZATION_AIDS tuple defined earlier in this module; one of the
# two copies should probably be removed.
CRYSTALLIZATION_AIDS = (
    "SO4",
    "GOL",
    "EDO",
    "PO4",
    "ACT",
    "PEG",
    "DMS",
    "TRS",
    "PGE",
    "PG4",
    "FMT",
    "EPE",
    "MPD",
    "MES",
    "CD",
    "IOD",
)
|
| 817 |
+
|
| 818 |
+
|
| 819 |
+
### Molecule Constants ###
|
| 820 |
+
# AlphaFold3 SI Table 9
|
| 821 |
+
# CCD codes excluded from ligand evaluation (AlphaFold3 SI Table 9):
# ions, buffers, solvents, cryoprotectants and other crystallization
# artifacts.
LIGAND_EXCLUSION = {'144', 'SEP', 'PG0', 'BEN', 'NH4', 'PO4', '3SY', 'BO3', 'UNL', 'MES', 'FLC', 'PGR', '15P', 'MYR',
                    'POL', 'CIT', 'N', 'SPD', 'CAQ', 'IPA', 'EGL', 'SAR', 'NO3', 'STU', 'NHE', 'BU1', 'OXA', 'TPO',
                    'EEE', 'CAD', 'CBM', 'SPM', 'BCN', 'FMT', 'PEP', 'CM', 'BAM', 'ETF', 'IOD', 'MLI', 'MRD', 'SCN',
                    'GSH', 'CCN', 'SR', '1PE', 'ACY', 'STE', '9JE', 'SEO', 'IHS', 'MLA', 'TBU', 'DEP', 'STO', 'ACE',
                    'NA', 'TRS', 'CPT', 'OHE', 'TME', 'CL', 'BME', 'DN', '3HR', 'LDA', 'SO4', 'MPD', 'OLC', 'DOD',
                    'PE4', 'DOX', 'CMO', 'POP', 'PG4', '2F2', 'DMS', 'IMD', 'NH2', 'EOX', 'IPH', 'ACT', '7PE', 'UNX',
                    'GTT', '7N5', 'AZI', 'FCY', 'SIN', 'AAE', 'BTB', 'BTC', 'PGE', 'PE3', 'MB3', 'EDO', 'PLM', 'BCT',
                    'EOH', 'P6G', 'ACN', 'D10', 'EPE', 'DIO', 'CO3', 'PVO', 'TAR', 'URE', 'BDN', 'GOL', 'MSE', 'HED',
                    'CLR', 'MEG', 'IHP', 'PEO', 'CXS', 'MOH', 'GYF', 'PEG', 'FJO', 'FW5', 'OLA', '2JC', 'ABA', 'O4B',
                    'UPL', 'OME', 'C8E', 'OMB', 'UNK'}  # fmt: skip
|
| 831 |
+
|
| 832 |
+
|
| 833 |
+
# AlphaFold3 SI Table 11
|
| 834 |
+
GLYCANS = {'79J', 'LXZ', 'KO1', 'Z57', 'XDX', '8OQ', 'G0S', '14T', 'ZB3', '9PG', 'BGL', 'GYU', 'AHG', 'SUC', 'ADA', 'NGR',
|
| 835 |
+
'4R1', 'EBQ', 'GAF', 'NAA', 'GYP', 'NDG', 'U2D', 'ISL', '9GP', 'KDM', 'HSX', 'NYT', 'V3P', '4NN', 'Z3L', 'ZCZ',
|
| 836 |
+
'D5E', 'RIP', '3LR', 'GL1', 'K99', 'MQG', 'RAM', 'TUP', 'KDB', 'SIO', 'Z5L', 'GUL', 'GU2', 'EQV', '0V4', 'ABD',
|
| 837 |
+
'RY7', '5II', 'GAL', '2GL', 'DR5', '4RS', 'MNA', 'DFX', '0WK', 'HTG', 'RP5', 'A1Q', 'B1N', 'GUF', 'NGA', 'TMR',
|
| 838 |
+
'C3X', '9S7', 'XLS', 'MAG', 'RST', 'SDY', 'HSH', 'GN4', 'GTR', 'KBA', '6YR', 'CKB', 'DDA', 'RHC', 'OPM', 'SIZ',
|
| 839 |
+
'GE3', 'TS8', 'Z6W', 'BZD', '56N', 'RIB', 'GL6', '8GA', 'GLC', 'TAG', 'QIF', 'TA6', 'UAP', 'TVY', 'GC1', 'ARW',
|
| 840 |
+
'GU3', 'LBS', 'KDD', 'NPF', '49V', 'CDR', '12E', '6LA', '2M4', 'SA0', 'HNW', 'AOG', 'G8Z', '8LR', 'GPH', 'XXX',
|
| 841 |
+
'GPM', 'MTT', 'JFZ', 'LOG', 'LMO', '5TH', '8I4', 'GUP', '5KQ', 'R2G', 'SSG', 'P8E', 'RF5', 'TOC', 'CT3', '2FL',
|
| 842 |
+
'73E', 'VJ4', '0H0', 'ERI', 'AMG', '3GR', 'BO1', 'AFD', 'FYJ', 'IDF', 'NBY', 'DOM', 'MBF', 'QDK', 'TDG', '6GR',
|
| 843 |
+
'MAV', '1X4', 'AF1', 'EEN', 'ZB1', 'Z2D', '445', 'KHP', 'LKS', '10M', '491', 'OTU', 'BNG', 'AY9', 'KDR', 'LEC',
|
| 844 |
+
'FFX', 'AFO', 'SGA', '16F', 'X34', 'SEJ', 'LAG', 'DNO', '6PZ', 'LBT', 'OSU', '3BU', '6K3', 'SFU', 'YDR', 'SIA',
|
| 845 |
+
'2WP', '25E', 'SMD', 'NBG', 'DO8', 'LGU', 'S81', 'Z3Q', 'TWA', 'G6S', '2WS', 'G6D', '18D', 'IN1', '64K', 'QPS',
|
| 846 |
+
'PTQ', 'FX1', 'RVM', '8GP', 'NLC', 'FCA', 'JLT', 'AH8', 'MFB', 'RRJ', 'SOL', 'TM5', 'TCB', 'GU5', 'TWY', 'ETT',
|
| 847 |
+
'8YV', 'SG6', 'XMM', '17T', 'BGC', 'MLR', 'Z6J', '9SJ', 'R2B', 'BBK', 'BEM', 'LTG', '0NZ', 'DKZ', '3YW', 'ASO',
|
| 848 |
+
'FUB', '4GL', 'GLT', 'KTU', 'CBF', 'ARI', 'FIF', 'LCN', 'SG5', 'AC1', 'SUP', 'ZMR', 'GU8', 'YYH', 'XKJ', 'JSV',
|
| 849 |
+
'DQR', 'M6D', 'FBP', 'AFP', 'F6P', 'GLG', 'JZR', 'DLG', '9C1', 'AAL', 'RRY', 'ZDC', 'TVS', 'B1H', 'XXM', '8B7',
|
| 850 |
+
'RCD', 'UBO', '7D1', 'XYT', 'WZ2', 'X1X', 'LRH', 'GDA', 'GLS', 'G6P', '49A', 'NM9', 'DVC', 'MG5', 'SCR', 'MAF',
|
| 851 |
+
'149', 'LFC', 'FMF', 'FRU', 'BG8', 'GP4', 'GU1', 'XXR', '4V5', 'MA2', '293', '6KH', 'GAA', 'MXY', 'QV4', 'MSX',
|
| 852 |
+
'GU6', '95Z', 'Z9M', 'ARB', 'FNY', 'H1S', 'VG1', 'VTB', 'Z61', 'H6Z', '7K3', 'XGP', 'SOE', 'Z6H', 'GYV', 'MLB',
|
| 853 |
+
'DR3', 'ISD', 'BGN', 'AXR', 'SCG', 'Z8T', '6UD', 'KDF', 'GLA', 'BNX', '3MG', 'BDP', 'KFN', 'Z9N', '2FG', 'PNA',
|
| 854 |
+
'MUB', 'ZDO', '9WJ', 'GMB', 'LER', 'TVM', '89Y', 'Z4Y', '9SM', 'NGS', 'LAO', 'KGM', 'FKD', 'M1F', 'BG6', 'LAK',
|
| 855 |
+
'8GG', '6LS', 'GBH', 'CEG', 'BDR', 'RR7', 'SOG', 'AZC', 'AMU', 'BS7', '3S6', 'MXZ', 'Z3U', 'MDP', '6MJ', 'M3M',
|
| 856 |
+
'DT6', 'PRP', 'TUG', 'Z16', 'IDG', 'TUR', 'Z4S', 'GM0', 'A0K', 'GCN', 'ZEE', 'UEA', 'HVC', 'CE5', 'FUD', 'NAG',
|
| 857 |
+
'GPO', '22S', '3J4', 'DKX', 'FMO', 'BXP', 'NSQ', '50A', 'MAT', '5TM', '0MK', '9OK', 'RI2', 'SZZ', 'IDS', 'JRV',
|
| 858 |
+
'18O', '1CF', 'RAO', 'P53', '27C', 'Z3K', 'Z4U', 'Z4R', 'B4G', '6KU', 'HBZ', '07E', 'KBG', '98U', 'GFP', 'LFR',
|
| 859 |
+
'G2F', '51N', 'FUF', 'LGC', '6S2', 'E3M', 'G7P', 'OTN', 'MVP', 'TVD', 'BBV', 'E5G', 'MJJ', 'IEM', 'FSA', 'CE8',
|
| 860 |
+
'U1Y', '1FT', 'HTM', 'DLD', 'YO5', 'W9T', '5N6', 'PNG', 'NGY', 'DSR', 'M3N', 'GP0', '3MK', 'RBL', 'GTM', 'FSW',
|
| 861 |
+
'4JA', 'YYM', 'Z4V', '3HD', '2DR', 'AIG', 'GL0', 'BND', 'TM6', 'TUJ', 'DAN', '5GF', '4QY', '3FM', '6KW', 'LNV',
|
| 862 |
+
'289', 'BFN', 'PSG', 'U9J', 'YX0', 'EQP', 'YZ0', '0BD', 'GAT', 'LVZ', 'FUL', '22O', 'DLF', 'MA1', 'BXY', 'C3G',
|
| 863 |
+
'CR6', 'GNS', 'EEQ', 'IDY', 'FFC', 'NBX', 'SID', '9KJ', '9WZ', 'M2F', 'FK9', 'SSH', 'TWG', 'RVG', 'BXX', '24S',
|
| 864 |
+
'FSM', 'GDL', 'F1X', '3R3', 'ALX', '4GC', 'GL2', 'DL6', 'GS1', 'AMV', 'TVV', '2DG', 'RGG', 'TFU', '1GN', 'N3U',
|
| 865 |
+
'SOR', 'MA3', 'GCT', 'H1M', '16G', '49T', 'BCD', 'GPW', 'DAG', 'GN1', 'IAB', 'EBG', 'GPU', '38J', '1LL', 'DR2',
|
| 866 |
+
'YIO', 'YKR', '15L', 'WZ1', 'BTG', 'GPK', '5MM', '26O', 'AMN', 'DEL', 'CTT', '83Y', 'GMT', 'CTO', 'MBE', '1SD',
|
| 867 |
+
'6ZC', 'AXP', 'OX2', '5LT', 'MRH', '6BG', 'MDA', 'SG7', '045', 'GC4', 'LDY', 'YYJ', '07Y', 'KDO', 'GP1', 'BHG',
|
| 868 |
+
'DPC', 'BM3', 'GU4', 'ISX', 'P6P', 'GPQ', '1S4', '475', 'GYE', 'CBK', 'CEZ', 'SGD', 'TH1', 'V3M', 'RWI', 'RM4',
|
| 869 |
+
'U9M', 'U2A', '7GP', '05L', 'Z0F', 'GLO', 'LXB', 'TGA', '61J', 'GYG', 'GCU', 'GE1', 'F1P', 'GLP', 'CTR', 'AHR',
|
| 870 |
+
'3LJ', 'FUY', 'JVA', 'LAT', 'NHF', 'RB5', 'XYS', 'LXC', 'SLT', 'U8V', 'GMH', 'EAG', 'GCV', 'B6D', 'IDU', 'KG1',
|
| 871 |
+
'BDF', 'NTP', 'IXD', 'RZM', 'PH5', 'SHB', 'X6Y', 'B16', 'Z9E', '9VP', 'LAH', 'H2P', 'TNX', '5GO', 'TGY', '5SP',
|
| 872 |
+
'RHA', '5KV', 'GTK', 'SUS', 'DAF', '6DM', '8S0', '6MN', 'G4D', 'NT1', 'XYF', '5TJ', '46Z', '9AM', '7K2', '6C2',
|
| 873 |
+
'WIA', '9YW', 'G4S', '46D', 'Z9W', 'ABL', 'XYZ', 'G3I', 'S7P', 'GC9', 'GQ1', 'GCO', 'M6P', 'WUN', 'U63', 'ZB2',
|
| 874 |
+
'GLD', 'T6P', 'ZEL', '145', '2OS', 'BGP', 'C4W', 'IDX', 'MUR', '3SA', 'CR1', '34V', 'DEG', 'F55', 'L0W', 'TYV',
|
| 875 |
+
'CJB', 'TW7', 'DDL', '5L3', 'NGC', 'ACX', 'JVS', 'NA1', 'GAD', '7JZ', 'BOG', 'GCW', 'BDG', 'Z15', '0LP', 'ABE',
|
| 876 |
+
'RG1', 'DGU', 'N1L', 'NGE', 'PUF', 'B9D', '49S', '5LS', '4N2', '23V', 'RUU', 'B0D', 'RTV', '42D', 'M1P', 'MAB',
|
| 877 |
+
'2F8', 'TQY', 'L6S', 'V71', '2H5', 'M8C', 'NTF', 'H3S', 'LM2', 'MN0', 'JV4', '9WN', 'U9G', 'LZ0', 'X0X', 'TXB',
|
| 878 |
+
'3DO', 'SG4', 'IDR', '8B9', 'TOA', 'CRA', 'HSJ', '0HX', 'FDQ', 'FUC', 'ABF', 'ALL', 'G20', 'GL9', 'IDC', 'LOX',
|
| 879 |
+
'Z2T', 'RP6', '2HA', 'AHM', 'DRI', 'EMZ', 'GMZ', 'HD4', 'GU9', 'L1L', 'PNW', 'PPC', 'MMA', 'CE6', '5KS', 'MGC',
|
| 880 |
+
'XLF', 'KO2', 'RUG', 'HSG', 'SF6', 'IPT', 'TF0', 'GCD', 'B8D', '0YT', 'GRX', 'HNV', 'FVQ', 'RV7', 'J5B', 'ERE',
|
| 881 |
+
'DFR', 'LVO', '4GP', 'BQY', 'BMA', 'KDA', 'ARA', 'KDN', 'ZCD', 'A5C', 'T68', 'XYL', 'YJM', 'NM6', '9CD', 'CNP',
|
| 882 |
+
'U97', '9T1', 'C5X', 'R1X', 'BW3', '09X', 'GNX', 'PDX', 'Z9D', 'DGO', 'SLM', '66O', '4CQ', 'X6X', 'RTG', 'HSY',
|
| 883 |
+
'20X', 'GCB', 'EUS', 'FNG', '1S3', 'EGA', 'MQT', 'NXD', '5TK', 'Z9K', 'TGR', '9MR', 'M7P', 'PA1', 'MFU', 'UBH',
|
| 884 |
+
'CBI', 'TMX', 'T6D', '32O', 'JHM', 'X2F', '4SG', '3DY', 'SGC', 'PAV', 'A2G', 'LAI', '0UB', 'BXF', '3J3', '9T7',
|
| 885 |
+
'T6T', 'OI7', 'ANA', '9QG', 'K5B', 'KOT', 'GIV', 'MGL', 'GL4', '9SP', 'FDP', 'GPV', '6KS', 'GXV', 'NFG', 'M7B',
|
| 886 |
+
'DG0', '57S', 'GUZ', '96O', 'GCS', 'MAN', 'YYB', 'TWD', 'MGS', 'TT7', 'PNJ', 'GXL', 'TRE', 'G28', '7NU', '8PK',
|
| 887 |
+
'LKA', 'ASG', 'SF9', '2M8', '1GL', '5KT', 'BWG', 'OTG', 'VJ1', 'ZGE', '40J', 'Z4K', 'F58', 'KME', 'SR1', 'ZB0',
|
| 888 |
+
'UDC', '6KL', '6LW', '8EX', 'D1M', '62I', 'H6Q', 'RAE', 'SHD', 'AGL', 'DGS', 'VKN', 'TWJ', 'MRP', 'TGK', 'HSQ',
|
| 889 |
+
'ASC', 'F8X', '6GB', '0XY', 'BMX', 'SN5', 'Z5J', 'ZD0', 'DJB', 'KDE', 'TEU', 'M55', 'YYQ', 'DK4', 'D6G', 'KD5',
|
| 890 |
+
'AH2', '4AM', 'RER', '16O', 'C3B', 'G1P', 'NG6', 'MBG', 'Z4W', 'MAW', '147', 'NGK', 'CKP', 'DJE', 'GL5', 'TVG',
|
| 891 |
+
'PKM', 'L6T', 'XS2', '2GS', 'BTU', 'G16', 'PSV', 'AQA', 'MCU', 'SNG', '2M5', 'SLB', 'BM7', 'H53', 'MA8', 'OAK',
|
| 892 |
+
'GRF', 'BGS', 'NTO', 'YYK', 'EPG', '6GP', 'MYG', 'FCT', 'Z9H', 'GL7', '48Z', '4UZ', '7CV', 'DYM', 'GLF', 'GU0',
|
| 893 |
+
'CGF', 'STZ', '44S', 'LB2', 'TU4', 'Z8H', '5QP', 'A6P', 'XYP', 'B2G', 'U9A', 'SWE', 'NGZ', 'SGN', 'B7G', 'MAL',
|
| 894 |
+
'291', 'FSI', 'R1P', 'ACR', 'PZU', 'X2Y', 'Z9L', 'STW', 'U9D', 'X1P', 'TTV', 'GS9', 'QKH', 'SHG', 'N9S', 'NNG',
|
| 895 |
+
'RP3', 'G3F', 'YX1', 'EMP', 'XIL', '08U', 'WOO', 'FCB', 'NG1', 'TRV', '20S', 'RAF', 'GZL', 'C4B', '9SG', 'GAC'} # fmt: skip
|
| 896 |
+
|
| 897 |
+
|
| 898 |
+
# AlphaFold3 SI Table 12
# NOTE(review): despite the name, this set contains CCD codes for sugars/glycans
# (e.g. NAG, BMA, MAN, FUC, GLC) rather than ions — confirm against AF3 SI Table 12.
IONS = {'XGP', 'Z4K', '147', 'B0D', 'G6D', 'RIB', 'AXR', 'SOG', 'NTF', 'SHB', 'RG1', 'G6S', 'GPO', 'BTG', '5LT', 'CEG', 'KG1',
        'TDG', 'TRV', 'WZ1', 'ARI', 'HVC', 'TM6', '2DG', '6K3', 'ARA', 'ASO', '6GB', 'NBX', 'OTG', 'ASG', 'YO5', 'MRH', 'GYP',
        'C4B', 'GDA', 'MUB', 'XXM', 'M6D', 'OPM', 'GYV', 'DKX', '9SG', 'LOG', 'TRE', 'DLG', 'FNG', 'BBK', 'ABF', 'AQA', '3BU',
        'SIA', 'CGF', 'LBS', 'QV4', 'NAA', 'GLC', 'BHG', 'MSX', 'ZB1', 'YYJ', 'TUP', '6ZC', '0WK', 'RY7', 'L1L', 'RRY', 'M55',
        '9PG', '5GF', '4V5', 'FMO', 'SWE', 'KDA', 'P8E', '14T', 'DL6', 'CKB', '2M8', 'AHR', 'NGY', '8GP', 'YYQ', 'LVO', 'CRA',
        'GU9', 'PPC', '6GP', 'CR1', 'G20', 'T6P', 'EMZ', 'RHA', 'GC4', 'AH2', 'FCT', 'QDK', 'DDA', 'RTV', '8S0', 'TVG', 'HNV',
        'FYJ', 'BDP', 'GYE', 'TS8', 'CEZ', '42D', 'NHF', 'NT1', 'WOO', '0LP', 'HBZ', 'SG5', 'NM9', 'CJB', 'DLF', 'EUS', 'IDY',
        '2GL', 'NTO', 'PNG', 'B2G', '7NU', '4UZ', '5LS', '475', 'DJE', 'Z9E', 'GC9', 'QPS', '0NZ', 'F1X', 'G8Z', '2F8', '3SA',
        '46D', '3DO', '6PZ', 'OI7', 'SLM', 'A0K', '9SJ', 'TWD', 'AOG', 'TW7', '2WS', 'GU5', 'NSQ', 'FUD', 'GLO', 'TNX', 'XYP',
        'JFZ', '2HA', 'G16', 'V3M', 'RTG', 'C4W', 'R2G', 'HD4', '66O', 'MFB', 'GXL', '8YV', 'NFG', 'FFC', '3YW', 'XYZ', '445',
        'IXD', 'GUL', 'CTO', '05L', 'Z3L', 'RBL', 'DR5', 'S81', 'CTR', '15L', 'GLP', '7K3', 'LDY', 'Z4S', 'H2P', '4GP', '5SP',
        '18O', 'DGS', 'OX2', 'DFR', 'GN1', 'BGL', 'Z9K', 'GU4', '0V4', 'MA2', 'U2A', 'MXZ', 'PA1', '9YW', 'GS9', '3MK', 'AAL',
        'NBY', 'XXX', 'ISD', 'SEJ', 'DKZ', 'GL9', '23V', 'AMN', 'AHG', '25E', 'DJB', '7K2', 'GDL', '08U', 'TT7', 'DRI', 'HSY',
        'LB2', 'GCV', 'X1P', 'MN0', 'BW3', 'U9J', 'FFX', 'Z3U', 'LOX', 'MQG', 'HSG', 'GCO', 'GPQ', 'IDR', '2GS', 'AGL', 'RUU',
        '5KV', 'R1X', 'LZ0', 'P6P', '0H0', '32O', 'LAG', 'YYK', '07E', '6KS', 'KOT', '17T', 'TQY', 'RM4', 'LNV', 'BGN', 'STW',
        'NGC', 'GLF', '2WP', 'GL5', 'KHP', '9SP', 'LAI', 'KDB', 'JVA', 'OTN', 'NA1', 'RR7', 'B16', 'PSV', 'NXD', 'C5X', 'G1P',
        'RRJ', 'DAF', '5N6', 'SG4', 'KDN', '95Z', 'FDQ', 'K5B', 'MDP', 'GTK', '4SG', 'ALL', 'LXC', 'TM5', 'NGA', '98U', '7JZ',
        'A6P', 'UBH', '293', '9T7', 'PUF', '5TM', 'VTB', 'BGP', 'JV4', 'SN5', 'FSA', 'LAK', 'G7P', 'BGC', 'ZCD', '7GP', '79J',
        'FKD', 'TWY', 'ZGE', 'OAK', 'FMF', 'ZCZ', 'GL2', 'MAV', 'ZB3', 'SA0', '3LR', 'SHD', 'XLS', 'DOM', 'Z4R', 'GP0', '5KS',
        'KO1', 'FCB', 'LFC', 'AC1', 'NPF', 'X6Y', 'IDF', '20X', '6KL', '6LW', '49S', '0YT', 'BDR', 'GBH', 'LAH', 'KO2', '40J',
        '4CQ', 'D5E', 'T6D', 'SUP', 'TGR', 'Z57', 'SDY', '4NN', 'MNA', 'Z5J', '20S', 'CT3', 'DQR', '5MM', '83Y', '49T', 'BDG',
        'GL1', 'TOC', '6UD', 'GM0', 'GU3', '18D', 'ADA', '4AM', '9WZ', 'HSX', 'QIF', '6DM', '4RS', 'KDF', 'GAL', 'ISL', 'Z9H',
        'GC1', 'Z9W', 'NBG', 'MAL', 'BGS', 'W9T', 'U9A', '62I', 'M6P', 'AFO', 'C3G', 'M2F', 'RUG', 'ARW', 'LEC', 'B8D', '61J',
        'GL7', 'F58', 'GP4', 'GFP', 'TVY', 'ZB0', 'FSM', 'BDF', 'TCB', 'ZEL', 'IDG', '9CD', 'PNA', 'SF9', 'DSR', 'MG5', 'E5G',
        'PNW', 'TH1', '1S4', 'PTQ', 'KDD', 'SSH', 'F55', 'V71', 'VG1', '9T1', '145', 'GU2', '2M5', '8I4', 'H1S', 'YYB', '1LL',
        '4N2', 'BG6', 'R2B', 'MAT', 'LMO', 'OSU', 'PSG', 'RCD', '26O', 'DGO', 'SID', 'FUB', '2FL', '3HD', '34V', 'FK9', 'AMG',
        'G4D', 'EPG', 'BWG', 'KTU', '491', 'JHM', 'NG1', 'DLD', 'MCU', 'MQT', 'EQV', 'CBF', '4GL', 'GS1', 'DEG', 'DDL', 'SGA',
        '16O', 'X6X', 'H53', 'FUC', 'IDS', 'LTG', 'TMX', '9SM', '045', 'DAN', 'FRU', 'Z5L', 'AHM', 'BNG', 'AFP', 'MAF', 'UBO',
        'BOG', '2H5', 'NG6', '10M', 'NM6', 'RST', 'C3X', '9S7', '49A', 'AXP', 'PH5', 'ISX', 'B6D', 'GU6', 'TWG', '6GR', 'H3S',
        'Z61', '9WJ', 'BMA', 'U63', 'LKA', 'GRF', 'VJ1', 'RZM', 'MA3', '0XY', 'GAF', 'GAD', '1FT', '149', 'DPC', 'LFR', 'B9D',
        'CE5', 'SOR', '6KU', 'SFU', 'BEM', 'YKR', '38J', 'N3U', 'ARB', 'CBK', 'SGD', '8EX', 'WZ2', '8B9', 'TF0', 'X2Y', 'PKM',
        'RF5', 'D1M', 'AF1', 'DR2', 'EQP', 'AMV', 'PRP', 'VJ4', 'BCD', '1GN', 'SMD', '9QG', 'GCW', 'A5C', 'M3N', 'SZZ', 'B1H',
        'GPH', 'NDG', '5KT', 'TYV', 'KDM', 'A2G', 'CE6', 'H1M', 'JVS', 'ABL', 'LAO', 'P53', 'GCN', 'QKH', 'U2D', 'YYH', '6S2',
        'L0W', 'DEL', 'G2F', 'LER', 'MGC', 'RI2', '5KQ', 'DT6', 'U97', 'BG8', '1X4', 'GYG', 'U9D', 'SG7', '8B7', 'FCA', 'RWI',
        '8GG', 'TAG', 'ERE', '46Z', '5QP', 'UDC', '51N', 'SGN', 'NLC', '8LR', 'L6T', 'WIA', 'TMR', 'IDC', 'GLT', 'FDP', 'GCT',
        'FSW', 'XYS', 'GAA', 'N9S', 'DO8', 'UAP', 'TUG', 'F1P', '2FG', '12E', '56N', 'IAB', 'LAT', 'X1X', 'MBE', 'GP1', 'X34',
        '6MJ', '6KH', 'G3F', '3DY', 'XYF', 'GE1', 'MAB', 'Z9L', '289', 'GIV', 'F8X', '9WN', 'KDO', 'GLA', 'SIZ', 'G0S', 'EGA',
        'MJJ', 'B7G', 'BND', 'JRV', '1S3', 'DAG', 'GL0', 'GPV', 'HTM', '3R3', 'SHG', 'DR3', 'TTV', 'DK4', '22S', 'IDU', 'XIL',
        'RER', '6BG', 'GXV', 'BTU', 'GE3', 'H6Z', 'ZD0', 'SF6', 'VKN', 'GYU', '16F', 'K99', 'KGM', 'FX1', 'NGS', 'RVG', 'YX1',
        '4GC', 'EEQ', 'XDX', 'MVP', 'PNJ', 'BS7', 'M7B', '0BD', 'AIG', 'TVV', 'BXY', 'T68', 'SIO', '8OQ', '2OS', 'S7P', 'GNX',
        'TUR', 'YX0', 'DVC', 'NGK', 'M8C', 'RHC', 'GPM', 'LKS', '64K', 'GMT', 'JLT', 'XS2', 'LBT', 'TVM', '6MN', 'DYM', 'E3M',
        'NGR', 'G6P', 'RAO', 'SCR', 'YJM', 'MRP', 'YIO', 'ACR', '291', '3GR', 'M1F', 'L6S', 'XLF', 'GU1', 'LVZ', 'DNO', '22O',
        'SOL', 'GPW', 'KD5', 'GCU', 'ERI', 'YZ0', 'TXB', 'ABD', 'YYM', 'BFN', 'G4S', 'GAC', 'PAV', 'MMA', 'RV7', 'MBG', '16G',
        'MA8', 'GU8', '4JA', 'NTP', 'FNY', '07Y', '1CF', 'KDE', 'Z16', 'CBI', '50A', 'Z4W', 'U9G', 'D6G', 'JSV', 'YDR', 'DGU',
        'Z15', 'G3I', 'XKJ', 'IEM', 'CDR', 'GLG', '0HX', 'TA6', '57S', 'LGU', '27C', 'BO1', 'EEN', 'HSJ', 'GLD', 'RP3', 'FSI',
        'LRH', '8PK', 'GTR', 'B1N', 'XXR', 'TFU', 'RAF', 'ETT', 'AY9', '3FM', 'G28', '2DR', 'FUL', 'CE8', 'GQ1', 'TGA', '6C2',
        'NGZ', '6LS', 'SOE', 'BQY', 'HSH', 'XYL', '5TH', 'A1Q', 'HTG', 'Z3K', '3MG', 'GMH', 'M1P', 'ASC', '73E', 'Z8T', 'STZ',
        'RAE', 'GL6', '7CV', 'GPU', '5L3', '7D1', 'CKP', 'BXP', 'M7P', 'RVM', 'TWA', '4R1', 'N1L', 'X2F', 'TVD', '3J3', 'TOA',
        'B4G', 'WUN', '0MK', '6YR', 'H6Q', 'CNP', 'TEU', 'MBF', '44S', 'Z9N', 'BM7', 'NGE', 'U9M', 'GMB', 'MTT', '9GP', 'DG0',
        'RP5', 'KBA', 'ALX', 'FVQ', 'TGY', 'EBG', 'BXF', '9C1', 'BBV', 'AFD', '4QY', 'GCD', 'FBP', '96O', 'GNS', 'OTU', 'ACX',
        'RP6', 'UEA', 'SGC', 'Z4V', 'RAM', 'AZC', 'J5B', '1GL', 'TGK', 'HSQ', 'LM2', 'MYG', 'PDX', 'Z6W', 'ZDC', '09X', 'IDX',
        '9MR', 'MFU', 'CR6', 'Z8H', 'SUS', 'PZU', '89Y', '5TK', 'KME', 'U1Y', 'Z4U', 'LCN', 'GPK', 'MUR', '5TJ', 'NYT', '24S',
        'SR1', '0UB', '48Z', 'MGL', 'Z6J', 'BMX', 'C3B', 'TVS', 'SLB', 'IPT', 'MLB', 'SLT', 'Z9D', 'GRX', 'AH8', 'F6P', 'BNX',
        'JZR', 'LXB', 'M3M', 'XYT', 'MA1', 'GTM', 'SCG', 'Z3Q', 'KFN', 'LGC', 'ZB2', 'FIF', 'GLS', 'SSG', 'Z4Y', 'T6T', 'GCS',
        'GZL', 'U8V', 'V3P', 'ABE', 'MGS', '6KW', '8GA', 'BZD', 'FUF', 'GMZ', 'FUY', 'HNW', 'LXZ', 'IN1', 'SNG', 'GAT', 'Z9M',
        'BM3', 'ZDO', '9AM', '3LJ', 'X0X', 'MAN', '5GO', 'AMU', 'GUF', 'XMM', 'EAG', 'SUC', 'BXX', 'Z0F', '9OK', 'CTT', 'MLR',
        '49V', 'ZMR', 'TWJ', 'MAW', '5II', 'ZEE', 'KBG', 'EMP', 'GUZ', 'TUJ', 'RB5', 'GCB', '9KJ', 'MAG', 'Z2D', '6LA', '2M4',
        'GN4', 'MDA', 'TU4', 'Z2T', 'GL4', 'EBQ', 'NNG', '1SD', 'ANA', 'MXY', 'Z6H', 'GU0', 'GUP', 'SG6', 'NAG', '9VP', 'RIP',
        '3S6', 'KDR', 'R1P', '3J4', 'DFX', 'RGG'}  # fmt: skip


# AlphaFold3 SI Table 15
# Common natural ligands (cofactors, nucleotides, etc.) used by the PoseBusters V2 filter.
PBV2_COMMON_NATURAL_LIGANDS = {'UPG', 'CDP', 'DSG', 'APC', 'GSP', 'FAD', 'IPE', 'NAI', '2BA', 'PGA', 'A3P', 'PRP', 'NAD', 'PLG',
                               'SFG', 'MFU', 'APR', 'GTP', 'PLP', 'UDP', 'SAH', 'ACP', 'GSH', 'CTP', 'AKG', 'F15', '5AD', 'BCN',
                               'BDP', 'H4B', 'PHO', 'FMN', 'MTA', 'NGA', 'OGA', 'SLB', 'SIN', 'C5P', 'TPP', 'BGC', 'NCA', 'UD1',
                               'ANP', 'DGL', 'FDA', 'URI', 'ADP', 'MTE', 'PJ8', 'ATP'}  # fmt: skip
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/data_pipeline.py
ADDED
|
@@ -0,0 +1,310 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import os
|
| 16 |
+
from collections import defaultdict
|
| 17 |
+
from pathlib import Path
|
| 18 |
+
from typing import Any, Optional, Union
|
| 19 |
+
|
| 20 |
+
import biotite.structure.io as strucio
|
| 21 |
+
import numpy as np
|
| 22 |
+
import pandas as pd
|
| 23 |
+
import torch
|
| 24 |
+
from biotite.structure import AtomArray
|
| 25 |
+
|
| 26 |
+
from protenix.data.msa_featurizer import MSAFeaturizer
|
| 27 |
+
from protenix.data.tokenizer import TokenArray
|
| 28 |
+
from protenix.utils.cropping import CropData
|
| 29 |
+
from protenix.utils.file_io import load_gzip_pickle
|
| 30 |
+
|
| 31 |
+
torch.multiprocessing.set_sharing_strategy("file_system")
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class DataPipeline(object):
    """Stateless helpers for loading, cropping and exporting bioassembly data.

    Every public method is a ``@staticmethod``; the class only serves as a
    namespace for the data-preparation steps of training/inference.
    """

    @staticmethod
    def get_label_entity_id_to_asym_id_int(
        atom_array: "AtomArray",
    ) -> dict[str, set[int]]:
        """
        Get a dictionary that associates each label_entity_id with the set of
        asym_id_int values observed for it.

        Args:
            atom_array (AtomArray): AtomArray object.

        Returns:
            dict[str, set[int]]: label_entity_id -> set of asym_id_int.
                (The original annotation said ``dict[str, int]``, but one
                entity may occur in several chains; the returned container is
                a ``defaultdict(set)``.)
        """
        entity_to_asym_id = defaultdict(set)
        for atom in atom_array:
            entity_to_asym_id[atom.label_entity_id].add(atom.asym_id_int)
        return entity_to_asym_id

    @staticmethod
    def get_data_bioassembly(
        bioassembly_dict_fpath: Union[str, Path],
    ) -> dict[str, Any]:
        """
        Load a gzip-pickled bioassembly dict from disk.

        Args:
            bioassembly_dict_fpath (Union[str, Path]): The path to the bioassembly dictionary file.

        Returns:
            dict[str, Any]: The bioassembly dict with sequence, atom_array and token_array.

        Raises:
            AssertionError: If the bioassembly dictionary file does not exist.
        """
        assert os.path.exists(
            bioassembly_dict_fpath
        ), f"File not exists {bioassembly_dict_fpath}"
        return load_gzip_pickle(bioassembly_dict_fpath)

    @staticmethod
    def _map_ref_chain(
        one_sample: "pd.Series", bioassembly_dict: dict[str, Any]
    ) -> list[int]:
        """
        Map the chain or interface chain_x_id to the reference chain asym_id.

        Args:
            one_sample (pd.Series): A dict of one chain or interface from the indices list.
            bioassembly_dict (dict[str, Any]): The bioassembly dict with sequence, atom_array and token_array.

        Returns:
            list[int]: asym_id_int of the chosen chain or interface; length 1
                (``type == "chain"``) or 2 (interface).
        """
        atom_array = bioassembly_dict["atom_array"]
        ref_chain_indices = []
        for chain_id_field in ["chain_1_id", "chain_2_id"]:
            chain_id = one_sample[chain_id_field]
            assert np.isin(
                chain_id, np.unique(atom_array.chain_id)
            ), f"PDB {bioassembly_dict['pdb_id']} {chain_id_field}:{chain_id} not in atom_array"
            # All atoms of a chain share one asym_id_int; take the first.
            chain_asym_id = atom_array[atom_array.chain_id == chain_id].asym_id_int[0]
            ref_chain_indices.append(chain_asym_id)
            if one_sample["type"] == "chain":
                # A single-chain sample has no second chain to map.
                break
        return ref_chain_indices

    @staticmethod
    def get_msa_raw_features(
        bioassembly_dict: dict[str, Any],
        selected_indices: np.ndarray,
        msa_featurizer: Optional["MSAFeaturizer"],
    ) -> Optional[dict[str, np.ndarray]]:
        """
        Get tokenized MSA features of the bioassembly.

        Args:
            bioassembly_dict (dict[str, Any]): The bioassembly dict with sequence, atom_array and token_array.
            selected_indices (np.ndarray): Cropped token indices (or None for no cropping).
            msa_featurizer (Optional[MSAFeaturizer]): MSAFeaturizer instance.

        Returns:
            Optional[dict[str, np.ndarray]]: The tokenized MSA features,
                or None if no featurizer is provided.
        """
        if msa_featurizer is None:
            return None

        entity_to_asym_id_int = dict(
            DataPipeline.get_label_entity_id_to_asym_id_int(
                bioassembly_dict["atom_array"]
            )
        )
        return msa_featurizer(
            bioassembly_dict=bioassembly_dict,
            selected_indices=selected_indices,
            entity_to_asym_id_int=entity_to_asym_id_int,
        )

    @staticmethod
    def get_template_raw_features(
        bioassembly_dict: dict[str, Any],
        selected_indices: np.ndarray,
        template_featurizer: None,
    ) -> Optional[dict[str, np.ndarray]]:
        """
        Get tokenized template features of the bioassembly.

        Args:
            bioassembly_dict (dict[str, Any]): The bioassembly dict with sequence, atom_array and token_array.
            selected_indices (np.ndarray): Cropped token indices (or None for no cropping).
            template_featurizer (None): Placeholder for the template featurizer.

        Returns:
            Optional[dict[str, np.ndarray]]: The tokenized template features of the bioassembly,
                or None if the template featurizer is not provided.
        """
        if template_featurizer is None:
            return None

        entity_to_asym_id_int = dict(
            DataPipeline.get_label_entity_id_to_asym_id_int(
                bioassembly_dict["atom_array"]
            )
        )
        return template_featurizer(
            bioassembly_dict=bioassembly_dict,
            selected_indices=selected_indices,
            entity_to_asym_id_int=entity_to_asym_id_int,
        )

    @staticmethod
    def crop(
        one_sample: "pd.Series",
        bioassembly_dict: dict[str, Any],
        crop_size: int,
        msa_featurizer: Optional["MSAFeaturizer"],
        template_featurizer: None,
        method_weights: Optional[list[float]] = None,
        contiguous_crop_complete_lig: bool = False,
        spatial_crop_complete_lig: bool = False,
        drop_last: bool = False,
        remove_metal: bool = False,
    ) -> tuple[str, "TokenArray", "AtomArray", dict[str, Any], dict[str, Any], int]:
        """
        Crop data based on the crop size and reference chain indices.

        Args:
            one_sample (pd.Series): A dict of one chain or interface from the indices list.
            bioassembly_dict (dict[str, Any]): A dict of bioassembly dict with sequence, atom_array and token_array.
            crop_size (int): The crop size; a value <= 0 disables cropping.
            msa_featurizer (Optional[MSAFeaturizer]): MSA featurizer (or None to skip MSA features).
            template_featurizer (None): Placeholder for the template featurizer.
            method_weights (Optional[list[float]]): The weights corresponding to these three cropping methods:
                ["ContiguousCropping", "SpatialCropping", "SpatialInterfaceCropping"].
                Defaults to [0.2, 0.4, 0.4].
            contiguous_crop_complete_lig (bool): Whether to crop the complete ligand in ContiguousCropping method.
            spatial_crop_complete_lig (bool): Whether to crop the complete ligand in SpatialCropping method.
            drop_last (bool): Whether to drop the last fragment in ContiguousCropping.
            remove_metal (bool): Whether to remove metal atoms from the crop.

        Returns:
            tuple[str, TokenArray, AtomArray, dict[str, Any], dict[str, Any], int]:
                crop_method (str): The crop method ("no_crop" when crop_size <= 0).
                cropped_token_array (TokenArray): TokenArray after cropping.
                cropped_atom_array (AtomArray): AtomArray after cropping.
                cropped_msa_features (dict[str, Any]): The cropped msa features.
                cropped_template_features (dict[str, Any]): The cropped template features.
                reference_token_index (int): Index of the reference token (-1 when not cropping).
        """
        if method_weights is None:
            # Avoid the shared-mutable-default pitfall; these are the
            # original default cropping-method weights.
            method_weights = [0.2, 0.4, 0.4]

        if crop_size <= 0:
            # No cropping: featurize the full assembly.
            selected_indices = None
            msa_features = DataPipeline.get_msa_raw_features(
                bioassembly_dict=bioassembly_dict,
                selected_indices=selected_indices,
                msa_featurizer=msa_featurizer,
            )
            template_features = DataPipeline.get_template_raw_features(
                bioassembly_dict=bioassembly_dict,
                selected_indices=selected_indices,
                template_featurizer=template_featurizer,
            )
            return (
                "no_crop",
                bioassembly_dict["token_array"],
                bioassembly_dict["atom_array"],
                msa_features or {},
                template_features or {},
                -1,
            )

        ref_chain_indices = DataPipeline._map_ref_chain(
            one_sample=one_sample, bioassembly_dict=bioassembly_dict
        )

        crop = CropData(
            crop_size=crop_size,
            ref_chain_indices=ref_chain_indices,
            token_array=bioassembly_dict["token_array"],
            atom_array=bioassembly_dict["atom_array"],
            method_weights=method_weights,
            contiguous_crop_complete_lig=contiguous_crop_complete_lig,
            spatial_crop_complete_lig=spatial_crop_complete_lig,
            drop_last=drop_last,
            remove_metal=remove_metal,
        )
        # Get crop method
        crop_method = crop.random_crop_method()
        # Get crop indices based on the crop method
        selected_indices, reference_token_index = crop.get_crop_indices(
            crop_method=crop_method
        )
        # Prepare msa
        msa_features = DataPipeline.get_msa_raw_features(
            bioassembly_dict=bioassembly_dict,
            selected_indices=selected_indices,
            msa_featurizer=msa_featurizer,
        )
        # Prepare template
        template_features = DataPipeline.get_template_raw_features(
            bioassembly_dict=bioassembly_dict,
            selected_indices=selected_indices,
            template_featurizer=template_featurizer,
        )

        (
            cropped_token_array,
            cropped_atom_array,
            cropped_msa_features,
            cropped_template_features,
        ) = crop.crop_by_indices(
            selected_token_indices=selected_indices,
            msa_features=msa_features,
            template_features=template_features,
        )

        if crop_method == "ContiguousCropping":
            resolved_atom_num = cropped_atom_array.is_resolved.sum()
            # The criterion of "more than 4 atoms" is chosen arbitrarily.
            assert (
                resolved_atom_num > 4
            ), f"{resolved_atom_num=} <= 4 after ContiguousCropping"

        return (
            crop_method,
            cropped_token_array,
            cropped_atom_array,
            cropped_msa_features,
            cropped_template_features,
            reference_token_index,
        )

    @staticmethod
    def save_atoms_to_cif(
        output_cif_file: str, atom_array: "AtomArray", include_bonds: bool = False
    ) -> None:
        """
        Save atom array data to a CIF file.

        Args:
            output_cif_file (str): The output path for saving the atom array in cif.
            atom_array (AtomArray): The atom array to be saved.
            include_bonds (bool): Whether to include bond information in the CIF file. Default is False.
        """
        strucio.save_structure(
            file_path=output_cif_file,
            array=atom_array,
            # The data block name is derived from the file name.
            data_block=os.path.basename(output_cif_file).replace(".cif", ""),
            include_bonds=include_bonds,
        )
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/dataloader.py
ADDED
|
@@ -0,0 +1,372 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import math
|
| 16 |
+
from typing import Iterator, Optional, Sequence
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
import torch.distributed as dist
|
| 20 |
+
from ml_collections.config_dict import ConfigDict
|
| 21 |
+
from torch.utils.data import DataLoader, DistributedSampler, Sampler
|
| 22 |
+
|
| 23 |
+
from protenix.data.dataset import Dataset, get_datasets
|
| 24 |
+
from protenix.utils.logger import get_logger
|
| 25 |
+
|
| 26 |
+
logger = get_logger(__name__)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class WeightedSampler(Sampler):
    """Weighted random sampler for single-process training.

    Each epoch draws ``num_samples`` dataset indices from a multinomial
    distribution defined by ``weights``. Reproducibility is guaranteed by
    seeding a dedicated generator with ``seed + epoch``.
    """

    def __init__(
        self,
        weights: Sequence[float],
        num_samples: int,
        replacement: bool,
        seed: int = 0,
    ):
        """
        Args:
            weights (Sequence[float]): Per-item sampling weights.
            num_samples (int): The number of samples to be drawn.
            replacement (bool): Whether sampling is done with replacement.
            seed (int): Base seed for the random number generator.
        """
        self.num_samples = num_samples
        self.replacement = replacement
        self.seed = seed
        self.epoch = 0
        self.weights = torch.as_tensor(weights, dtype=torch.double)

    def __iter__(self) -> Iterator[int]:
        """Yield ``num_samples`` indices drawn from the weight distribution.

        A fresh generator seeded with ``seed + epoch`` makes every epoch's
        draw deterministic yet different across epochs.
        """
        rng = torch.Generator()
        rng.manual_seed(self.seed + self.epoch)
        drawn = torch.multinomial(
            self.weights, self.num_samples, self.replacement, generator=rng
        )
        yield from drawn.tolist()

    def __len__(self) -> int:
        return self.num_samples

    def set_epoch(self, epoch: int) -> None:
        self.epoch = epoch
|
| 78 |
+
class DistributedWeightedSampler(DistributedSampler):
    """
    A distributed weighted sampler for multiple nodes.

    Draws one global weighted sample per epoch and shards it evenly across
    replicas, so that every rank iterates the same number of indices
    (required to keep DDP ranks in lock-step).
    """

    def __init__(
        self,
        dataset: "Dataset",
        weights: Sequence[float],
        num_samples: int,
        num_replicas: Optional[int] = None,
        rank: Optional[int] = None,
        replacement: bool = True,
        seed: int = 0,
    ):
        """
        Args:
            dataset (Dataset): The dataset to be loaded.
            weights (Sequence[float]): The weights associated with the dataset.
            num_samples (int): The total number of samples to be drawn (globally, across all replicas).
            num_replicas (int, optional): The number of replicas to use for distributed sampling. Defaults to None.
            rank (int, optional): The rank of the current process in a distributed environment. Defaults to None.
            replacement (bool, optional): Whether to sample with replacement. Defaults to True.
            seed (int, optional): The random seed for reproducibility. Defaults to 0.
        """
        super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=False)
        self.weights = torch.as_tensor(weights, dtype=torch.double)
        self.replacement = replacement
        self.seed = seed
        self.epoch = 0
        # Overrides the value computed by DistributedSampler.__init__.
        self.num_samples = num_samples

        self.num_samples_per_replica = int(
            math.ceil(self.num_samples / self.num_replicas)
        )
        self.total_size = self.num_samples_per_replica * self.num_replicas

    def __iter__(self) -> Iterator[int]:
        """
        Generates an iterator over the sampled indices for the current process.

        A generator seeded with ``seed + epoch`` draws one global multinomial
        sample (identical on every rank), which is then padded and sharded by
        rank.

        Returns:
            iter: An iterator over the sampled indices for the current process.
        """
        g = torch.Generator()
        g.manual_seed(self.seed + self.epoch)
        indices = torch.multinomial(
            self.weights, self.num_samples, self.replacement, generator=g
        ).tolist()
        # Pad to total_size (the standard DistributedSampler trick) so every
        # replica receives exactly num_samples_per_replica indices. Without
        # padding, ranks get unequal shard sizes whenever num_samples is not
        # divisible by num_replicas, which desynchronizes DDP training steps.
        padding = self.total_size - len(indices)
        if padding > 0:
            indices += indices[:padding]
        return iter(indices[self.rank : self.total_size : self.num_replicas])

    def __len__(self) -> int:
        # Matches the number of indices __iter__ actually yields on each rank
        # (the old `num_samples // num_replicas` under-reported it).
        return self.num_samples_per_replica

    def set_epoch(self, epoch: int) -> None:
        self.epoch = epoch
|
| 141 |
+
class KeySumBalancedSampler(Sampler):
    """
    Partition a dataset's indices across workers so that the sum of a
    per-sample integer key (e.g. token counts) is as balanced as possible;
    each rank then iterates only its own partition.
    """

    def __init__(
        self,
        dataset: Dataset,
        key: str,
        value_scale: float = 1.0,
        seed: Optional[int] = None,
        num_replicas: Optional[int] = None,
        rank: Optional[int] = None,
    ):
        """
        Initialize the sampler and compute this rank's balanced partition
        via `get_balanced_assignments`.

        Args:
            dataset (Dataset): The dataset to sample from.
            key (str): The key by which data will be balanced (integer value).
            value_scale (float): The multiplier of key value when computing the worker assignment weight
            seed (int, optional): Seed for a deterministic shuffle before balancing.
            num_replicas (int, optional): Number of processes participating in distributed training.
            rank (int, optional): Rank of the current process within num_replicas.
        """
        self.dataset = dataset
        self.key = key
        # NOTE(review): value_scale is stored but never used in the balancing
        # below — confirm whether it was meant to scale the key values.
        self.value_scale = value_scale
        self.seed = seed
        # Fix: the previous `x or dist.get_...()` treated an explicit rank of 0
        # as "not provided" and called into torch.distributed even when a rank
        # was passed (crashing if the process group is not initialized).
        self.num_replicas = (
            num_replicas if num_replicas is not None else dist.get_world_size()
        )
        self.rank = rank if rank is not None else dist.get_rank()

        # Get indices for this process after balancing by key sum
        worker_assignments = self.get_balanced_assignments()
        self.indices = worker_assignments[self.rank]

    def get_balanced_assignments(self):
        """
        Distribute dataset indices across workers such that the sum of key values
        assigned to each worker is as balanced as possible.
        """
        if self.seed is not None:
            # deterministically shuffle based on seed
            g = torch.Generator()
            g.manual_seed(self.seed)
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = list(range(len(self.dataset)))

        # pad for len(dataset) to self.num_replicas if len(dataset) < self.num_replicas
        while len(indices) < self.num_replicas:
            indices += indices[: (self.num_replicas - len(indices))]

        if isinstance(self.dataset.indices_list, list):
            # e.g. recentPDB test set
            dataset_values = [
                x[self.key].astype(int)[0] for x in self.dataset.indices_list
            ]
        else:
            # e.g. posebuster test set
            dataset_values = self.dataset.indices_list[self.key].astype(int).to_numpy()

        # Sort indices by key value (largest first, so heavy items place first)
        key_value_pairs = [(idx, dataset_values[idx]) for idx in indices]
        key_value_pairs.sort(key=lambda x: x[1], reverse=True)

        # Calculate the target number of samples per worker
        num_samples_per_worker = len(self.dataset) // self.num_replicas

        # Initialize containers for worker assignments and their current key sum
        worker_assignments = [[] for _ in range(self.num_replicas)]
        worker_sums = [0] * self.num_replicas
        total_samples = num_samples_per_worker * self.num_replicas

        # Greedy: give each item (in descending key order) to the least-loaded
        # worker that still has capacity.
        for idx, key_value in key_value_pairs[:total_samples]:
            min_worker = min(
                range(self.num_replicas),
                key=lambda i: (
                    worker_sums[i]
                    if len(worker_assignments[i]) < num_samples_per_worker
                    else float("inf")
                ),
            )
            worker_assignments[min_worker].append(idx)
            # NOTE(review): the load metric is the *squared* key value —
            # presumably modelling super-linear per-sample cost; confirm intent.
            worker_sums[min_worker] += key_value**2

        # Fix any discrepancies in the number of samples
        all_indices = [idx for idx, _ in key_value_pairs]

        # Assign remaining samples round-robin if the dataset isn't divisible perfectly
        if len(all_indices) > total_samples:
            for i in range(len(all_indices) - total_samples):
                worker_assignments[i % self.num_replicas].append(
                    all_indices[total_samples + i]
                )

        # Return the per-worker index lists
        return worker_assignments

    def __iter__(self):
        return iter(self.indices)

    def __len__(self):
        return len(self.indices)
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
class IterDataLoader(DataLoader):
    """
    Single-node dataloader that bumps its sampler's epoch on every call to
    ``__iter__``, so each pass re-seeds the sampler deterministically.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert self.sampler is not None
        # Counts completed calls to __iter__; doubles as the sampler epoch.
        self.counter = 0

    def __iter__(self):
        # Advance the epoch first so repeated passes differ reproducibly.
        self.sampler.set_epoch(self.counter)
        self.counter += 1
        return super().__iter__()
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
class DistributedDataLoader(DataLoader):
    """
    Multi-node dataloader: wraps a (possibly caller-supplied) distributed
    sampler and advances its epoch on every call to ``__iter__``.
    """

    def __init__(
        self,
        dataset: Dataset,
        batch_size: int,
        num_workers: int = 0,
        collate_fn=None,
        seed: int = 42,
        drop_last: bool = True,
        shuffle: bool = True,
        sampler: Sampler = None,
    ):
        # Fall back to a plain DistributedSampler when none is provided.
        if sampler is None:
            sampler = DistributedSampler(
                dataset, shuffle=shuffle, seed=seed, drop_last=drop_last
            )
        self.sampler = sampler

        super().__init__(
            dataset=dataset,
            batch_size=batch_size,
            num_workers=num_workers,
            sampler=self.sampler,
            shuffle=False,
            collate_fn=collate_fn,
        )
        # Counts completed calls to __iter__; doubles as the sampler epoch.
        self.counter = 0

    def __iter__(self):
        self.sampler.set_epoch(self.counter)
        self.counter += 1
        return super().__iter__()
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
def get_dataloaders(
    configs: ConfigDict, world_size: int, seed: int, error_dir: Optional[str] = None
):
    """
    Generate data loaders for training and testing based on the given configurations and seed.

    Args:
        configs (ConfigDict): An object containing the data configuration information.
        world_size (int): The number of processes in the distributed environment.
        seed (int): The random seed used for data sampling.
        error_dir (str, optional): The directory to store error information. Defaults to None.

    Returns:
        tuple: A tuple containing the training data loader and a dictionary of testing data loaders.
    """
    train_dataset, test_datasets = get_datasets(configs, error_dir)

    # Options shared by every loader built here: single-sample batches whose
    # collate simply unwraps the singleton batch list.
    common_dl_kwargs = dict(
        batch_size=1,
        shuffle=False,
        num_workers=configs.data.num_dl_workers,
        collate_fn=lambda batch: batch[0],
    )

    if world_size > 1:
        train_sampler = DistributedWeightedSampler(
            train_dataset,
            train_dataset.merged_datapoint_weights,
            num_samples=configs.data.epoch_size,
            replacement=True,
            seed=seed,
        )
        train_dl = DistributedDataLoader(
            dataset=train_dataset, sampler=train_sampler, **common_dl_kwargs
        )
    else:
        train_sampler = WeightedSampler(
            weights=train_dataset.merged_datapoint_weights,
            num_samples=configs.data.epoch_size,
            replacement=True,
            seed=seed,
        )
        train_dl = IterDataLoader(
            dataset=train_dataset, sampler=train_sampler, **common_dl_kwargs
        )

    test_dls = {}
    test_dataset_sizes = {}
    for test_name, test_dataset in test_datasets.items():
        test_dataset_sizes[test_name] = len(test_dataset)
        # In the distributed case, balance test samples across ranks by token count.
        if world_size > 1:
            test_sampler = KeySumBalancedSampler(
                test_dataset, key="num_tokens", seed=configs.seed
            )
        else:
            test_sampler = None
        test_dls[test_name] = DataLoader(
            test_dataset, sampler=test_sampler, **common_dl_kwargs
        )
    logger.info(
        f"train data size: {len(train_dataset)}, test size: {test_dataset_sizes}"
    )
    return train_dl, test_dls
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/dataset.py
ADDED
|
@@ -0,0 +1,1100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import json
|
| 16 |
+
import os
|
| 17 |
+
import random
|
| 18 |
+
import traceback
|
| 19 |
+
from copy import deepcopy
|
| 20 |
+
from pathlib import Path
|
| 21 |
+
from typing import Any, Callable, Optional, Union
|
| 22 |
+
|
| 23 |
+
import numpy as np
|
| 24 |
+
import pandas as pd
|
| 25 |
+
import torch
|
| 26 |
+
from biotite.structure.atoms import AtomArray
|
| 27 |
+
from ml_collections.config_dict import ConfigDict
|
| 28 |
+
from torch.utils.data import Dataset
|
| 29 |
+
|
| 30 |
+
from protenix.data.constants import EvaluationChainInterface
|
| 31 |
+
from protenix.data.data_pipeline import DataPipeline
|
| 32 |
+
from protenix.data.featurizer import Featurizer
|
| 33 |
+
from protenix.data.msa_featurizer import MSAFeaturizer
|
| 34 |
+
from protenix.data.tokenizer import TokenArray
|
| 35 |
+
from protenix.data.utils import data_type_transform, make_dummy_feature
|
| 36 |
+
from protenix.utils.cropping import CropData
|
| 37 |
+
from protenix.utils.file_io import read_indices_csv
|
| 38 |
+
from protenix.utils.logger import get_logger
|
| 39 |
+
from protenix.utils.torch_utils import dict_to_tensor
|
| 40 |
+
|
| 41 |
+
logger = get_logger(__name__)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class BaseSingleDataset(Dataset):
|
| 45 |
+
"""
|
| 46 |
+
dataset for a single data source
|
| 47 |
+
data = self.__item__(idx)
|
| 48 |
+
return a dict of features and labels, the keys and the shape are defined in protenix.data.utils
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
def __init__(
|
| 52 |
+
self,
|
| 53 |
+
mmcif_dir: Union[str, Path],
|
| 54 |
+
bioassembly_dict_dir: Optional[Union[str, Path]],
|
| 55 |
+
indices_fpath: Union[str, Path],
|
| 56 |
+
cropping_configs: dict[str, Any],
|
| 57 |
+
msa_featurizer: Optional[MSAFeaturizer] = None,
|
| 58 |
+
template_featurizer: Optional[Any] = None,
|
| 59 |
+
name: str = None,
|
| 60 |
+
**kwargs,
|
| 61 |
+
) -> None:
|
| 62 |
+
super(BaseSingleDataset, self).__init__()
|
| 63 |
+
|
| 64 |
+
# Configs
|
| 65 |
+
self.mmcif_dir = mmcif_dir
|
| 66 |
+
self.bioassembly_dict_dir = bioassembly_dict_dir
|
| 67 |
+
self.indices_fpath = indices_fpath
|
| 68 |
+
self.cropping_configs = cropping_configs
|
| 69 |
+
self.name = name
|
| 70 |
+
# General dataset configs
|
| 71 |
+
self.ref_pos_augment = kwargs.get("ref_pos_augment", True)
|
| 72 |
+
self.lig_atom_rename = kwargs.get("lig_atom_rename", False)
|
| 73 |
+
self.reassign_continuous_chain_ids = kwargs.get(
|
| 74 |
+
"reassign_continuous_chain_ids", False
|
| 75 |
+
)
|
| 76 |
+
self.shuffle_mols = kwargs.get("shuffle_mols", False)
|
| 77 |
+
self.shuffle_sym_ids = kwargs.get("shuffle_sym_ids", False)
|
| 78 |
+
|
| 79 |
+
# Typically used for test sets
|
| 80 |
+
self.find_pocket = kwargs.get("find_pocket", False)
|
| 81 |
+
self.find_all_pockets = kwargs.get("find_all_pockets", False) # for dev
|
| 82 |
+
self.find_eval_chain_interface = kwargs.get("find_eval_chain_interface", False)
|
| 83 |
+
self.group_by_pdb_id = kwargs.get("group_by_pdb_id", False) # for test set
|
| 84 |
+
self.sort_by_n_token = kwargs.get("sort_by_n_token", False)
|
| 85 |
+
|
| 86 |
+
# Typically used for training set
|
| 87 |
+
self.random_sample_if_failed = kwargs.get("random_sample_if_failed", False)
|
| 88 |
+
self.use_reference_chains_only = kwargs.get("use_reference_chains_only", False)
|
| 89 |
+
self.is_distillation = kwargs.get("is_distillation", False)
|
| 90 |
+
|
| 91 |
+
# Configs for data filters
|
| 92 |
+
self.max_n_token = kwargs.get("max_n_token", -1)
|
| 93 |
+
self.pdb_list = kwargs.get("pdb_list", None)
|
| 94 |
+
if len(self.pdb_list) == 0:
|
| 95 |
+
self.pdb_list = None
|
| 96 |
+
# Used for removing rows in the indices list. Column names and excluded values are specified in this dict.
|
| 97 |
+
self.exclusion_dict = kwargs.get("exclusion", {})
|
| 98 |
+
self.limits = kwargs.get(
|
| 99 |
+
"limits", -1
|
| 100 |
+
) # Limit number of indices rows, mainly for test
|
| 101 |
+
|
| 102 |
+
self.error_dir = kwargs.get("error_dir", None)
|
| 103 |
+
if self.error_dir is not None:
|
| 104 |
+
os.makedirs(self.error_dir, exist_ok=True)
|
| 105 |
+
|
| 106 |
+
self.msa_featurizer = msa_featurizer
|
| 107 |
+
self.template_featurizer = template_featurizer
|
| 108 |
+
|
| 109 |
+
# Read data
|
| 110 |
+
self.indices_list = self.read_indices_list(indices_fpath)
|
| 111 |
+
|
| 112 |
+
@staticmethod
|
| 113 |
+
def read_pdb_list(pdb_list: Union[list, str]) -> Optional[list]:
|
| 114 |
+
"""
|
| 115 |
+
Reads a list of PDB IDs from a file or directly from a list.
|
| 116 |
+
|
| 117 |
+
Args:
|
| 118 |
+
pdb_list: A list of PDB IDs or a file path containing PDB IDs.
|
| 119 |
+
|
| 120 |
+
Returns:
|
| 121 |
+
A list of PDB IDs if the input is valid, otherwise None.
|
| 122 |
+
"""
|
| 123 |
+
if pdb_list is None:
|
| 124 |
+
return None
|
| 125 |
+
|
| 126 |
+
if isinstance(pdb_list, list):
|
| 127 |
+
return pdb_list
|
| 128 |
+
|
| 129 |
+
with open(pdb_list, "r") as f:
|
| 130 |
+
pdb_filter_list = []
|
| 131 |
+
for l in f.readlines():
|
| 132 |
+
l = l.strip()
|
| 133 |
+
if l:
|
| 134 |
+
pdb_filter_list.append(l)
|
| 135 |
+
return pdb_filter_list
|
| 136 |
+
|
| 137 |
+
def read_indices_list(self, indices_fpath: Union[str, Path]) -> pd.DataFrame:
|
| 138 |
+
"""
|
| 139 |
+
Reads and processes a list of indices from a CSV file.
|
| 140 |
+
|
| 141 |
+
Args:
|
| 142 |
+
indices_fpath: Path to the CSV file containing the indices.
|
| 143 |
+
|
| 144 |
+
Returns:
|
| 145 |
+
A DataFrame containing the processed indices.
|
| 146 |
+
"""
|
| 147 |
+
indices_list = read_indices_csv(indices_fpath)
|
| 148 |
+
num_data = len(indices_list)
|
| 149 |
+
logger.info(f"#Rows in indices list: {num_data}")
|
| 150 |
+
# Filter by pdb_list
|
| 151 |
+
if self.pdb_list is not None:
|
| 152 |
+
pdb_filter_list = set(self.read_pdb_list(pdb_list=self.pdb_list))
|
| 153 |
+
indices_list = indices_list[indices_list["pdb_id"].isin(pdb_filter_list)]
|
| 154 |
+
logger.info(f"[filtered by pdb_list] #Rows: {len(indices_list)}")
|
| 155 |
+
|
| 156 |
+
# Filter by max_n_token
|
| 157 |
+
if self.max_n_token > 0:
|
| 158 |
+
valid_mask = indices_list["num_tokens"].astype(int) <= self.max_n_token
|
| 159 |
+
removed_list = indices_list[~valid_mask]
|
| 160 |
+
indices_list = indices_list[valid_mask]
|
| 161 |
+
logger.info(f"[removed] #Rows: {len(removed_list)}")
|
| 162 |
+
logger.info(f"[removed] #PDB: {removed_list['pdb_id'].nunique()}")
|
| 163 |
+
logger.info(
|
| 164 |
+
f"[filtered by n_token ({self.max_n_token})] #Rows: {len(indices_list)}"
|
| 165 |
+
)
|
| 166 |
+
|
| 167 |
+
# Filter by exclusion_dict
|
| 168 |
+
for col_name, exclusion_list in self.exclusion_dict.items():
|
| 169 |
+
cols = col_name.split("|")
|
| 170 |
+
exclusion_set = {tuple(excl.split("|")) for excl in exclusion_list}
|
| 171 |
+
|
| 172 |
+
def is_valid(row):
|
| 173 |
+
return tuple(row[col] for col in cols) not in exclusion_set
|
| 174 |
+
|
| 175 |
+
valid_mask = indices_list.apply(is_valid, axis=1)
|
| 176 |
+
indices_list = indices_list[valid_mask].reset_index(drop=True)
|
| 177 |
+
logger.info(
|
| 178 |
+
f"[Excluded by {col_name} -- {exclusion_list}] #Rows: {len(indices_list)}"
|
| 179 |
+
)
|
| 180 |
+
self.print_data_stats(indices_list)
|
| 181 |
+
|
| 182 |
+
# Group by pdb_id
|
| 183 |
+
# A list of dataframe. Each contains one pdb with multiple rows.
|
| 184 |
+
if self.group_by_pdb_id:
|
| 185 |
+
indices_list = [
|
| 186 |
+
df.reset_index() for _, df in indices_list.groupby("pdb_id", sort=True)
|
| 187 |
+
]
|
| 188 |
+
|
| 189 |
+
if self.sort_by_n_token:
|
| 190 |
+
# Sort the dataset in a descending order, so that if OOM it will raise Error at an early stage.
|
| 191 |
+
if self.group_by_pdb_id:
|
| 192 |
+
indices_list = sorted(
|
| 193 |
+
indices_list,
|
| 194 |
+
key=lambda df: int(df["num_tokens"].iloc[0]),
|
| 195 |
+
reverse=True,
|
| 196 |
+
)
|
| 197 |
+
else:
|
| 198 |
+
indices_list = indices_list.sort_values(
|
| 199 |
+
by="num_tokens", key=lambda x: x.astype(int), ascending=False
|
| 200 |
+
).reset_index(drop=True)
|
| 201 |
+
|
| 202 |
+
if self.find_eval_chain_interface:
|
| 203 |
+
# Remove data that does not contain eval_type in the EvaluationChainInterface list
|
| 204 |
+
if self.group_by_pdb_id:
|
| 205 |
+
indices_list = [
|
| 206 |
+
df
|
| 207 |
+
for df in indices_list
|
| 208 |
+
if len(
|
| 209 |
+
set(df["eval_type"].to_list()).intersection(
|
| 210 |
+
set(EvaluationChainInterface)
|
| 211 |
+
)
|
| 212 |
+
)
|
| 213 |
+
> 0
|
| 214 |
+
]
|
| 215 |
+
else:
|
| 216 |
+
indices_list = indices_list[
|
| 217 |
+
indices_list["eval_type"].apply(
|
| 218 |
+
lambda x: x in EvaluationChainInterface
|
| 219 |
+
)
|
| 220 |
+
]
|
| 221 |
+
if self.limits > 0 and len(indices_list) > self.limits:
|
| 222 |
+
logger.info(
|
| 223 |
+
f"Limit indices list size from {len(indices_list)} to {self.limits}"
|
| 224 |
+
)
|
| 225 |
+
indices_list = indices_list[: self.limits]
|
| 226 |
+
return indices_list
|
| 227 |
+
|
| 228 |
+
def print_data_stats(self, df: pd.DataFrame) -> None:
|
| 229 |
+
"""
|
| 230 |
+
Prints statistics about the dataset, including the distribution of molecular group types.
|
| 231 |
+
|
| 232 |
+
Args:
|
| 233 |
+
df: A DataFrame containing the indices list.
|
| 234 |
+
"""
|
| 235 |
+
if self.name:
|
| 236 |
+
logger.info("-" * 10 + f" Dataset {self.name}" + "-" * 10)
|
| 237 |
+
df["mol_group_type"] = df.apply(
|
| 238 |
+
lambda row: "_".join(
|
| 239 |
+
sorted(
|
| 240 |
+
[
|
| 241 |
+
str(row["mol_1_type"]),
|
| 242 |
+
str(row["mol_2_type"]).replace("nan", "intra"),
|
| 243 |
+
]
|
| 244 |
+
)
|
| 245 |
+
),
|
| 246 |
+
axis=1,
|
| 247 |
+
)
|
| 248 |
+
|
| 249 |
+
group_size_dict = dict(df["mol_group_type"].value_counts())
|
| 250 |
+
for i, n_i in group_size_dict.items():
|
| 251 |
+
logger.info(f"{i}: {n_i}/{len(df)}({round(n_i*100/len(df), 2)}%)")
|
| 252 |
+
|
| 253 |
+
logger.info("-" * 30)
|
| 254 |
+
if "cluster_id" in df.columns:
|
| 255 |
+
n_cluster = df["cluster_id"].nunique()
|
| 256 |
+
for i in group_size_dict:
|
| 257 |
+
n_i = df[df["mol_group_type"] == i]["cluster_id"].nunique()
|
| 258 |
+
logger.info(f"{i}: {n_i}/{n_cluster}({round(n_i*100/n_cluster, 2)}%)")
|
| 259 |
+
logger.info("-" * 30)
|
| 260 |
+
|
| 261 |
+
logger.info(f"Final pdb ids: {len(set(df.pdb_id.tolist()))}")
|
| 262 |
+
logger.info("-" * 30)
|
| 263 |
+
|
| 264 |
+
def __len__(self) -> int:
|
| 265 |
+
return len(self.indices_list)
|
| 266 |
+
|
| 267 |
+
def save_error_data(self, idx: int, error_message: str) -> None:
|
| 268 |
+
"""
|
| 269 |
+
Saves the error data for a specific index to a JSON file in the error directory.
|
| 270 |
+
|
| 271 |
+
Args:
|
| 272 |
+
idx: The index of the data sample that caused the error.
|
| 273 |
+
error_message: The error message to be saved.
|
| 274 |
+
"""
|
| 275 |
+
if self.error_dir is not None:
|
| 276 |
+
sample_indice = self._get_sample_indice(idx=idx)
|
| 277 |
+
data = sample_indice.to_dict()
|
| 278 |
+
data["error"] = error_message
|
| 279 |
+
|
| 280 |
+
filename = f"{sample_indice.pdb_id}-{sample_indice.chain_1_id}-{sample_indice.chain_2_id}.json"
|
| 281 |
+
fpath = os.path.join(self.error_dir, filename)
|
| 282 |
+
if not os.path.exists(fpath):
|
| 283 |
+
with open(fpath, "w") as f:
|
| 284 |
+
json.dump(data, f)
|
| 285 |
+
|
| 286 |
+
def __getitem__(self, idx: int):
|
| 287 |
+
"""
|
| 288 |
+
Retrieves a data sample by processing the given index.
|
| 289 |
+
If an error occurs, it attempts to handle it by either saving the error data or randomly sampling another index.
|
| 290 |
+
|
| 291 |
+
Args:
|
| 292 |
+
idx: The index of the data sample to retrieve.
|
| 293 |
+
|
| 294 |
+
Returns:
|
| 295 |
+
A dictionary containing the processed data sample.
|
| 296 |
+
"""
|
| 297 |
+
# Try at most 10 times
|
| 298 |
+
for _ in range(10):
|
| 299 |
+
try:
|
| 300 |
+
data = self.process_one(idx)
|
| 301 |
+
return data
|
| 302 |
+
except Exception as e:
|
| 303 |
+
error_message = f"{e} at idx {idx}:\n{traceback.format_exc()}"
|
| 304 |
+
self.save_error_data(idx, error_message)
|
| 305 |
+
|
| 306 |
+
if self.random_sample_if_failed:
|
| 307 |
+
logger.exception(f"[skip data {idx}] {error_message}")
|
| 308 |
+
# Random sample an index
|
| 309 |
+
idx = random.choice(range(len(self.indices_list)))
|
| 310 |
+
continue
|
| 311 |
+
else:
|
| 312 |
+
raise Exception(e)
|
| 313 |
+
return data
|
| 314 |
+
|
| 315 |
+
def _get_bioassembly_data(
|
| 316 |
+
self, idx: int
|
| 317 |
+
) -> tuple[list[dict[str, Any]], dict[str, Any]]:
|
| 318 |
+
sample_indice = self._get_sample_indice(idx=idx)
|
| 319 |
+
if self.bioassembly_dict_dir is not None:
|
| 320 |
+
bioassembly_dict_fpath = os.path.join(
|
| 321 |
+
self.bioassembly_dict_dir, sample_indice.pdb_id + ".pkl.gz"
|
| 322 |
+
)
|
| 323 |
+
else:
|
| 324 |
+
bioassembly_dict_fpath = None
|
| 325 |
+
|
| 326 |
+
bioassembly_dict = DataPipeline.get_data_bioassembly(
|
| 327 |
+
bioassembly_dict_fpath=bioassembly_dict_fpath
|
| 328 |
+
)
|
| 329 |
+
bioassembly_dict["pdb_id"] = sample_indice.pdb_id
|
| 330 |
+
return sample_indice, bioassembly_dict, bioassembly_dict_fpath
|
| 331 |
+
|
| 332 |
+
@staticmethod
|
| 333 |
+
def _reassign_atom_array_chain_id(atom_array: AtomArray):
|
| 334 |
+
"""
|
| 335 |
+
In experiments conducted to observe overfitting effects using training sets,
|
| 336 |
+
the pre-stored AtomArray in the training set may experience issues with discontinuous chain IDs due to filtering.
|
| 337 |
+
Consequently, a temporary patch has been implemented to resolve this issue.
|
| 338 |
+
|
| 339 |
+
e.g. 3x6u asym_id_int = [0, 1, 2, ... 18, 20] -> reassigned_asym_id_int [0, 1, 2, ..., 18, 19]
|
| 340 |
+
"""
|
| 341 |
+
|
| 342 |
+
def _get_contiguous_array(array):
|
| 343 |
+
array_uniq = np.sort(np.unique(array))
|
| 344 |
+
map_dict = {i: idx for idx, i in enumerate(array_uniq)}
|
| 345 |
+
new_array = np.vectorize(map_dict.get)(array)
|
| 346 |
+
return new_array
|
| 347 |
+
|
| 348 |
+
atom_array.asym_id_int = _get_contiguous_array(atom_array.asym_id_int)
|
| 349 |
+
atom_array.entity_id_int = _get_contiguous_array(atom_array.entity_id_int)
|
| 350 |
+
atom_array.sym_id_int = _get_contiguous_array(atom_array.sym_id_int)
|
| 351 |
+
return atom_array
|
| 352 |
+
|
| 353 |
+
@staticmethod
def _shuffle_array_based_on_mol_id(token_array: TokenArray, atom_array: AtomArray):
    """
    Shuffle token_array and atom_array jointly, keeping all tokens/atoms
    that share the same mol_id together as one integrated unit.
    """
    # mol_id of each token, read from its centre atom.
    centre_atom_indices = token_array.get_annotation("centre_atom_index")
    token_mol_id = atom_array[centre_atom_indices].mol_id

    # Shuffle the unique molecule ids in place.
    mol_order = np.unique(token_mol_id).copy()
    np.random.shuffle(mol_order)

    # Concatenate token indices molecule-by-molecule in the shuffled order.
    all_token_indices = np.arange(len(token_mol_id))
    new_token_order = np.concatenate(
        [all_token_indices[token_mol_id == mol_id] for mol_id in mol_order]
    )

    # Reorder both arrays safely via the crop helper.
    token_array, atom_array, _, _ = CropData.select_by_token_indices(
        token_array=token_array,
        atom_array=atom_array,
        selected_token_indices=new_token_order,
    )
    return token_array, atom_array
|
| 385 |
+
|
| 386 |
+
@staticmethod
|
| 387 |
+
def _assign_random_sym_id(atom_array: AtomArray):
|
| 388 |
+
"""
|
| 389 |
+
Assign random sym_id for chains of the same entity_id
|
| 390 |
+
e.g.
|
| 391 |
+
when entity_id = 0
|
| 392 |
+
sym_id_int = [0, 1, 2] -> random_sym_id_int = [2, 0, 1]
|
| 393 |
+
when entity_id = 1
|
| 394 |
+
sym_id_int = [0, 1, 2, 3] -> random_sym_id_int = [3, 0, 1, 2]
|
| 395 |
+
"""
|
| 396 |
+
|
| 397 |
+
def _shuffle(x):
|
| 398 |
+
x_unique = np.sort(np.unique(x))
|
| 399 |
+
x_shuffled = x_unique.copy()
|
| 400 |
+
np.random.shuffle(x_shuffled) # shuffle in-place
|
| 401 |
+
map_dict = dict(zip(x_unique, x_shuffled))
|
| 402 |
+
new_x = np.vectorize(map_dict.get)(x)
|
| 403 |
+
return new_x.copy()
|
| 404 |
+
|
| 405 |
+
for entity_id in np.unique(atom_array.label_entity_id):
|
| 406 |
+
mask = atom_array.label_entity_id == entity_id
|
| 407 |
+
atom_array.sym_id_int[mask] = _shuffle(atom_array.sym_id_int[mask])
|
| 408 |
+
return atom_array
|
| 409 |
+
|
| 410 |
+
def process_one(
    self, idx: int, return_atom_token_array: bool = False
) -> dict[str, dict]:
    """
    Process one data sample: load the bioassembly, apply the configured
    transformations (reference-chain filtering, molecule shuffling, sym-id
    shuffling, chain-id reassignment), crop, and build features/labels.

    Args:
        idx: The index of the data sample to process.
        return_atom_token_array: Whether to also return the cropped atom
            and token arrays.

    Returns:
        A dict with the input features, labels, basic info and, optionally,
        the cropped atom/token arrays.
    """
    sample_indice, bioassembly_dict, bioassembly_dict_fpath = (
        self._get_bioassembly_data(idx=idx)
    )

    if self.use_reference_chains_only:
        # Keep only the chain(s) this sample row refers to; drop the rest
        # safely via the token-level crop helper.
        keep_chain_ids = [sample_indice.chain_1_id]
        if sample_indice.type != "chain":
            keep_chain_ids.append(sample_indice.chain_2_id)
        centre_atom_indices = bioassembly_dict["token_array"].get_annotation(
            "centre_atom_index"
        )
        token_chain_id = bioassembly_dict["atom_array"][
            centre_atom_indices
        ].chain_id
        ref_token_mask = np.isin(token_chain_id, keep_chain_ids)
        (
            bioassembly_dict["token_array"],
            bioassembly_dict["atom_array"],
            _,
            _,
        ) = CropData.select_by_token_indices(
            token_array=bioassembly_dict["token_array"],
            atom_array=bioassembly_dict["atom_array"],
            selected_token_indices=np.flatnonzero(ref_token_mask),
        )

    if self.shuffle_mols:
        # Shuffle molecules as whole units.
        bioassembly_dict["token_array"], bioassembly_dict["atom_array"] = (
            self._shuffle_array_based_on_mol_id(
                token_array=bioassembly_dict["token_array"],
                atom_array=bioassembly_dict["atom_array"],
            )
        )

    if self.shuffle_sym_ids:
        bioassembly_dict["atom_array"] = self._assign_random_sym_id(
            bioassembly_dict["atom_array"]
        )

    if self.reassign_continuous_chain_ids:
        bioassembly_dict["atom_array"] = self._reassign_atom_array_chain_id(
            bioassembly_dict["atom_array"]
        )

    # Crop to the configured size.
    (
        crop_method,
        cropped_token_array,
        cropped_atom_array,
        cropped_msa_features,
        cropped_template_features,
        reference_token_index,
    ) = self.crop(
        sample_indice=sample_indice,
        bioassembly_dict=bioassembly_dict,
        **self.cropping_configs,
    )

    feat, label, label_full = self.get_feature_and_label(
        idx=idx,
        token_array=cropped_token_array,
        atom_array=cropped_atom_array,
        msa_features=cropped_msa_features,
        template_features=cropped_template_features,
        full_atom_array=bioassembly_dict["atom_array"],
        is_spatial_crop="spatial" in crop_method.lower(),
    )

    # Dimension-related bookkeeping (counts of chains/tokens/atoms/MSA rows).
    basic_info = {
        "pdb_id": (
            bioassembly_dict["pdb_id"]
            if self.is_distillation is False
            else sample_indice["pdb_id"]
        ),
        "N_asym": torch.tensor([len(torch.unique(feat["asym_id"]))]),
        "N_token": torch.tensor([feat["token_index"].shape[0]]),
        "N_atom": torch.tensor([feat["atom_to_token_idx"].shape[0]]),
        "N_msa": torch.tensor([feat["msa"].shape[0]]),
        "bioassembly_dict_fpath": bioassembly_dict_fpath,
        "N_msa_prot_pair": torch.tensor([feat["prot_pair_num_alignments"]]),
        "N_msa_prot_unpair": torch.tensor([feat["prot_unpair_num_alignments"]]),
        "N_msa_rna_pair": torch.tensor([feat["rna_pair_num_alignments"]]),
        "N_msa_rna_unpair": torch.tensor([feat["rna_unpair_num_alignments"]]),
    }

    # Per-molecule-type atom and token counts.
    type_abbr = {"protein": "prot", "ligand": "lig"}
    for mol_type in ("protein", "ligand", "rna", "dna"):
        short = type_abbr.get(mol_type, mol_type)
        type_mask = feat[f"is_{mol_type}"].bool()
        basic_info[f"N_{short}_atom"] = torch.tensor(
            [int(type_mask.sum(dim=-1).item())]
        )
        basic_info[f"N_{short}_token"] = torch.tensor(
            [len(torch.unique(feat["atom_to_token_idx"][type_mask]))]
        )

    # Chain-level chain_id list, ordered by integer asym id.
    asym_to_chain = {
        atom.asym_id_int: atom.chain_id for atom in cropped_atom_array
    }
    basic_info["chain_id"] = [
        asym_to_chain[key] for key in sorted(asym_to_chain.keys())
    ]

    data = {
        "input_feature_dict": feat,
        "label_dict": label,
        "label_full_dict": label_full,
        "basic": basic_info,
    }
    if return_atom_token_array:
        data["cropped_atom_array"] = cropped_atom_array
        data["cropped_token_array"] = cropped_token_array
    return data
|
| 541 |
+
|
| 542 |
+
def crop(
    self,
    sample_indice: pd.Series,
    bioassembly_dict: dict[str, Any],
    crop_size: int,
    method_weights: list[float],
    contiguous_crop_complete_lig: bool = True,
    spatial_crop_complete_lig: bool = True,
    drop_last: bool = True,
    remove_metal: bool = True,
) -> tuple[str, TokenArray, AtomArray, dict[str, Any], dict[str, Any], Any]:
    """
    Crop the bioassembly data according to the cropping configuration.

    Args:
        sample_indice: Row describing the sampled chain/interface.
        bioassembly_dict: Bioassembly data (token/atom arrays, etc.).
        crop_size: Maximum number of tokens to keep.
        method_weights: Sampling weights over the cropping methods.
        contiguous_crop_complete_lig: Keep ligands whole in contiguous crops.
        spatial_crop_complete_lig: Keep ligands whole in spatial crops.
        drop_last: Whether to drop the last incomplete segment.
        remove_metal: Whether to remove metal atoms before cropping.

    Returns:
        A 6-tuple of (crop_method, cropped token array, cropped atom array,
        cropped MSA features, cropped template features, reference token
        index), as produced by ``DataPipeline.crop``.  The previous
        annotation omitted the sixth element, which ``process_one`` unpacks.
    """
    return DataPipeline.crop(
        one_sample=sample_indice,
        bioassembly_dict=bioassembly_dict,
        crop_size=crop_size,
        msa_featurizer=self.msa_featurizer,
        template_featurizer=self.template_featurizer,
        method_weights=method_weights,
        contiguous_crop_complete_lig=contiguous_crop_complete_lig,
        spatial_crop_complete_lig=spatial_crop_complete_lig,
        drop_last=drop_last,
        remove_metal=remove_metal,
    )
|
| 572 |
+
|
| 573 |
+
def _get_sample_indice(self, idx: int) -> pd.Series:
|
| 574 |
+
"""
|
| 575 |
+
Retrieves the sample indice for a given index. If the dataset is grouped by PDB ID, it returns the first row of the PDB-idx.
|
| 576 |
+
Otherwise, it returns the row at the specified index.
|
| 577 |
+
|
| 578 |
+
Args:
|
| 579 |
+
idx: The index of the data sample to retrieve.
|
| 580 |
+
|
| 581 |
+
Returns:
|
| 582 |
+
A pandas Series containing the sample indice.
|
| 583 |
+
"""
|
| 584 |
+
if self.group_by_pdb_id:
|
| 585 |
+
# Row-0 of PDB-idx
|
| 586 |
+
sample_indice = self.indices_list[idx].iloc[0]
|
| 587 |
+
else:
|
| 588 |
+
sample_indice = self.indices_list.iloc[idx]
|
| 589 |
+
return sample_indice
|
| 590 |
+
|
| 591 |
+
def _get_eval_chain_interface_mask(
    self, idx: int, atom_array_chain_id: np.ndarray
) -> tuple[np.ndarray, np.ndarray, torch.Tensor, torch.Tensor]:
    """
    Build the evaluation chain/interface masks for a given index.

    Args:
        idx: The index of the data sample.
        atom_array_chain_id: Chain id of every atom in the atom array.

    Returns:
        A tuple (eval_type, cluster_id, chain_1_mask, chain_2_mask); the two
        masks have shape [N_eval, N_atom].

    Raises:
        ValueError: If no evaluable chain/interface exists for this sample.
    """
    if self.group_by_pdb_id:
        eval_df = self.indices_list[idx]
    else:
        eval_df = self.indices_list.iloc[idx : idx + 1]

    # Restrict to rows whose eval_type is a supported chain/interface kind.
    eval_df = eval_df[
        eval_df["eval_type"].apply(lambda x: x in EvaluationChainInterface)
    ].copy()
    if len(eval_df) < 1:
        raise ValueError(
            f"Cannot find a chain/interface for evaluation in the PDB."
        )

    def _row_masks(row):
        # For a single chain, chain 2 mirrors chain 1.
        mask_1 = atom_array_chain_id == row["chain_1_id"]
        if row["type"] == "chain":
            mask_2 = mask_1
        else:
            mask_2 = atom_array_chain_id == row["chain_2_id"]
        mask_1 = torch.tensor(mask_1).bool()
        mask_2 = torch.tensor(mask_2).bool()
        if mask_1.sum() == 0 or mask_2.sum() == 0:
            return None, None
        return mask_1, mask_2

    eval_df["chain_1_mask"], eval_df["chain_2_mask"] = zip(
        *eval_df.apply(_row_masks, axis=1)
    )
    eval_df = eval_df[eval_df["chain_1_mask"].notna()]  # drop rows with no atoms

    if len(eval_df) < 1:
        raise ValueError(
            f"Cannot find a chain/interface for evaluation in the atom_array."
        )

    eval_type = np.array(eval_df["eval_type"].tolist())
    cluster_id = np.array(eval_df["cluster_id"].tolist())
    chain_1_mask = torch.stack(eval_df["chain_1_mask"].tolist())  # [N_eval, N_atom]
    chain_2_mask = torch.stack(eval_df["chain_2_mask"].tolist())  # [N_eval, N_atom]

    return eval_type, cluster_id, chain_1_mask, chain_2_mask
|
| 644 |
+
|
| 645 |
+
def get_feature_and_label(
    self,
    idx: int,
    token_array: TokenArray,
    atom_array: AtomArray,
    msa_features: dict[str, Any],
    template_features: dict[str, Any],
    full_atom_array: AtomArray,
    is_spatial_crop: bool = True,
) -> tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]:
    """
    Build features and labels for one data point via a Featurizer, then
    attach permutation lists, full-complex labels, optional pocket masks,
    and optional evaluation chain/interface masks.

    Args:
        idx: Index of the data point.
        token_array: Cropped token array.
        atom_array: Cropped atom array.
        msa_features: Dictionary of MSA features (may be empty).
        template_features: Dictionary of template features (may be empty).
        full_atom_array: Full (uncropped) atom array.
        is_spatial_crop: Whether a spatial crop was applied.

    Returns:
        (features_dict, labels_dict, label_full_dict).

    Raises:
        ValueError: If no ligand can be found when pocket masks are requested.
    """
    featurizer = Featurizer(
        cropped_token_array=token_array,
        cropped_atom_array=atom_array,
        ref_pos_augment=self.ref_pos_augment,
        lig_atom_rename=self.lig_atom_rename,
    )
    features_dict = featurizer.get_all_input_features()
    labels_dict = featurizer.get_labels()

    # Atom-level permutation list.
    features_dict["atom_perm_list"] = featurizer.get_atom_permutation_list()

    # Ground-truth full-complex labels for multi-chain permutation.
    # Note: the returned full_atom_array may contain fewer atoms than the input.
    label_full_dict, full_atom_array = Featurizer.get_gt_full_complex_features(
        atom_array=full_atom_array,
        cropped_atom_array=atom_array,
        get_cropped_asym_only=is_spatial_crop,
    )

    # Masks for pocket metrics.
    if self.find_pocket:
        # Locate the entity/chain ids of the ligand of interest.
        sample_indice = self._get_sample_indice(idx=idx)
        if sample_indice.mol_1_type == "ligand":
            lig_entity_id = str(sample_indice.entity_1_id)
            lig_chain_id = str(sample_indice.chain_1_id)
        elif sample_indice.mol_2_type == "ligand":
            lig_entity_id = str(sample_indice.entity_2_id)
            lig_chain_id = str(sample_indice.chain_2_id)
        else:
            raise ValueError(f"Cannot find ligand from this data point.")
        # The cropped array must still contain the ligand of interest.
        assert lig_entity_id in set(atom_array.label_entity_id)
        assert lig_chain_id in set(atom_array.chain_id)

        # Asym id of the ligand in the `main` pocket.
        lig_asym_id = atom_array.label_asym_id[
            atom_array.chain_id == lig_chain_id
        ]
        assert len(np.unique(lig_asym_id)) == 1
        lig_asym_id = lig_asym_id[0]
        ligands = [lig_asym_id]

        if self.find_all_pockets:
            # Also collect other ligand copies sharing the same entity id.
            all_lig_asym_ids = set(
                full_atom_array[
                    full_atom_array.label_entity_id == lig_entity_id
                ].label_asym_id
            )
            ligands.extend(list(all_lig_asym_ids - {lig_asym_id}))

        # The `main` pocket is at index 0.  Shapes: [N_pocket, N_atom];
        # if not find_all_pockets then N_pocket == 1.
        interested_ligand_mask, pocket_mask = featurizer.get_lig_pocket_mask(
            atom_array=full_atom_array, lig_label_asym_id=ligands
        )
        label_full_dict["pocket_mask"] = pocket_mask
        label_full_dict["interested_ligand_mask"] = interested_ligand_mask

    # Masks for chain/interface metrics.
    if self.find_eval_chain_interface:
        eval_type, cluster_id, chain_1_mask, chain_2_mask = (
            self._get_eval_chain_interface_mask(
                idx=idx, atom_array_chain_id=full_atom_array.chain_id
            )
        )
        labels_dict["eval_type"] = eval_type  # [N_eval]
        labels_dict["cluster_id"] = cluster_id  # [N_eval]
        labels_dict["chain_1_mask"] = chain_1_mask  # [N_eval, N_atom]
        labels_dict["chain_2_mask"] = chain_2_mask  # [N_eval, N_atom]

    # Replace not-yet-available features with dummies.
    dummy_feats = []
    if len(msa_features) == 0:
        dummy_feats.append("msa")
    else:
        features_dict.update(dict_to_tensor(msa_features))
    if len(template_features) == 0:
        dummy_feats.append("template")
    else:
        features_dict.update(dict_to_tensor(template_features))
    features_dict = make_dummy_feature(
        features_dict=features_dict, dummy_feats=dummy_feats
    )

    # Cast everything to the expected dtypes.
    features_dict = data_type_transform(feat_or_label_dict=features_dict)
    labels_dict = data_type_transform(feat_or_label_dict=labels_dict)

    # Distillation samples carry no real resolution.
    features_dict["is_distillation"] = torch.tensor([self.is_distillation])
    if self.is_distillation is True:
        features_dict["resolution"] = torch.tensor([-1.0])
    return features_dict, labels_dict, label_full_dict
|
| 775 |
+
|
| 776 |
+
|
| 777 |
+
def get_msa_featurizer(configs, dataset_name: str, stage: str) -> Optional[Callable]:
    """
    Build an MSAFeaturizer from the given configurations.

    Args:
        configs: Configuration mapping containing the MSA settings.
        dataset_name: The name of the dataset.
        stage: The dataset stage (e.g. 'train', 'test').

    Returns:
        An MSAFeaturizer when MSA is enabled in the configs, otherwise None.
    """
    if "msa" not in configs["data"] or not configs["data"]["msa"]["enable"]:
        return None

    msa_args = deepcopy(configs["data"]["msa"])

    # Per-dataset overrides; "prot"/"rna" sub-dicts are merged key-by-key.
    dataset_config = configs["data"][dataset_name]
    if "msa" in dataset_config:
        for key, value in dataset_config["msa"].items():
            if key in ("prot", "rna"):
                for sub_key, sub_value in value.items():
                    msa_args[key][sub_key] = sub_value
            else:
                msa_args[key] = value

    shared_args = {
        "dataset_name": dataset_name,
        "merge_method": msa_args["merge_method"],
        "max_size": msa_args["max_size"][stage],
    }
    prot_msa_args = msa_args["prot"]
    prot_msa_args.update(shared_args)
    rna_msa_args = msa_args["rna"]
    rna_msa_args.update(shared_args)

    return MSAFeaturizer(
        prot_msa_args=prot_msa_args,
        rna_msa_args=rna_msa_args,
        enable_rna_msa=configs.data.msa.enable_rna_msa,
    )
|
| 827 |
+
|
| 828 |
+
|
| 829 |
+
class WeightedMultiDataset(Dataset):
    """
    A dataset that merges several datasets, each with per-datapoint
    sampling weights and a dataset-level sampling weight.
    """

    def __init__(
        self,
        datasets: list[Dataset],
        dataset_names: list[str],
        datapoint_weights: list[list[float]],
        dataset_sample_weights: list[torch.tensor],
    ):
        """
        Initialize the WeightedMultiDataset.

        Args:
            datasets: A list of Dataset objects.
            dataset_names: Dataset names, parallel to ``datasets``.
            datapoint_weights: Per-datapoint sampling weights, one list per
                dataset.
            dataset_sample_weights: Dataset-level sampling weights.
        """
        self.datasets = datasets
        self.dataset_names = dataset_names
        self.datapoint_weights = datapoint_weights
        self.dataset_sample_weights = torch.Tensor(dataset_sample_weights)
        # Counters kept for external bookkeeping (not used internally here).
        self.iteration = 0
        self.offset = 0
        self.init_datasets()

    def init_datasets(self):
        """Compute global per-datapoint sampling weights across all datasets."""
        self.merged_datapoint_weights = []
        self.weight = 0.0
        self.dataset_indices = []
        self.within_dataset_indices = []
        triples = zip(
            self.datasets, self.datapoint_weights, self.dataset_sample_weights
        )
        for dataset_index, (dataset, weight_list, dataset_weight) in enumerate(
            triples
        ):
            # Normalize within the dataset, then scale by its dataset weight.
            total = sum(weight_list)
            scaled = [dataset_weight * w / total for w in weight_list]
            self.merged_datapoint_weights.extend(scaled)
            self.weight += dataset_weight
            self.dataset_indices.extend([dataset_index] * len(scaled))
            self.within_dataset_indices.extend(range(len(scaled)))
        self.merged_datapoint_weights = torch.tensor(
            self.merged_datapoint_weights, dtype=torch.float64
        )

    def __len__(self) -> int:
        return len(self.merged_datapoint_weights)

    def __getitem__(self, index: int) -> dict[str, dict]:
        dataset = self.datasets[self.dataset_indices[index]]
        return dataset[self.within_dataset_indices[index]]
|
| 890 |
+
|
| 891 |
+
|
| 892 |
+
def get_weighted_pdb_weight(
|
| 893 |
+
data_type: str,
|
| 894 |
+
cluster_size: int,
|
| 895 |
+
chain_count: dict,
|
| 896 |
+
eps: float = 1e-9,
|
| 897 |
+
beta_dict: dict = {
|
| 898 |
+
"chain": 0.5,
|
| 899 |
+
"interface": 1,
|
| 900 |
+
},
|
| 901 |
+
alpha_dict: dict = {
|
| 902 |
+
"prot": 3,
|
| 903 |
+
"nuc": 3,
|
| 904 |
+
"ligand": 1,
|
| 905 |
+
},
|
| 906 |
+
) -> float:
|
| 907 |
+
"""
|
| 908 |
+
Get sample weight for each examples in weighted pdb dataset. AF3-SI (1)
|
| 909 |
+
Args:
|
| 910 |
+
data_type: chain or interface
|
| 911 |
+
cluster_size: cluster size of this chain/interface
|
| 912 |
+
chain_count: count of each kinds of chains, {"prot": int, "nuc": int, "ligand": int}
|
| 913 |
+
Returns:
|
| 914 |
+
weights: float
|
| 915 |
+
"""
|
| 916 |
+
assert cluster_size > 0
|
| 917 |
+
assert data_type in ["chain", "interface"]
|
| 918 |
+
beta = beta_dict[data_type]
|
| 919 |
+
assert set(chain_count.keys()).issubset(set(alpha_dict.keys()))
|
| 920 |
+
weight = (
|
| 921 |
+
beta
|
| 922 |
+
* sum(
|
| 923 |
+
[alpha * chain_count[data_mode] for data_mode, alpha in alpha_dict.items()]
|
| 924 |
+
)
|
| 925 |
+
/ (cluster_size + eps)
|
| 926 |
+
)
|
| 927 |
+
return weight
|
| 928 |
+
|
| 929 |
+
|
| 930 |
+
def calc_weights_for_df(
    indices_df: pd.DataFrame, beta_dict: dict[str, Any], alpha_dict: dict[str, Any]
) -> pd.DataFrame:
    """
    Calculate sampling weights for each example in the dataframe.

    Args:
        indices_df: DataFrame of chain/interface indices.
        beta_dict: Beta values per data type.
        alpha_dict: Alpha values per chain kind.

    Returns:
        The DataFrame with an added 'weights' column.
    """
    # Key identifying (pdb, assembly, sorted entity pair) for each row.
    indices_df["pdb_sorted_entity_id"] = indices_df.apply(
        lambda x: f"{x['pdb_id']}_{x['assembly_id']}_{'_'.join(sorted([str(x['entity_1_id']), str(x['entity_2_id'])]))}",
        axis=1,
    )

    # Number of repeated entities within the same assembly.
    entity_member_num_dict = {
        key: len(sub_df)
        for key, sub_df in indices_df.groupby("pdb_sorted_entity_id")
    }
    indices_df["pdb_sorted_entity_id_member_num"] = indices_df.apply(
        lambda x: entity_member_num_dict[x["pdb_sorted_entity_id"]], axis=1
    )

    # Cluster size = number of distinct (assembly, entity) keys per cluster.
    cluster_size_record = {
        cluster_id: len(set(sub_df["pdb_sorted_entity_id"]))
        for cluster_id, sub_df in indices_df.groupby("cluster_id")
    }

    weights = []
    for _, row in indices_df.iterrows():
        counts = {"prot": 0, "nuc": 0, "ligand": 0}
        for mol_type in (row["mol_1_type"], row["mol_2_type"]):
            if mol_type in counts:
                counts[mol_type] += 1
        # Weight specific to (assembly, entity(chain/interface)).
        weights.append(
            get_weighted_pdb_weight(
                data_type=row["type"],
                cluster_size=cluster_size_record[row["cluster_id"]],
                chain_count=counts,
                beta_dict=beta_dict,
                alpha_dict=alpha_dict,
            )
        )
    # Spread each entity's weight over its repeated members.
    indices_df["weights"] = weights / indices_df["pdb_sorted_entity_id_member_num"]
    return indices_df
|
| 982 |
+
|
| 983 |
+
|
| 984 |
+
def get_sample_weights(
    sampler_type: str,
    indices_df: pd.DataFrame = None,
    beta_dict: dict = None,
    alpha_dict: dict = None,
    force_recompute_weight: bool = False,
) -> Union[pd.Series, list[float]]:
    """
    Compute sample weights for the given sampler type.

    Fix: the previous defaults used mutable dict literals, which are shared
    across calls; they are now built per call (same effective values:
    beta {"chain": 0.5, "interface": 1}, alpha {"prot": 3, "nuc": 3, "ligand": 1}).

    Args:
        sampler_type: 'weighted' or 'uniform'.
        indices_df: DataFrame of indices (required for both sampler types).
        beta_dict: Beta values per data type (weighted sampler only).
        alpha_dict: Alpha values per chain kind (weighted sampler only).
        force_recompute_weight: Recompute weights even if a 'weights'
            column already exists.

    Returns:
        The per-example sampling weights.

    Raises:
        ValueError: If an unknown sampler type is provided.
    """
    if beta_dict is None:
        beta_dict = {"chain": 0.5, "interface": 1}
    if alpha_dict is None:
        alpha_dict = {"prot": 3, "nuc": 3, "ligand": 1}
    if sampler_type == "weighted":
        assert indices_df is not None
        if "weights" not in indices_df.columns or force_recompute_weight:
            indices_df = calc_weights_for_df(
                indices_df=indices_df,
                beta_dict=beta_dict,
                alpha_dict=alpha_dict,
            )
        return indices_df["weights"].astype("float32")
    elif sampler_type == "uniform":
        assert indices_df is not None
        return [1 / len(indices_df) for _ in range(len(indices_df))]
    else:
        raise ValueError(f"Unknown sampler type: {sampler_type}")
|
| 1028 |
+
|
| 1029 |
+
|
| 1030 |
+
def get_datasets(
    configs: ConfigDict, error_dir: Optional[str]
) -> tuple[WeightedMultiDataset, dict[str, BaseSingleDataset]]:
    """
    Build the training and testing datasets from the given configs.

    Args:
        configs: A ConfigDict containing the dataset configurations.
        error_dir: The directory where error logs will be saved.

    Returns:
        A tuple of (weighted training dataset, {test name: test dataset}).
    """

    def _get_dataset_param(config_dict, dataset_name: str, stage: str):
        # Template_featurizer is under development.
        # Lig_atom_rename/shuffle_mols/shuffle_sym_ids do not affect the
        # performance very much.
        return {
            "name": dataset_name,
            **config_dict["base_info"],
            "cropping_configs": config_dict["cropping_configs"],
            "error_dir": error_dir,
            "msa_featurizer": get_msa_featurizer(configs, dataset_name, stage),
            "template_featurizer": None,
            "lig_atom_rename": config_dict.get("lig_atom_rename", False),
            "shuffle_mols": config_dict.get("shuffle_mols", False),
            "shuffle_sym_ids": config_dict.get("shuffle_sym_ids", False),
        }

    data_config = configs.data
    logger.info(f"Using train sets {data_config.train_sets}")
    assert len(data_config.train_sets) == len(
        data_config.train_sampler.train_sample_weights
    )

    train_datasets = []
    datapoint_weights = []
    for train_name in data_config.train_sets:
        train_cfg = data_config[train_name].to_dict()
        params = _get_dataset_param(
            train_cfg, dataset_name=train_name, stage="train"
        )
        params["ref_pos_augment"] = data_config.get("train_ref_pos_augment", True)
        params["limits"] = data_config.get("limits", -1)
        single_dataset = BaseSingleDataset(**params)
        train_datasets.append(single_dataset)
        datapoint_weights.append(
            get_sample_weights(
                **data_config[train_name]["sampler_configs"],
                indices_df=single_dataset.indices_list,
            )
        )
    train_dataset = WeightedMultiDataset(
        datasets=train_datasets,
        dataset_names=data_config.train_sets,
        datapoint_weights=datapoint_weights,
        dataset_sample_weights=data_config.train_sampler.train_sample_weights,
    )

    test_datasets = {}
    for test_name in data_config.test_sets:
        test_cfg = data_config[test_name].to_dict()
        params = _get_dataset_param(test_cfg, dataset_name=test_name, stage="test")
        params["ref_pos_augment"] = data_config.get("test_ref_pos_augment", True)
        test_datasets[test_name] = BaseSingleDataset(**params)
    return train_dataset, test_datasets
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/featurizer.py
ADDED
|
@@ -0,0 +1,802 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import copy
|
| 16 |
+
from collections import defaultdict
|
| 17 |
+
from typing import Union
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
from biotite.structure import Atom, AtomArray, get_residue_starts
|
| 22 |
+
from sklearn.neighbors import KDTree
|
| 23 |
+
|
| 24 |
+
from protenix.data.constants import STD_RESIDUES, get_all_elems
|
| 25 |
+
from protenix.data.tokenizer import Token, TokenArray
|
| 26 |
+
from protenix.data.utils import get_ligand_polymer_bond_mask
|
| 27 |
+
from protenix.utils.geometry import angle_3p, random_transform
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class Featurizer(object):
|
| 31 |
+
def __init__(
|
| 32 |
+
self,
|
| 33 |
+
cropped_token_array: TokenArray,
|
| 34 |
+
cropped_atom_array: AtomArray,
|
| 35 |
+
ref_pos_augment: bool = True,
|
| 36 |
+
lig_atom_rename: bool = False,
|
| 37 |
+
) -> None:
|
| 38 |
+
"""
|
| 39 |
+
Args:
|
| 40 |
+
cropped_token_array (TokenArray): TokenArray object after cropping
|
| 41 |
+
cropped_atom_array (AtomArray): AtomArray object after cropping
|
| 42 |
+
ref_pos_augment (bool): Boolean indicating whether apply random rotation and translation on ref_pos
|
| 43 |
+
lig_atom_rename (bool): Boolean indicating whether rename atom name for ligand atoms
|
| 44 |
+
"""
|
| 45 |
+
self.cropped_token_array = cropped_token_array
|
| 46 |
+
|
| 47 |
+
self.cropped_atom_array = cropped_atom_array
|
| 48 |
+
self.ref_pos_augment = ref_pos_augment
|
| 49 |
+
self.lig_atom_rename = lig_atom_rename
|
| 50 |
+
|
| 51 |
+
@staticmethod
|
| 52 |
+
def encoder(encode_def_list: list[str], input_list: list[str]) -> torch.Tensor:
|
| 53 |
+
"""
|
| 54 |
+
Encode a list of input values into a binary format using a specified encoding definition list.
|
| 55 |
+
|
| 56 |
+
Args:
|
| 57 |
+
encode_def_list (list): A list of encoding definitions.
|
| 58 |
+
input_list (list): A list of input values to be encoded.
|
| 59 |
+
|
| 60 |
+
Returns:
|
| 61 |
+
torch.Tensor: A tensor representing the binary encoding of the input values.
|
| 62 |
+
"""
|
| 63 |
+
onehot_dict = {}
|
| 64 |
+
num_keys = len(encode_def_list)
|
| 65 |
+
for index, key in enumerate(encode_def_list):
|
| 66 |
+
onehot = [0] * num_keys
|
| 67 |
+
onehot[index] = 1
|
| 68 |
+
onehot_dict[key] = onehot
|
| 69 |
+
|
| 70 |
+
onehot_encoded_data = [onehot_dict[item] for item in input_list]
|
| 71 |
+
onehot_tensor = torch.Tensor(onehot_encoded_data)
|
| 72 |
+
return onehot_tensor
|
| 73 |
+
|
| 74 |
+
@staticmethod
def restype_onehot_encoded(restype_list: list[str]) -> torch.Tensor:
    """
    Ref: AlphaFold3 SI Table 5 "restype".

    One-hot encode residue types over 32 classes: 20 amino acids + unknown,
    4 RNA nucleotides + unknown, 4 DNA nucleotides + unknown, and the gap
    symbol "-". Ligand tokens are expected to arrive as "UNK".

    Args:
        restype_list (list[str]): residue-type names to encode.

    Returns:
        torch.Tensor: one-hot encoded residue types, [len(restype_list), 32].
    """
    # Vocabulary order is fixed by STD_RESIDUES, with the gap appended last.
    vocabulary = list(STD_RESIDUES.keys())
    vocabulary.append("-")
    return Featurizer.encoder(vocabulary, restype_list)
|
| 91 |
+
|
| 92 |
+
@staticmethod
def elem_onehot_encoded(elem_list: list[str]) -> torch.Tensor:
    """
    Ref: AlphaFold3 SI Table 5 "ref_element".

    One-hot encode the chemical element of each reference-conformer atom,
    over the element vocabulary supplied by get_all_elems().

    Args:
        elem_list (list[str]): element symbols to encode.

    Returns:
        torch.Tensor: one-hot encoded elements, [len(elem_list), n_elements].
    """
    element_vocabulary = get_all_elems()
    return Featurizer.encoder(element_vocabulary, elem_list)
|
| 106 |
+
|
| 107 |
+
@staticmethod
|
| 108 |
+
def ref_atom_name_chars_encoded(atom_names: list[str]) -> torch.Tensor:
|
| 109 |
+
"""
|
| 110 |
+
Ref: AlphaFold3 SI Table 5 "ref_atom_name_chars"
|
| 111 |
+
One-hot encoding of the unique atom names in the reference conformer.
|
| 112 |
+
Each character is encoded as ord(c) − 32, and names are padded to length 4.
|
| 113 |
+
|
| 114 |
+
Args:
|
| 115 |
+
atom_name_list (List[str]): A list of atom names.
|
| 116 |
+
|
| 117 |
+
Returns:
|
| 118 |
+
torch.Tensor: A Tensor of character encoded atom names
|
| 119 |
+
"""
|
| 120 |
+
onehot_dict = {}
|
| 121 |
+
for index, key in enumerate(range(64)):
|
| 122 |
+
onehot = [0] * 64
|
| 123 |
+
onehot[index] = 1
|
| 124 |
+
onehot_dict[key] = onehot
|
| 125 |
+
# [N_atom, 4, 64]
|
| 126 |
+
mol_encode = []
|
| 127 |
+
for atom_name in atom_names:
|
| 128 |
+
# [4, 64]
|
| 129 |
+
atom_encode = []
|
| 130 |
+
for name_str in atom_name.ljust(4):
|
| 131 |
+
atom_encode.append(onehot_dict[ord(name_str) - 32])
|
| 132 |
+
mol_encode.append(atom_encode)
|
| 133 |
+
onehot_tensor = torch.Tensor(mol_encode)
|
| 134 |
+
return onehot_tensor
|
| 135 |
+
|
| 136 |
+
@staticmethod
|
| 137 |
+
def get_prot_nuc_frame(token: Token, centre_atom: Atom) -> tuple[int, list[int]]:
|
| 138 |
+
"""
|
| 139 |
+
Ref: AlphaFold3 SI Chapter 4.3.2
|
| 140 |
+
For proteins/DNA/RNA, we use the three atoms [N, CA, C] / [C1', C3', C4']
|
| 141 |
+
|
| 142 |
+
Args:
|
| 143 |
+
token (Token): Token object.
|
| 144 |
+
centre_atom (Atom): Biotite Atom object of Token centre atom.
|
| 145 |
+
|
| 146 |
+
Returns:
|
| 147 |
+
has_frame (int): 1 if the token has frame, 0 otherwise.
|
| 148 |
+
frame_atom_index (List[int]): The index of the atoms used to construct the frame.
|
| 149 |
+
"""
|
| 150 |
+
if centre_atom.mol_type == "protein":
|
| 151 |
+
# For protein
|
| 152 |
+
abc_atom_name = ["N", "CA", "C"]
|
| 153 |
+
else:
|
| 154 |
+
# For DNA and RNA
|
| 155 |
+
abc_atom_name = [r"C1'", r"C3'", r"C4'"]
|
| 156 |
+
|
| 157 |
+
idx_in_atom_indices = []
|
| 158 |
+
for i in abc_atom_name:
|
| 159 |
+
if centre_atom.mol_type == "protein" and "N" not in token.atom_names:
|
| 160 |
+
return 0, [-1, -1, -1]
|
| 161 |
+
elif centre_atom.mol_type != "protein" and "C1'" not in token.atom_names:
|
| 162 |
+
return 0, [-1, -1, -1]
|
| 163 |
+
idx_in_atom_indices.append(token.atom_names.index(i))
|
| 164 |
+
# Protein/DNA/RNA always has frame
|
| 165 |
+
has_frame = 1
|
| 166 |
+
frame_atom_index = [token.atom_indices[i] for i in idx_in_atom_indices]
|
| 167 |
+
return has_frame, frame_atom_index
|
| 168 |
+
|
| 169 |
+
@staticmethod
def get_lig_frame(
    token: Token,
    centre_atom: Atom,
    lig_res_ref_conf_kdtree: dict[str, tuple[KDTree, list[int]]],
    ref_pos: torch.Tensor,
    ref_mask: torch.Tensor,
) -> tuple[int, list[int]]:
    """
    Ref: AlphaFold3 SI Chapter 4.3.2
    For ligand tokens the frame (a, b, c) is built from the reference
    conformer: b is the token's (single) centre atom, and a / c are its two
    nearest neighbours within the same residue's conformer.

    Args:
        token (Token): Token object.
        centre_atom (Atom): Biotite Atom object of the token's centre atom.
        lig_res_ref_conf_kdtree (dict[str, tuple[KDTree, list[int]]]):
            per-residue KDTree over reference positions plus the atom
            indices backing each tree (None tree when the residue has < 3 atoms).
        ref_pos (torch.Tensor): reference-conformer positions. Size=[N_atom, 3]
        ref_mask (torch.Tensor): which atom slots are valid in the
            reference conformer. Size=[N_atom]

    Returns:
        tuple[int, list[int]]:
            has_frame (int): 1 if the token has a valid frame, 0 otherwise.
            frame_atom_index (list[int]): frame atom indices (-1 where absent).
    """
    kdtree, atom_ids = lig_res_ref_conf_kdtree[centre_atom.ref_space_uid]
    b_idx = token.centre_atom_index
    b_ref_pos = ref_pos[b_idx]
    if kdtree is None:
        # Residue has fewer than 3 atoms: no frame can be constructed.
        frame_atom_index = [-1, b_idx, -1]
        has_frame = 0
    else:
        # Nearest neighbours of b in the reference conformer
        # (slot 0 of the query result is b itself).
        _dist, ind = kdtree.query([b_ref_pos], k=3)
        a_idx, c_idx = atom_ids[ind[0][1]], atom_ids[ind[0][2]]
        frame_atom_index = [a_idx, b_idx, c_idx]

        # Frame is valid only if all three atoms exist in the reference
        # conformer. Fix: cast to int so the return type is consistent with
        # the other paths and the documented contract (all() returns bool).
        has_frame = int(all(ref_mask[idx] for idx in frame_atom_index))

        # Colinearity check: frames within 25 degrees of colinear are invalid.
        if has_frame:
            theta_degrees = angle_3p(*[ref_pos[idx] for idx in frame_atom_index])
            if theta_degrees <= 25 or theta_degrees >= 155:
                has_frame = 0
    return has_frame, frame_atom_index
|
| 214 |
+
|
| 215 |
+
@staticmethod
def get_token_frame(
    token_array: TokenArray,
    atom_array: AtomArray,
    ref_pos: torch.Tensor,
    ref_mask: torch.Tensor,
) -> TokenArray:
    """
    Ref: AlphaFold3 SI Chapter 4.3.2
    The atoms (a_i, b_i, c_i) of token i's frame depend on chain type:
    protein tokens use the backbone (N, CA, C); DNA/RNA tokens use
    (C1', C3', C4'). All other tokens (small molecules, glycans, ions)
    contain one atom per token: the token atom is b_i and the two closest
    conformer atoms are a_i and c_i. A frame is invalid when the three
    atoms are near-colinear (< 25 degree deviation) or do not exist.

    Note: frames are constructed from the reference conformer.

    Args:
        token_array (TokenArray): a list of tokens.
        atom_array (AtomArray): an atom array.
        ref_pos (torch.Tensor): reference-conformer positions. Size=[N_atom, 3]
        ref_mask (torch.Tensor): valid-slot mask of the reference conformer. Size=[N_atom]

    Returns:
        TokenArray: tokens with frame annotations set:
            - has_frame: 1 if the token has a frame, 0 otherwise.
            - frame_atom_index: atom indices used to construct the frame.
    """
    token_array_w_frame = copy.deepcopy(token_array)

    # Build one KDTree per residue that may need a conformer-derived frame,
    # so per-token neighbour queries avoid redundant distance calculations.
    lig_res_ref_conf_kdtree = {}
    # Ligands and non-standard residues use the reference conformer for frames.
    lig_atom_array = atom_array[
        (atom_array.mol_type == "ligand")
        | (~np.isin(atom_array.res_name, list(STD_RESIDUES.keys())))
    ]
    for ref_space_uid in np.unique(lig_atom_array.ref_space_uid):
        # ref_space_uid uniquely identifies each residue.
        atom_ids = np.where(atom_array.ref_space_uid == ref_space_uid)[0]
        if len(atom_ids) >= 3:
            kdtree = KDTree(ref_pos[atom_ids], metric="euclidean")
        else:
            # Fewer than 3 atoms: frame cannot be built (invalid frame).
            kdtree = None
        lig_res_ref_conf_kdtree[ref_space_uid] = (kdtree, atom_ids)

    # Fix: removed the dead `has_frame = []` initialization that was
    # immediately shadowed by the scalar assigned inside the loop.
    for token in token_array_w_frame:
        centre_atom = atom_array[token.centre_atom_index]
        if (
            centre_atom.mol_type != "ligand"
            and centre_atom.res_name in STD_RESIDUES
        ):
            has_frame, frame_atom_index = Featurizer.get_prot_nuc_frame(
                token, centre_atom
            )
        else:
            has_frame, frame_atom_index = Featurizer.get_lig_frame(
                token, centre_atom, lig_res_ref_conf_kdtree, ref_pos, ref_mask
            )

        token.has_frame = has_frame
        token.frame_atom_index = frame_atom_index
    return token_array_w_frame
|
| 285 |
+
|
| 286 |
+
def get_token_features(self) -> dict[str, torch.Tensor]:
    """
    Ref: AlphaFold3 SI Chapter 2.8

    Build the per-token features; each has leading dimension [N_token].
    All per-token scalar annotations are read from each token's centre atom.

    Returns:
        dict[str, torch.Tensor]: token features (token_index, residue_index,
        asym_id, entity_id, sym_id, restype).
    """
    centre_atom_indices = self.cropped_token_array.get_annotation(
        "centre_atom_index"
    )
    centre_atoms = self.cropped_atom_array[centre_atom_indices]

    # One-hot residue types from the canonical sequence residue names.
    restype_onehot = self.restype_onehot_encoded(centre_atoms.cano_seq_resname)

    return {
        "token_index": torch.arange(0, len(self.cropped_token_array)),
        "residue_index": torch.Tensor(centre_atoms.res_id.astype(int)).long(),
        "asym_id": torch.Tensor(centre_atoms.asym_id_int).long(),
        "entity_id": torch.Tensor(centre_atoms.entity_id_int).long(),
        "sym_id": torch.Tensor(centre_atoms.sym_id_int).long(),
        "restype": restype_onehot,
    }
|
| 316 |
+
|
| 317 |
+
def get_chain_perm_features(self) -> dict[str, torch.Tensor]:
    """
    Build the features used by chain permutation, which relies on
    "entity_mol_id", "mol_id" and "mol_atom_index" rather than
    "entity_id", "asym_id" and "residue_index".

    Each feature has shape [N_atom].

    Returns:
        dict[str, torch.Tensor]: chain permutation features.
    """
    atoms = self.cropped_atom_array
    # All three features are plain long-tensor copies of atom annotations.
    return {
        annotation: torch.Tensor(getattr(atoms, annotation)).long()
        for annotation in ("mol_id", "mol_atom_index", "entity_mol_id")
    }
|
| 339 |
+
|
| 340 |
+
def get_renamed_atom_names(self) -> np.ndarray:
    """
    Rename ligand atom names to element+counter (e.g. C1, C2, N1) to avoid
    information leakage through author-assigned names.

    Returns:
        np.ndarray: atom names with ligand residues renamed; all other
        residues keep their original names.
    """
    boundaries = get_residue_starts(
        self.cropped_atom_array, add_exclusive_stop=True
    )
    renamed = copy.deepcopy(self.cropped_atom_array.atom_name)
    for begin, end in zip(boundaries[:-1], boundaries[1:]):
        # Only ligand residues are renamed.
        if self.cropped_atom_array.mol_type[begin] != "ligand":
            continue

        # Per-residue running count of each element, 1-based.
        counts = defaultdict(int)
        fresh_names = []
        for element in self.cropped_atom_array.element[begin:end]:
            counts[element] += 1
            fresh_names.append(f"{element.upper()}{counts[element]}")
        renamed[begin:end] = fresh_names
    return renamed
|
| 363 |
+
|
| 364 |
+
def get_reference_features(self) -> dict[str, torch.Tensor]:
    """
    Ref: AlphaFold3 SI Chapter 2.8

    Get reference-conformer features; atom-level features have leading
    dimension [N_atom], frame features have leading dimension [N_token].

    Returns:
        Dict[str, torch.Tensor]: a dict of reference features.
    """
    # Apply an (optional) independent random rigid transform per residue
    # (grouped by ref_space_uid), centralizing each conformer.
    # NOTE(review): the concatenation below assumes atoms are ordered so
    # that sorting np.unique's output matches the atom order of
    # cropped_atom_array — confirm ref_space_uid is assigned monotonically.
    ref_pos = []
    for ref_space_uid in np.unique(self.cropped_atom_array.ref_space_uid):
        res_ref_pos = random_transform(
            self.cropped_atom_array.ref_pos[
                self.cropped_atom_array.ref_space_uid == ref_space_uid,
            ],
            apply_augmentation=self.ref_pos_augment,
            centralize=True,
        )
        ref_pos.append(res_ref_pos)
    ref_pos = np.concatenate(ref_pos)

    ref_features = {}
    ref_features["ref_pos"] = torch.Tensor(ref_pos)
    ref_features["ref_mask"] = torch.Tensor(self.cropped_atom_array.ref_mask).long()
    # One-hot element types, per AF3 SI Table 5 "ref_element".
    ref_features["ref_element"] = Featurizer.elem_onehot_encoded(
        self.cropped_atom_array.element
    ).long()
    ref_features["ref_charge"] = torch.Tensor(
        self.cropped_atom_array.ref_charge
    ).long()

    # Optionally replace ligand atom names to avoid information leakage.
    if self.lig_atom_rename:
        atom_names = self.get_renamed_atom_names()
    else:
        atom_names = self.cropped_atom_array.atom_name

    ref_features["ref_atom_name_chars"] = Featurizer.ref_atom_name_chars_encoded(
        atom_names
    ).long()
    ref_features["ref_space_uid"] = torch.Tensor(
        self.cropped_atom_array.ref_space_uid
    ).long()

    # Frames are derived from the (augmented) reference positions, so this
    # must run after ref_pos/ref_mask are finalized above.
    token_array_with_frame = self.get_token_frame(
        token_array=self.cropped_token_array,
        atom_array=self.cropped_atom_array,
        ref_pos=ref_features["ref_pos"],
        ref_mask=ref_features["ref_mask"],
    )
    ref_features["has_frame"] = torch.Tensor(
        token_array_with_frame.get_annotation("has_frame")
    ).long()  # [N_token]
    ref_features["frame_atom_index"] = torch.Tensor(
        token_array_with_frame.get_annotation("frame_atom_index")
    ).long()  # [N_token, 3]
    return ref_features
|
| 421 |
+
|
| 422 |
+
def get_bond_features(self) -> dict[str, torch.Tensor]:
    """
    Ref: AlphaFold3 SI Chapter 2.8
    A 2D matrix indicating whether any atom in token i is bonded to any atom
    in token j, restricted to polymer-ligand and ligand-ligand bonds (plus
    intra-residue bonds of non-standard polymer residues).
    The bond feature has size [N_token, N_token].

    Returns:
        dict[str, torch.Tensor]: {"token_bonds": [N_token, N_token] tensor}.
    """
    num_tokens = len(self.cropped_token_array)
    adj_matrix = self.cropped_atom_array.bonds.adjacency_matrix().astype(int)
    atom_bond_mask = adj_matrix > 0
    token_adj_matrix = np.zeros((num_tokens, num_tokens), dtype=int)

    # Perf fix: per-token attributes were recomputed for every (i, j) pair
    # inside the O(N^2) loop; hoist them into O(N) precomputed lists.
    # All per-token attributes are taken from the token's first atom.
    token_atom_indices = [
        self.cropped_token_array[i].atom_indices for i in range(num_tokens)
    ]
    first_atoms = [indices[0] for indices in token_atom_indices]
    mol_types = [self.cropped_atom_array.mol_type[a] for a in first_atoms]
    res_names = [self.cropped_atom_array.res_name[a] for a in first_atoms]
    ref_space_uids = [self.cropped_atom_array.ref_space_uid[a] for a in first_atoms]
    is_unstd_res = [
        res_names[i] not in STD_RESIDUES and mol_types[i] != "ligand"
        for i in range(num_tokens)
    ]
    is_polymer = [m in ("protein", "dna", "rna") for m in mol_types]

    for i in range(num_tokens):
        for j in range(i + 1, num_tokens):
            # Polymer-polymer bonds (std-std, std-unstd, and inter-residue
            # unstd-unstd) are not included in token_bonds; only
            # intra-residue bonds between two non-standard polymer tokens
            # survive this filter.
            if is_polymer[i] and is_polymer[j]:
                is_same_res = ref_space_uids[i] == ref_space_uids[j]
                unstd_res_bonds = is_unstd_res[i] and is_unstd_res[j]
                if not (is_same_res and unstd_res_bonds):
                    continue

            sub_matrix = atom_bond_mask[
                np.ix_(token_atom_indices[i], token_atom_indices[j])
            ]
            if np.any(sub_matrix):
                token_adj_matrix[i, j] = 1
                token_adj_matrix[j, i] = 1
    return {"token_bonds": torch.Tensor(token_adj_matrix)}
|
| 475 |
+
|
| 476 |
+
def get_extra_features(self) -> dict[str, torch.Tensor]:
    """
    Build extra features not listed in AlphaFold3 SI Chapter 2.8 Table 5;
    atom-level features have leading dimension [N_atom].

    Returns:
        dict[str, torch.Tensor]: a dict of extra features.
    """
    # Map every atom index to the index of the token that owns it.
    token_idx_of_atom = {}
    for token_idx, token in enumerate(self.cropped_token_array.tokens):
        for atom_idx in token.atom_indices:
            token_idx_of_atom[atom_idx] = token_idx

    # Emit the mapping in the same order as the atom array.
    atoms = self.cropped_atom_array
    atom_to_token_idx = [token_idx_of_atom[a] for a in range(len(atoms))]

    extra_features = {
        "atom_to_token_idx": torch.Tensor(atom_to_token_idx).long(),
        "atom_to_tokatom_idx": torch.Tensor(atoms.tokatom_idx).long(),
        "is_protein": torch.Tensor(atoms.is_protein).long(),
        "is_ligand": torch.Tensor(atoms.is_ligand).long(),
        "is_dna": torch.Tensor(atoms.is_dna).long(),
        "is_rna": torch.Tensor(atoms.is_rna).long(),
    }

    # Resolution is per-structure; fall back to -1 when not annotated.
    if "resolution" in atoms._annot:
        extra_features["resolution"] = torch.Tensor([atoms.resolution[0]])
    else:
        extra_features["resolution"] = torch.Tensor([-1])
    return extra_features
|
| 516 |
+
|
| 517 |
+
@staticmethod
def get_lig_pocket_mask(
    atom_array: AtomArray, lig_label_asym_id: Union[str, list]
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Ref: AlphaFold3 Chapter Methods.Metrics

    The pocket is defined as all heavy atoms within 10 A of any heavy atom
    of the ligand, restricted to the primary polymer chain for the ligand or
    modified residue being scored, and further restricted to backbone atoms
    for proteins. Here the primary chain is the protein chain with the most
    (backbone) atoms within 10 A of the ligand.

    Args:
        atom_array (AtomArray): atoms in the complex.
        lig_label_asym_id (Union[str, list]): label_asym_id(s) of the
            ligand(s) of interest.

    Returns:
        tuple[torch.Tensor, torch.Tensor]: per-pocket ligand masks and
        pocket masks, each of shape [N_pockets, N_atom] (long, 0/1).
    """
    if isinstance(lig_label_asym_id, str):
        lig_label_asym_ids = [lig_label_asym_id]
    else:
        lig_label_asym_ids = list(lig_label_asym_id)

    # Protein backbone atoms only (the pocket is backbone-restricted).
    prot_backbone = (
        atom_array.is_protein & np.isin(atom_array.atom_name, ["C", "N", "CA"])
    ).astype(bool)

    kdtree = KDTree(atom_array.coord)

    ligand_mask_list = []
    pocket_mask_list = []
    for lig_label_asym_id in lig_label_asym_ids:
        assert np.isin(
            lig_label_asym_id, atom_array.label_asym_id
        ), f"{lig_label_asym_id} is not in the label_asym_id of the cropped atom array."

        ligand_mask = atom_array.label_asym_id == lig_label_asym_id
        lig_pos = atom_array.coord[ligand_mask]

        # All atoms within a 10 Angstrom radius of any ligand atom.
        near_atom_indices = np.unique(
            np.concatenate(kdtree.query_radius(lig_pos, 10.0))
        )
        # Perf fix: build the boolean mask by direct index assignment (O(N))
        # instead of testing `i in near_atom_indices` per atom, which is a
        # linear scan of a numpy array and made this step O(N * M).
        near_atoms = np.zeros(len(atom_array), dtype=bool)
        near_atoms[near_atom_indices] = True

        # Candidate pocket atoms: protein backbone within 10 A of the ligand.
        primary_chain_candidates = near_atoms & prot_backbone
        primary_chain_candidates_atoms = atom_array[primary_chain_candidates]

        # Primary chain: the chain contributing the most candidate atoms.
        max_atom = 0
        primary_chain_asym_id_int = None
        for asym_id_int in np.unique(primary_chain_candidates_atoms.asym_id_int):
            n_atoms = np.sum(
                primary_chain_candidates_atoms.asym_id_int == asym_id_int
            )
            if n_atoms > max_atom:
                max_atom = n_atoms
                primary_chain_asym_id_int = asym_id_int
        assert (
            primary_chain_asym_id_int is not None
        ), f"No primary chain found for ligand ({lig_label_asym_id=})."

        pocket_mask = primary_chain_candidates & (
            atom_array.asym_id_int == primary_chain_asym_id_int
        )
        ligand_mask_list.append(ligand_mask)
        pocket_mask_list.append(pocket_mask)

    ligand_mask_by_pockets = torch.Tensor(
        np.array(ligand_mask_list).astype(int)
    ).long()
    pocket_mask_by_pockets = torch.Tensor(
        np.array(pocket_mask_list).astype(int)
    ).long()
    return ligand_mask_by_pockets, pocket_mask_by_pockets
|
| 600 |
+
|
| 601 |
+
def get_mask_features(self) -> dict[str, torch.Tensor]:
    """
    Build the per-atom mask features for the cropped atom array.

    Returns:
        Dict[str, torch.Tensor]: long tensors keyed by feature name:
            - "pae_rep_atom_mask":       [N_atom], from the centre-atom annotation.
            - "plddt_m_rep_atom_mask":   [N_atom].
            - "distogram_rep_atom_mask": [N_atom].
            - "modified_res_mask":       [N_atom].
            - "bond_mask":               [N_atom, N_atom], symmetric adjacency of
              ligand-polymer bonds.
    """
    atoms = self.cropped_atom_array

    # Plain per-atom annotations that only need a dtype conversion.
    annotation_masks = {
        "pae_rep_atom_mask": atoms.centre_atom_mask,
        "plddt_m_rep_atom_mask": atoms.plddt_m_rep_atom_mask,
        "distogram_rep_atom_mask": atoms.distogram_rep_atom_mask,
        "modified_res_mask": atoms.modified_res_mask,
    }
    mask_features = {
        name: torch.Tensor(values).long()
        for name, values in annotation_masks.items()
    }

    # Symmetric [N_atom, N_atom] adjacency marking ligand-polymer bonds.
    n_atoms = len(atoms)
    adjacency = np.zeros((n_atoms, n_atoms))
    for atom_i, atom_j, _ in get_ligand_polymer_bond_mask(atoms):
        adjacency[atom_i, atom_j] = 1
        adjacency[atom_j, atom_i] = 1
    mask_features["bond_mask"] = torch.Tensor(adjacency).long()
    return mask_features
|
| 636 |
+
|
| 637 |
+
def get_all_input_features(self):
    """
    Collect every input-feature group computed from the cropped data.

    Aggregates token, bond, reference, extra, chain-permutation, and mask
    features into one flat dictionary.

    Returns:
        Dict[str, torch.Tensor]: a dict of all input features.
    """
    # Each getter returns an independent dict; merge them in order.
    feature_getters = (
        self.get_token_features,
        self.get_bond_features,
        self.get_reference_features,
        self.get_extra_features,
        self.get_chain_perm_features,
        self.get_mask_features,
    )
    features = {}
    for getter in feature_getters:
        features.update(getter())
    return features
|
| 663 |
+
|
| 664 |
+
def get_labels(self) -> dict[str, torch.Tensor]:
    """
    Assemble the ground-truth labels used during training.

    Returns:
        Dict[str, torch.Tensor]:
            - "coordinate": float tensor of atom positions, shape [N_atom, 3].
            - "coordinate_mask": long tensor, 1 where the atom is resolved
              in the experimental structure, shape [N_atom].
    """
    atoms = self.cropped_atom_array
    return {
        "coordinate": torch.Tensor(atoms.coord),  # [N_atom, 3]
        "coordinate_mask": torch.Tensor(
            atoms.is_resolved.astype(int)
        ).long(),  # [N_atom]
    }
|
| 682 |
+
|
| 683 |
+
def get_atom_permutation_list(
    self,
) -> list[list[int]]:
    """
    Generate info of permutations.

    Decodes the per-atom residue-permutation annotation and then removes any
    permutation column that would move an atom participating in an
    inter-residue bond (such atoms must stay fixed so bonded topology is
    preserved under symmetry swaps).

    Returns:
        List[List[int]]: a list of atom permutations, one inner list per atom;
            each inner list holds this atom's index under each surviving
            permutation of its residue.
    """
    atom_perm_list = []
    for i in self.cropped_atom_array.res_perm:
        # Decode list[str] -> list[list[int]]
        # Each annotation entry is an underscore-joined string of indices.
        atom_perm_list.append([int(j) for j in i.split("_")])

    # Atoms connected to different residue are fixed.
    # Bonds array: [[atom_idx_i, atom_idx_j, bond_type]]
    # NOTE(review): reads biotite's private BondList storage (`bonds._bonds`);
    # verify against the biotite version in use.
    idx_i = self.cropped_atom_array.bonds._bonds[:, 0]
    idx_j = self.cropped_atom_array.bonds._bonds[:, 1]
    # A bond is inter-residue when its endpoints live in different reference
    # spaces (ref_space_uid differs).
    diff_mask = (
        self.cropped_atom_array.ref_space_uid[idx_i]
        != self.cropped_atom_array.ref_space_uid[idx_j]
    )
    inter_residue_bonds = self.cropped_atom_array.bonds._bonds[diff_mask]
    # Boolean per-atom mask: True for atoms touched by an inter-residue bond.
    fixed_atom_mask = np.isin(
        np.arange(len(self.cropped_atom_array)),
        np.unique(inter_residue_bonds[:, :2]),
    )

    # Get fixed atom permutation for each residue.
    fixed_atom_perm_list = []
    res_starts = get_residue_starts(
        self.cropped_atom_array, add_exclusive_stop=True
    )
    for r_start, r_stop in zip(res_starts[:-1], res_starts[1:]):
        atom_res_perm = np.array(
            atom_perm_list[r_start:r_stop]
        )  # [N_res_atoms, N_res_perm]
        res_fixed_atom_mask = fixed_atom_mask[r_start:r_stop]

        if np.sum(res_fixed_atom_mask) == 0:
            # If all atoms in the residue are not fixed, e.g. ions
            # keep every permutation of this residue unchanged.
            fixed_atom_perm_list.extend(atom_res_perm.tolist())
            continue

        # Create a [N_res_atoms, N_res_perm] template of indices
        # Column 0 is treated as the identity permutation and broadcast
        # across all permutation columns for comparison.
        n_res_atoms, n_perm = atom_res_perm.shape
        indices_template = (
            atom_res_perm[:, 0].reshape(n_res_atoms, 1).repeat(n_perm, axis=1)
        )

        # Identify the column where the positions of the fixed atoms remain unchanged
        fixed_atom_perm = atom_res_perm[
            res_fixed_atom_mask
        ]  # [N_fixed_res_atoms, N_res_perm]
        fixed_indices_template = indices_template[
            res_fixed_atom_mask
        ]  # [N_fixed_res_atoms, N_res_perm]
        unchanged_columns_mask = np.all(
            fixed_atom_perm == fixed_indices_template, axis=0
        )

        # Remove the columns related to the position changes of fixed atoms.
        fiedx_atom_res_perm = atom_res_perm[:, unchanged_columns_mask]
        fixed_atom_perm_list.extend(fiedx_atom_res_perm.tolist())
    return fixed_atom_perm_list
|
| 748 |
+
|
| 749 |
+
@staticmethod
def get_gt_full_complex_features(
    atom_array: AtomArray,
    cropped_atom_array: AtomArray = None,
    get_cropped_asym_only: bool = True,
) -> tuple[dict[str, torch.Tensor], AtomArray]:
    """Get full ground truth complex features.
    It is used for multi-chain permutation alignment.

    Args:
        atom_array (AtomArray): all atoms in the complex.
        cropped_atom_array (AtomArray, optional): cropped atoms. Defaults to None.
        get_cropped_asym_only (bool, optional): Defaults to True.
            - If true, a chain is returned only if its asym_id (mol_id) appears in the
            cropped_atom_array. It should be a favored setting for the spatial cropping.
            - If false, a chain is returned if its entity_id (entity_mol_id) appears in
            the cropped_atom_array.

    Returns:
        tuple[dict[str, torch.Tensor], AtomArray]:
            - a dictionary containing coordinate, coordinate_mask,
              entity_mol_id, mol_id, mol_atom_index, pae_rep_atom_mask.
            - the (possibly filtered) atom array the features were taken from.
    """
    gt_features = {}

    if cropped_atom_array is not None:
        # Get the cropped part of gt entities
        # Membership key: (entity, position-within-molecule) pairs present
        # in the crop.
        entity_atom_set = set(
            zip(
                cropped_atom_array.entity_mol_id,
                cropped_atom_array.mol_atom_index,
            )
        )
        mask = [
            (entity, atom) in entity_atom_set
            for (entity, atom) in zip(
                atom_array.entity_mol_id, atom_array.mol_atom_index
            )
        ]

        if get_cropped_asym_only:
            # Restrict to asym chains appeared in cropped_atom_array
            asyms = np.unique(cropped_atom_array.mol_id)
            # Elementwise AND via multiplication of the bool list with a
            # bool ndarray.
            mask = mask * np.isin(atom_array.mol_id, asyms)
        atom_array = atom_array[mask]

    gt_features["coordinate"] = torch.Tensor(atom_array.coord)
    gt_features["coordinate_mask"] = torch.Tensor(atom_array.is_resolved).long()
    gt_features["entity_mol_id"] = torch.Tensor(atom_array.entity_mol_id).long()
    gt_features["mol_id"] = torch.Tensor(atom_array.mol_id).long()
    gt_features["mol_atom_index"] = torch.Tensor(atom_array.mol_atom_index).long()
    gt_features["pae_rep_atom_mask"] = torch.Tensor(
        atom_array.centre_atom_mask
    ).long()
    return gt_features, atom_array
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/filter.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import biotite.structure as struc
|
| 16 |
+
import numpy as np
|
| 17 |
+
from biotite.structure import AtomArray
|
| 18 |
+
|
| 19 |
+
from protenix.data.constants import CRYSTALLIZATION_AIDS
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class Filter(object):
    """
    Structure-cleanup filters applied to an AtomArray before featurization.

    Ref: AlphaFold3 SI Chapter 2.5.4
    """

    @staticmethod
    def remove_hydrogens(atom_array: AtomArray) -> AtomArray:
        """remove hydrogens and deuteriums"""
        keep = ~np.isin(atom_array.element, ["H", "D"])
        return atom_array[keep]

    @staticmethod
    def remove_water(atom_array: AtomArray) -> AtomArray:
        """remove water (HOH) and deuterated water (DOD)"""
        keep = ~np.isin(atom_array.res_name, ["HOH", "DOD"])
        return atom_array[keep]

    @staticmethod
    def remove_element_X(atom_array: AtomArray) -> AtomArray:
        """
        remove element X
        following residues have element X:
        - UNX: unknown one atom or ion
        - UNL: unknown ligand, some atoms are marked as X
        - ASX: ASP/ASN ambiguous, two ambiguous atoms are marked as X, 6 entries in the PDB
        - GLX: GLU/GLN ambiguous, two ambiguous atoms are marked as X, 5 entries in the PDB
        """
        # Drop whole residues whose identity is entirely unknown.
        drop_mask = np.zeros(len(atom_array), dtype=bool)
        starts = struc.get_residue_starts(atom_array, add_exclusive_stop=True)
        for begin, end in zip(starts[:-1], starts[1:]):
            if atom_array.res_name[begin] in ("UNX", "UNL"):
                drop_mask[begin:end] = True
        atom_array = atom_array[~drop_mask]

        # Resolve the two ambiguous residues to their more symmetric variant
        # (ASX -> ASP, GLX -> GLU), renaming the X-labelled atoms to oxygens.
        for ambiguous, resolved, atom_renames in (
            ("ASX", "ASP", (("XD1", "OD1"), ("XD2", "OD2"))),
            ("GLX", "GLU", (("XE1", "OE1"), ("XE2", "OE2"))),
        ):
            residue_mask = atom_array.res_name == ambiguous
            atom_array.res_name[residue_mask] = resolved
            for old_name, new_name in atom_renames:
                atom_array.atom_name[
                    residue_mask & (atom_array.atom_name == old_name)
                ] = new_name
            atom_array.element[residue_mask & (atom_array.element == "X")] = "O"
        return atom_array

    @staticmethod
    def remove_crystallization_aids(
        atom_array: AtomArray, entity_poly_type: dict
    ) -> AtomArray:
        """remove crystallization aids, eg: SO4, GOL, etc.

        Only remove crystallization aids if the chain is not polymer.

        Ref: AlphaFold3 SI Chapter 2.5.4
        """
        is_aid = np.isin(atom_array.res_name, CRYSTALLIZATION_AIDS)
        is_polymer = np.isin(
            atom_array.label_entity_id, list(entity_poly_type.keys())
        )
        # Keep everything that is a polymer atom or not an aid residue.
        return atom_array[is_polymer | ~is_aid]
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/infer_data_pipeline.py
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import json
|
| 16 |
+
import logging
|
| 17 |
+
import time
|
| 18 |
+
import traceback
|
| 19 |
+
import warnings
|
| 20 |
+
from typing import Any, Mapping
|
| 21 |
+
|
| 22 |
+
import torch
|
| 23 |
+
from biotite.structure import AtomArray
|
| 24 |
+
from torch.utils.data import DataLoader, Dataset, DistributedSampler
|
| 25 |
+
|
| 26 |
+
from protenix.data.data_pipeline import DataPipeline
|
| 27 |
+
from protenix.data.json_to_feature import SampleDictToFeatures
|
| 28 |
+
from protenix.data.msa_featurizer import InferenceMSAFeaturizer
|
| 29 |
+
from protenix.data.utils import data_type_transform, make_dummy_feature
|
| 30 |
+
from protenix.utils.distributed import DIST_WRAPPER
|
| 31 |
+
from protenix.utils.torch_utils import dict_to_tensor
|
| 32 |
+
|
| 33 |
+
logger = logging.getLogger(__name__)
|
| 34 |
+
|
| 35 |
+
warnings.filterwarnings("ignore", module="biotite")
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def get_inference_dataloader(configs: Any) -> DataLoader:
    """
    Build the DataLoader used for inference over an InferenceDataset.

    Args:
        configs: A configuration object providing input_json_path, dump_dir,
            use_msa, and num_workers.

    Returns:
        A DataLoader that yields one raw sample per step, sharded across
        distributed ranks in a fixed (unshuffled) order.
    """
    dataset = InferenceDataset(
        input_json_path=configs.input_json_path,
        dump_dir=configs.dump_dir,
        use_msa=configs.use_msa,
    )
    # Shard deterministically: one slice of samples per distributed rank.
    rank_sampler = DistributedSampler(
        dataset=dataset,
        num_replicas=DIST_WRAPPER.world_size,
        rank=DIST_WRAPPER.rank,
        shuffle=False,
    )
    # batch_size=1 with an identity collate: each batch is the raw sample list.
    return DataLoader(
        dataset=dataset,
        batch_size=1,
        sampler=rank_sampler,
        collate_fn=lambda batch: batch,
        num_workers=configs.num_workers,
    )
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class InferenceDataset(Dataset):
    """
    Dataset that turns input-JSON sample dicts into model input features.

    Each element of the input JSON list is a sample dict (with at least a
    "name" key and a "sequences" entry). __getitem__ featurizes one sample
    and never raises on featurization failure: errors are captured and
    returned as a message alongside an empty data dict.
    """

    def __init__(
        self,
        input_json_path: str,
        dump_dir: str,
        use_msa: bool = True,
    ) -> None:
        """
        Args:
            input_json_path: Path to a JSON file containing a list of sample dicts.
            dump_dir: Output directory (stored for downstream use).
            use_msa: Whether to compute MSA features for each sample.
        """
        self.input_json_path = input_json_path
        self.dump_dir = dump_dir
        self.use_msa = use_msa
        with open(self.input_json_path, "r") as f:
            self.inputs = json.load(f)

    def process_one(
        self,
        single_sample_dict: Mapping[str, Any],
    ) -> tuple[dict[str, torch.Tensor], AtomArray, dict[str, float]]:
        """
        Processes a single sample from the input JSON to generate features and statistics.

        Args:
            single_sample_dict: A dictionary containing the sample data.

        Returns:
            A tuple containing:
                - A dictionary of features (plus dimension stats and entity_poly_type).
                - An AtomArray object.
                - A dictionary of time tracking statistics.
        """
        # general features
        t0 = time.time()
        sample2feat = SampleDictToFeatures(
            single_sample_dict,
        )
        features_dict, atom_array, token_array = sample2feat.get_feature_dict()
        features_dict["distogram_rep_atom_mask"] = torch.Tensor(
            atom_array.distogram_rep_atom_mask
        ).long()
        entity_poly_type = sample2feat.entity_poly_type
        t1 = time.time()

        # MSA features (skipped entirely when use_msa is off).
        entity_to_asym_id = DataPipeline.get_label_entity_id_to_asym_id_int(atom_array)
        msa_features = (
            InferenceMSAFeaturizer.make_msa_feature(
                bioassembly=single_sample_dict["sequences"],
                entity_to_asym_id=entity_to_asym_id,
                token_array=token_array,
                atom_array=atom_array,
            )
            if self.use_msa
            else {}
        )

        # Make dummy features for not implemented features; a missing MSA
        # result is also replaced by a dummy "msa" feature.
        dummy_feats = ["template"]
        if len(msa_features) == 0:
            dummy_feats.append("msa")
        else:
            msa_features = dict_to_tensor(msa_features)
            features_dict.update(msa_features)
        features_dict = make_dummy_feature(
            features_dict=features_dict,
            dummy_feats=dummy_feats,
        )

        # Transform to right data type
        feat = data_type_transform(feat_or_label_dict=features_dict)

        t2 = time.time()

        data = {}
        data["input_feature_dict"] = feat

        # Add dimension related items
        N_token = feat["token_index"].shape[0]
        N_atom = feat["atom_to_token_idx"].shape[0]
        N_msa = feat["msa"].shape[0]

        # Per-molecule-type atom/token counts for logging and bookkeeping.
        stats = {}
        for mol_type in ["ligand", "protein", "dna", "rna"]:
            mol_type_mask = feat[f"is_{mol_type}"].bool()
            stats[f"{mol_type}/atom"] = int(mol_type_mask.sum(dim=-1).item())
            stats[f"{mol_type}/token"] = len(
                torch.unique(feat["atom_to_token_idx"][mol_type_mask])
            )

        N_asym = len(torch.unique(data["input_feature_dict"]["asym_id"]))
        data.update(
            {
                "N_asym": torch.tensor([N_asym]),
                "N_token": torch.tensor([N_token]),
                "N_atom": torch.tensor([N_atom]),
                "N_msa": torch.tensor([N_msa]),
            }
        )

        def formatted_key(key: str) -> str:
            # "protein/atom" -> "N_prot_atom", "ligand/token" -> "N_lig_token", etc.
            type_, unit = key.split("/")
            if type_ == "protein":
                type_ = "prot"
            elif type_ == "ligand":
                type_ = "lig"
            return f"N_{type_}_{unit}"

        data.update(
            {
                formatted_key(k): torch.tensor([stats[k]])
                for k in [
                    "protein/atom",
                    "ligand/atom",
                    "dna/atom",
                    "rna/atom",
                    "protein/token",
                    "ligand/token",
                    "dna/token",
                    "rna/token",
                ]
            }
        )
        data.update({"entity_poly_type": entity_poly_type})
        t3 = time.time()
        time_tracker = {
            "crop": t1 - t0,
            "featurizer": t2 - t1,
            "added_feature": t3 - t2,
        }

        return data, atom_array, time_tracker

    def __len__(self) -> int:
        """Number of samples in the input JSON."""
        return len(self.inputs)

    def __getitem__(self, index: int) -> tuple[dict[str, torch.Tensor], AtomArray, str]:
        """
        Featurize one sample; on failure return ({}, None, error_message)
        with the sample name/index still attached to the data dict.
        """
        # Fetch the sample outside the try-block so `single_sample_dict` is
        # always bound when we record the sample name below. (Previously a
        # failure in the indexing itself caused a NameError in the fallback
        # path instead of a clean IndexError.)
        single_sample_dict = self.inputs[index]
        try:
            sample_name = single_sample_dict["name"]
            logger.info(f"Featurizing {sample_name}...")

            data, atom_array, _ = self.process_one(
                single_sample_dict=single_sample_dict
            )
            error_message = ""
        except Exception as e:
            # Capture the full traceback; downstream code inspects the
            # message rather than letting the worker crash.
            data, atom_array = {}, None
            error_message = f"{e}:\n{traceback.format_exc()}"
        data["sample_name"] = single_sample_dict["name"]
        data["sample_index"] = index
        return data, atom_array, error_message
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/json_maker.py
ADDED
|
@@ -0,0 +1,296 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
import copy
|
| 17 |
+
import json
|
| 18 |
+
import os
|
| 19 |
+
from collections import defaultdict
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
from biotite.structure import AtomArray, get_chain_starts, get_residue_starts
|
| 23 |
+
|
| 24 |
+
from protenix.data.constants import STD_RESIDUES
|
| 25 |
+
from protenix.data.filter import Filter
|
| 26 |
+
from protenix.data.parser import AddAtomArrayAnnot, MMCIFParser
|
| 27 |
+
from protenix.data.utils import get_lig_lig_bonds, get_ligand_polymer_bond_mask
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def merge_covalent_bonds(
    covalent_bonds: list[dict], all_entity_counts: dict[str, int]
) -> list[dict]:
    """
    Merge covalent bonds with same entity and position.

    Bonds sharing entity, position, and atom name on both sides are grouped.
    When a group holds exactly one bond per chain copy for both entities, it
    collapses to a single copy-agnostic bond dict (the "left_copy" /
    "right_copy" keys are removed). Otherwise the group is kept verbatim.

    Args:
        covalent_bonds (list[dict]): A list of covalent bond dicts.
        all_entity_counts (dict[str, int]): A dict of entity id to chain count.

    Returns:
        list[dict]: A list of merged covalent bond dicts.
    """
    grouped = defaultdict(list)
    copy_counts = {}
    for bond in covalent_bonds:
        # Identity of a bond up to the chain copy it lives on.
        key = "_".join(
            str(bond[f"{side}_{field}"])
            for side in ("left", "right")
            for field in ("entity", "position", "atom")
        )
        grouped[key].append(bond)
        copy_counts[key] = (
            all_entity_counts[str(bond["left_entity"])],
            all_entity_counts[str(bond["right_entity"])],
        )

    merged = []
    for key, group in grouped.items():
        left_total, right_total = copy_counts[key]
        if left_total == right_total == len(group):
            # The bond occurs once per chain copy on both sides: emit one
            # representative without the per-copy identifiers.
            representative = copy.deepcopy(group[0])
            representative.pop("left_copy")
            representative.pop("right_copy")
            merged.append(representative)
        else:
            merged.extend(group)
    return merged
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def atom_array_to_input_json(
    atom_array: AtomArray,
    parser: MMCIFParser,
    assembly_id: str = None,
    output_json: str = None,
    sample_name=None,
    save_entity_and_asym_id=False,
) -> dict:
    """
    Convert a Biotite AtomArray to a dict that can be used as input to the model.

    Args:
        atom_array (AtomArray): Biotite Atom array.
        parser (MMCIFParser): Instantiated Protenix MMCIFParer.
        assembly_id (str, optional): Assembly ID. Defaults to None.
        output_json (str, optional): Output json file path. Defaults to None.
        sample_name (_type_, optional): The "name" field in json file. Defaults to None.
        save_entity_and_asym_id (bool, optional): Whether to save entity and asym ids to json.
            Defaults to False.

    Returns:
        dict: Protenix input json dict.
    """
    # get sequences after modified AtomArray
    entity_seq = parser.get_sequences(atom_array)

    # add unique chain id
    atom_array = AddAtomArrayAnnot.unique_chain_and_add_ids(atom_array)

    # get lig entity sequences and position
    # Entities absent from entity_poly_type are treated as ligands; collect
    # their residue-name sequence (one entry per chain; last chain wins the
    # per-entity sequence slot since copies of an entity are identical).
    label_entity_id_to_sequences = {}
    lig_chain_ids = []  # record chain_id of the first asym chain
    for label_entity_id in np.unique(atom_array.label_entity_id):
        if label_entity_id not in parser.entity_poly_type:
            current_lig_chain_ids = np.unique(
                atom_array.chain_id[atom_array.label_entity_id == label_entity_id]
            ).tolist()
            lig_chain_ids += current_lig_chain_ids
            for chain_id in current_lig_chain_ids:
                lig_atom_array = atom_array[atom_array.chain_id == chain_id]
                starts = get_residue_starts(lig_atom_array, add_exclusive_stop=True)
                seq = lig_atom_array.res_name[starts[:-1]].tolist()
                label_entity_id_to_sequences[label_entity_id] = seq

    # find polymer modifications
    # Any non-standard residue in a polymer chain is recorded as a
    # (1-based position, "CCD_<code>") modification.
    entity_id_to_mod_list = {}
    for entity_id, res_names in parser.get_poly_res_names(atom_array).items():
        modifications_list = []
        for idx, res_name in enumerate(res_names):
            if res_name not in STD_RESIDUES:
                position = idx + 1
                modifications_list.append([position, f"CCD_{res_name}"])
        if modifications_list:
            entity_id_to_mod_list[entity_id] = modifications_list

    # One representative atom per chain: used below to enumerate chains.
    chain_starts = get_chain_starts(atom_array, add_exclusive_stop=False)
    chain_starts_atom_array = atom_array[chain_starts]

    json_dict = {
        "sequences": [],
    }
    if assembly_id is not None:
        json_dict["assembly_id"] = assembly_id

    # Map mmCIF label_entity_id -> 1-based entity id used in the output json,
    # and number the copies (chains) of each entity.
    unique_label_entity_id = np.unique(atom_array.label_entity_id)
    label_entity_id_to_entity_id_in_json = {}
    chain_id_to_copy_id_dict = {}
    for idx, label_entity_id in enumerate(unique_label_entity_id):
        entity_id_in_json = str(idx + 1)
        label_entity_id_to_entity_id_in_json[label_entity_id] = entity_id_in_json
        chain_ids_in_entity = chain_starts_atom_array.chain_id[
            chain_starts_atom_array.label_entity_id == label_entity_id
        ]
        for chain_count, chain_id in enumerate(chain_ids_in_entity):
            chain_id_to_copy_id_dict[chain_id] = chain_count + 1
    copy_id = np.vectorize(chain_id_to_copy_id_dict.get)(atom_array.chain_id)
    atom_array.set_annotation("copy_id", copy_id)

    all_entity_counts = {}
    skipped_entity_id = []
    for label_entity_id in unique_label_entity_id:
        entity_dict = {}
        asym_chains = chain_starts_atom_array[
            chain_starts_atom_array.label_entity_id == label_entity_id
        ]
        entity_type = parser.entity_poly_type.get(label_entity_id, "ligand")
        if entity_type != "ligand":
            # Translate mmCIF polymer types to the json schema's type names.
            if entity_type == "polypeptide(L)":
                entity_type = "proteinChain"
            elif entity_type == "polydeoxyribonucleotide":
                entity_type = "dnaSequence"
            elif entity_type == "polyribonucleotide":
                entity_type = "rnaSequence"
            else:
                # DNA/RNA hybrid, polypeptide(D), etc.
                skipped_entity_id.append(label_entity_id)
                continue

            sequence = entity_seq.get(label_entity_id)
            entity_dict["sequence"] = sequence
        else:
            # ligand
            # Multi-residue ligands become an underscore-joined CCD string.
            lig_ccd = "_".join(label_entity_id_to_sequences[label_entity_id])
            entity_dict["ligand"] = f"CCD_{lig_ccd}"
        entity_dict["count"] = len(asym_chains)
        all_entity_counts[label_entity_id_to_entity_id_in_json[label_entity_id]] = len(
            asym_chains
        )
        if save_entity_and_asym_id:
            entity_dict["label_entity_id"] = str(label_entity_id)
            entity_dict["label_asym_id"] = asym_chains.label_asym_id.tolist()

        # add PTM info
        if label_entity_id in entity_id_to_mod_list:
            modifications = entity_id_to_mod_list[label_entity_id]
            if entity_type == "proteinChain":
                entity_dict["modifications"] = [
                    {"ptmPosition": position, "ptmType": mod_ccd_code}
                    for position, mod_ccd_code in modifications
                ]
            else:
                entity_dict["modifications"] = [
                    {"basePosition": position, "modificationType": mod_ccd_code}
                    for position, mod_ccd_code in modifications
                ]

        json_dict["sequences"].append({entity_type: entity_dict})

    # skip some uncommon entities
    atom_array = atom_array[~np.isin(atom_array.label_entity_id, skipped_entity_id)]

    # add covalent bonds
    atom_array = AddAtomArrayAnnot.add_token_mol_type(
        atom_array, parser.entity_poly_type
    )
    # Bond rows are [atom_idx_i, atom_idx_j, bond_type]; keep only bonds
    # that touch a ligand atom.
    lig_polymer_bonds = get_ligand_polymer_bond_mask(atom_array, lig_include_ions=False)
    lig_lig_bonds = get_lig_lig_bonds(atom_array, lig_include_ions=False)
    inter_entity_bonds = np.vstack((lig_polymer_bonds, lig_lig_bonds))

    lig_indices = np.where(np.isin(atom_array.chain_id, lig_chain_ids))[0]
    lig_bond_mask = np.any(np.isin(inter_entity_bonds[:, :2], lig_indices), axis=1)
    inter_entity_bonds = inter_entity_bonds[lig_bond_mask]  # select bonds of ligands
    if inter_entity_bonds.size != 0:
        covalent_bonds = []
        for atoms in inter_entity_bonds[:, :2]:
            bond_dict = {}
            for idx, i in enumerate(["left", "right"]):
                atom = atom_array[atoms[idx]]
                positon = atom.res_id
                bond_dict[f"{i}_entity"] = int(
                    label_entity_id_to_entity_id_in_json[atom.label_entity_id]
                )
                bond_dict[f"{i}_position"] = int(positon)
                bond_dict[f"{i}_atom"] = atom.atom_name
                bond_dict[f"{i}_copy"] = int(atom.copy_id)

            covalent_bonds.append(bond_dict)

        # merge covalent_bonds for same entity
        merged_covalent_bonds = merge_covalent_bonds(covalent_bonds, all_entity_counts)
        json_dict["covalent_bonds"] = merged_covalent_bonds

    json_dict["name"] = sample_name

    if output_json is not None:
        with open(output_json, "w") as f:
            json.dump([json_dict], f, indent=4)
    return json_dict
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
def cif_to_input_json(
    mmcif_file: str,
    assembly_id: str = None,
    altloc="first",
    output_json: str = None,
    sample_name=None,
    save_entity_and_asym_id=False,
) -> dict:
    """
    Convert mmcif file to Protenix input json file.

    Args:
        mmcif_file (str): mmCIF file path.
        assembly_id (str, optional): Bioassembly ID to expand; no expansion when None.
            Defaults to None.
        altloc (str, optional): Altloc selection strategy passed to the parser.
            Defaults to "first".
        output_json (str, optional): Output json file path; nothing is written when None.
            Defaults to None.
        sample_name (str, optional): The "name" field in the json file; defaults to the
            mmCIF file basename (up to the first ".") when None.
        save_entity_and_asym_id (bool, optional): Whether to save entity and asym ids to json.
            Defaults to False.

    Returns:
        dict: Protenix input json dict.
    """
    parser = MMCIFParser(mmcif_file)
    # model=1: only the first model of multi-model structures is used
    atom_array = parser.get_structure(altloc, model=1, bond_lenth_threshold=None)

    # remove HOH from entities
    atom_array = Filter.remove_water(atom_array)
    atom_array = Filter.remove_hydrogens(atom_array)
    # map selenomethionine (MSE) to standard MET
    atom_array = parser.mse_to_met(atom_array)
    atom_array = Filter.remove_element_X(atom_array)

    # remove crystallization_aids (only meaningful for diffraction structures)
    if any(["DIFFRACTION" in m for m in parser.methods]):
        atom_array = Filter.remove_crystallization_aids(
            atom_array, parser.entity_poly_type
        )

    if assembly_id is not None:
        # expand created AtomArray by expand bioassembly
        atom_array = parser.expand_assembly(atom_array, assembly_id)

    if sample_name is None:
        sample_name = os.path.basename(mmcif_file).split(".")[0]

    json_dict = atom_array_to_input_json(
        atom_array,
        parser,
        assembly_id,
        output_json,
        sample_name,
        save_entity_and_asym_id=save_entity_and_asym_id,
    )
    return json_dict
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/json_parser.py
ADDED
|
@@ -0,0 +1,608 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import copy
|
| 16 |
+
import logging
|
| 17 |
+
import random
|
| 18 |
+
import warnings
|
| 19 |
+
from collections import Counter
|
| 20 |
+
from typing import Any
|
| 21 |
+
|
| 22 |
+
import biotite.structure as struc
|
| 23 |
+
import numpy as np
|
| 24 |
+
from biotite.structure import AtomArray
|
| 25 |
+
from rdkit import Chem
|
| 26 |
+
from rdkit.Chem import AllChem
|
| 27 |
+
|
| 28 |
+
from protenix.data import ccd
|
| 29 |
+
|
| 30 |
+
logger = logging.getLogger(__name__)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
# One-letter -> CCD three-letter code maps used to expand input sequences
# into CCD component codes before building atom arrays.
DNA_1to3 = {
    "A": "DA",
    "G": "DG",
    "C": "DC",
    "T": "DT",
    "X": "DN",
    "I": "DI",  # eg: pdb 114d
    "N": "DN",  # eg: pdb 7r6t-3DR
    "U": "DU",  # eg: pdb 7sd8
}
# RNA one-letter codes map to themselves in the CCD.
RNA_1to3 = {
    "A": "A",
    "G": "G",
    "C": "C",
    "U": "U",
    "X": "N",
    "I": "I",  # eg: pdb 7wv5
    "N": "N",
}

# Standard amino-acid one-letter -> three-letter CCD codes ("X" -> UNK).
PROTEIN_1to3 = {
    "A": "ALA",
    "R": "ARG",
    "N": "ASN",
    "D": "ASP",
    "C": "CYS",
    "Q": "GLN",
    "E": "GLU",
    "G": "GLY",
    "H": "HIS",
    "I": "ILE",
    "L": "LEU",
    "K": "LYS",
    "M": "MET",
    "F": "PHE",
    "P": "PRO",
    "S": "SER",
    "T": "THR",
    "W": "TRP",
    "Y": "TYR",
    "V": "VAL",
    "X": "UNK",
}
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def add_reference_features(atom_array: AtomArray) -> AtomArray:
    """
    Add reference-conformer features of each residue to atom_array.

    Args:
        atom_array (AtomArray): biotite AtomArray

    Returns:
        AtomArray: biotite AtomArray with reference features
            - ref_pos: reference conformer atom positions
            - ref_charge: reference conformer atom charges
            - ref_mask: reference conformer atom masks (1 = reference available)
    """
    atom_count = len(atom_array)
    # Zero-filled defaults are kept for residues without CCD reference info.
    ref_pos = np.zeros((atom_count, 3), dtype=np.float32)
    ref_charge = np.zeros(atom_count, dtype=int)
    ref_mask = np.zeros(atom_count, dtype=int)

    starts = struc.get_residue_starts(atom_array, add_exclusive_stop=True)
    for start, stop in zip(starts[:-1], starts[1:]):
        res_name = atom_array.res_name[start]
        if res_name == "UNL":
            # UNL is a SMILES ligand: its own conformer is the reference,
            # so copy coordinates/charges straight from atom_array.
            ref_pos[start:stop] = atom_array.coord[start:stop]
            ref_charge[start:stop] = atom_array.charge[start:stop]
            ref_mask[start:stop] = 1
            continue

        ref_info = ccd.get_ccd_ref_info(res_name)
        if ref_info:
            # Map this residue's atom names to indices in the CCD reference component.
            atom_sub_idx = [
                *map(ref_info["atom_map"].get, atom_array.atom_name[start:stop])
            ]
            ref_pos[start:stop] = ref_info["coord"][atom_sub_idx]
            ref_charge[start:stop] = ref_info["charge"][atom_sub_idx]
            ref_mask[start:stop] = ref_info["mask"][atom_sub_idx]
        else:
            # Fix: use the module-level logger (was logging.warning on the
            # root logger), consistent with the rest of this module.
            logger.warning(f"no reference info for {res_name}")

    atom_array.set_annotation("ref_pos", ref_pos)
    atom_array.set_annotation("ref_charge", ref_charge)
    atom_array.set_annotation("ref_mask", ref_mask)
    return atom_array
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def _remove_non_std_ccd_leaving_atoms(atom_array: AtomArray) -> AtomArray:
    """
    Check polymer connections and remove non-standard leaving atoms.

    For each pair of consecutive residues that has no inter-residue bond
    (no C-N or O3'-P link), all CCD leaving atoms of both residues are
    removed — except OP3 on the chain's first nucleotide and OXT on a
    C-terminal amino acid, which are kept.

    Args:
        atom_array (AtomArray): biotite AtomArray; res_id is assumed to start
            at 1 and increase by 1 along the chain (see res_id comment below).

    Returns:
        AtomArray: biotite AtomArray with leaving atoms removed.
    """
    # connected[r] is True when residues r and r+1 share a bond
    # (index 0 is unused because res_id starts at 1).
    connected = np.zeros(atom_array.res_id[-1], dtype=bool)
    # NOTE(review): iterates biotite's private BondList._bonds (i, j, bond_type)
    for i, j, t in atom_array.bonds._bonds:
        if abs(atom_array.res_id[i] - atom_array.res_id[j]) == 1:
            connected[atom_array.res_id[[i, j]].min()] = True

    leaving_atoms = np.zeros(len(atom_array), dtype=bool)
    for res_id, conn in enumerate(connected):
        if res_id == 0 or conn:
            continue

        # Res_id start from 1
        res_name_i = atom_array.res_name[atom_array.res_id == res_id][0]
        res_name_j = atom_array.res_name[atom_array.res_id == res_id + 1][0]
        warnings.warn(
            f"No C-N or O3'-P bond between residue {res_name_i}({res_id}) and residue {res_name_j}({res_id+1}). \n"
            f"all leaving atoms will be removed for both residues."
        )
        for idx, res_name in zip([res_id, res_id + 1], [res_name_i, res_name_j]):
            # Atoms that survive once all leaving atoms are stripped from the CCD component
            staying_atoms = ccd.get_component_atom_array(
                res_name, keep_leaving_atoms=False, keep_hydrogens=False
            ).atom_name
            # First nucleotide of the chain keeps its 5'-terminal OP3 oxygen
            if idx == 1 and ccd.get_mol_type(res_name) in ("dna", "rna"):
                staying_atoms = np.append(staying_atoms, ["OP3"])
            # Last protein residue keeps its C-terminal OXT oxygen
            if idx == atom_array.res_id[-1] and ccd.get_mol_type(res_name) == "protein":
                staying_atoms = np.append(staying_atoms, ["OXT"])
            leaving_atoms |= (atom_array.res_id == idx) & (
                ~np.isin(atom_array.atom_name, staying_atoms)
            )
    return atom_array[~leaving_atoms]
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def find_range_by_index(starts: np.ndarray, atom_index: int) -> tuple[int, int]:
    """
    Locate the half-open interval of ``starts`` that contains ``atom_index``.

    Args:
        starts (np.ndarray): Residue starts or Chain starts with exclusive stop.
        atom_index (int): Atom index.

    Returns:
        tuple[int, int]: range (start, stop) with start <= atom_index < stop.

    Raises:
        ValueError: if atom_index falls outside every interval.
    """
    for lower, upper in zip(starts[:-1], starts[1:]):
        inside = lower <= atom_index < upper
        if inside:
            return lower, upper
    raise ValueError(f"atom_index {atom_index} not found in starts {starts}")
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
def remove_leaving_atoms(atom_array: AtomArray, bond_count: dict) -> AtomArray:
    """
    Remove leaving atoms based on ccd info.

    For every "central" atom that has inter-residue bonds, one CCD leaving
    group is removed per bond; when the atom has more bonds than leaving
    groups, all of its leaving groups are removed.

    Args:
        atom_array (AtomArray): Biotite Atom array.
        bond_count (dict): atom index -> count of inter-residue bonds at that atom.

    Returns:
        AtomArray: Biotite Atom array with leaving atoms removed.
    """
    remove_indices = []
    res_starts = struc.get_residue_starts(atom_array, add_exclusive_stop=True)
    for centre_idx, b_count in bond_count.items():
        res_name = atom_array.res_name[centre_idx]
        centre_name = atom_array.atom_name[centre_idx]

        comp = ccd.get_component_atom_array(
            res_name, keep_leaving_atoms=True, keep_hydrogens=False
        )
        if comp is None:
            # residue unknown to the CCD: nothing to remove
            continue

        leaving_groups = comp.central_to_leaving_groups.get(centre_name)
        if leaving_groups is None:
            # this atom has no associated leaving groups
            continue

        if b_count > len(leaving_groups):
            warnings.warn(
                f"centre atom {centre_name=} {res_name=} {centre_idx=} has {b_count} inter residue bonds, greater than number of leaving groups:{leaving_groups}, remove all leaving atoms.\n"
                f"atom info: {atom_array[centre_idx]=}"
            )
            remove_groups = leaving_groups
        else:
            # NOTE(review): random.sample makes the choice of removed groups
            # non-deterministic unless the caller seeds the random module.
            remove_groups = random.sample(leaving_groups, b_count)

        # residue span containing the central atom
        start, stop = find_range_by_index(res_starts, centre_idx)

        # Find leaving atom indices (names resolved within the residue's span)
        for group in remove_groups:
            for atom_name in group:
                leaving_idx = np.where(atom_array.atom_name[start:stop] == atom_name)[0]
                if len(leaving_idx) == 0:
                    logging.info(f"{atom_name=} not found in residue {res_name}, ")
                    continue

                remove_indices.append(leaving_idx[0] + start)

    if not remove_indices:
        return atom_array

    keep_mask = np.ones(len(atom_array), dtype=bool)
    keep_mask[remove_indices] = False
    return atom_array[keep_mask]
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def _add_bonds_to_terminal_residues(atom_array: AtomArray) -> AtomArray:
    """
    Add bonds to terminal cap residues (eg: ACE, NME).

    Args:
        atom_array (AtomArray): Biotite AtomArray

    Returns:
        AtomArray: Biotite AtomArray with non-standard polymer bonds
    """

    def _atom_indices(res_idx, atom_name):
        # Indices of the named atom within the given residue (may be empty).
        return np.where(
            (atom_array.res_id == res_idx) & (atom_array.atom_name == atom_name)
        )[0]

    # N-terminal acetyl cap: bond its C to the N of the following residue.
    if atom_array.res_name[0] == "ACE":
        first_res = atom_array.res_id[0]
        cap_c = _atom_indices(first_res, "C")
        next_n = _atom_indices(first_res + 1, "N")
        if len(cap_c) > 0 and len(next_n) > 0:
            atom_array.bonds.add_bond(cap_c[0], next_n[0], 1)

    # C-terminal N-methylamide cap: bond its N to the C of the preceding residue.
    if atom_array.res_name[-1] == "NME":
        last_res = atom_array.res_id[-1]
        cap_n = _atom_indices(last_res, "N")
        prev_c = _atom_indices(last_res - 1, "C")
        if len(prev_c) > 0 and len(cap_n) > 0:
            atom_array.bonds.add_bond(prev_c[0], cap_n[0], 1)

    return atom_array
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
def _build_polymer_atom_array(ccd_seqs: list[str]) -> AtomArray:
    """
    Build a polymer atom_array from ccd codes; leaving atoms are removed
    after inter-residue bonds are known.

    Args:
        ccd_seqs: a list of ccd code in sequence, ["MET", "ALA"] or ["DA", "DT"]

    Returns:
        AtomArray: Biotite AtomArray of the chain, including polymer bonds
            (C-N or O3'-P) in its BondList.
            (Fix: the previous annotation claimed a (AtomArray, BondList)
            tuple, but only the chain is returned.)
    """
    chain = struc.AtomArray(0)
    # res_id is 1-based along the chain
    for res_id, res_name in enumerate(ccd_seqs, start=1):
        # Keep all leaving atoms here; they are stripped once bonds are known
        residue = ccd.get_component_atom_array(
            res_name, keep_leaving_atoms=True, keep_hydrogens=False
        )
        residue.res_id[:] = res_id
        chain += residue
    res_starts = struc.get_residue_starts(chain, add_exclusive_stop=True)
    polymer_bonds = ccd._connect_inter_residue(chain, res_starts)

    if chain.bonds is None:
        chain.bonds = polymer_bonds
    else:
        chain.bonds = chain.bonds.merge(polymer_bonds)

    chain = _add_bonds_to_terminal_residues(chain)

    # Count inter-residue bonds per atom index (drives leaving-group removal).
    bond_count = Counter()
    for i, j, t in polymer_bonds._bonds:
        bond_count[i] += 1
        bond_count[j] += 1

    chain = remove_leaving_atoms(chain, bond_count)

    chain = _remove_non_std_ccd_leaving_atoms(chain)

    return chain
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
def _apply_ccd_modifications(
    ccd_seqs: list, modifications: list, pos_key: str, type_key: str
) -> None:
    """Apply "CCD_XXX" modifications to ccd_seqs in-place; positions are 1-based."""
    for m in modifications:
        index = m[pos_key] - 1
        mtype = m[type_key]
        if mtype.startswith("CCD_"):
            ccd_seqs[index] = mtype[4:]
        else:
            raise ValueError(f"unknown modification type: {mtype}")


def build_polymer(entity_info: dict) -> dict:
    """
    Build a polymer from a polymer info dict
    example: {
        "name": "polymer",
        "sequence": "GPDSMEEVVVPEEPPKLVSALATYVQQERLCTMFLSIANKLLPLKP",
        "count": 1
    }

    Args:
        entity_info (dict): polymer info dict keyed by one of
            "proteinChain", "dnaSequence" or "rnaSequence"
            (fix: the previous docstring documented a nonexistent "item" arg)

    Returns:
        dict: {"atom_array": biotite_AtomArray_object}

    Raises:
        ValueError: for an unsupported polymer type or modification type.
    """
    poly_type, info = list(entity_info.items())[0]
    if poly_type == "proteinChain":
        ccd_seqs = [PROTEIN_1to3[x] for x in info["sequence"]]
        if modifications := info.get("modifications"):
            _apply_ccd_modifications(ccd_seqs, modifications, "ptmPosition", "ptmType")
        if glycans := info.get("glycans"):
            logger.warning(f"glycans not supported: {glycans}")
    elif poly_type in ("dnaSequence", "rnaSequence"):
        map_1to3 = DNA_1to3 if poly_type == "dnaSequence" else RNA_1to3
        ccd_seqs = [map_1to3[x] for x in info["sequence"]]
        if modifications := info.get("modifications"):
            _apply_ccd_modifications(
                ccd_seqs, modifications, "basePosition", "modificationType"
            )
    else:
        raise ValueError(
            "polymer type must be proteinChain, dnaSequence or rnaSequence"
        )
    chain_array = _build_polymer_atom_array(ccd_seqs)
    chain_array = add_reference_features(chain_array)
    return {"atom_array": chain_array}
|
| 365 |
+
|
| 366 |
+
|
| 367 |
+
def rdkit_mol_to_atom_array(mol: Chem.Mol, removeHs: bool = True) -> AtomArray:
    """
    Convert rdkit mol to biotite AtomArray.

    Args:
        mol (Chem.Mol): rdkit mol; must carry a conformer (GetConformer
            raises otherwise — callers embed/validate conformers first)
        removeHs (bool): whether to remove hydrogens in atom_array

    Returns:
        AtomArray: biotite AtomArray with res_name "UNL" and a "charge" annotation
    """
    atom_count = mol.GetNumAtoms()
    atom_array = AtomArray(atom_count)
    atom_array.hetero[:] = True
    atom_array.res_name[:] = "UNL"
    atom_array.add_annotation("charge", int)

    conf = mol.GetConformer()
    coord = conf.GetPositions()

    # Name atoms "<ELEMENT><per-element counter>": C1, C2, N1, ...
    element_count = Counter()
    for i, atom in enumerate(mol.GetAtoms()):
        element = atom.GetSymbol().upper()
        element_count[element] += 1
        atom_name = f"{element}{element_count[element]}"

        # keep the generated name on the rdkit atom as well
        atom.SetProp("name", atom_name)

        atom_array.atom_name[i] = atom_name
        atom_array.element[i] = element
        atom_array.charge[i] = atom.GetFormalCharge()
        atom_array.coord[i, :] = coord[i, :]

    bonds = [[b.GetBeginAtomIdx(), b.GetEndAtomIdx()] for b in mol.GetBonds()]
    # Fix: reshape(-1, 2) keeps a valid empty (0, 2) bond table for bond-less
    # molecules (e.g. single-atom ions); np.array([]) alone has shape (0,)
    # and crashed BondList construction.
    atom_array.bonds = struc.BondList(
        atom_count, np.array(bonds, dtype=int).reshape(-1, 2)
    )
    if removeHs:
        # BondList indices are remapped automatically by the slice
        atom_array = atom_array[atom_array.element != "H"]
    return atom_array
|
| 407 |
+
|
| 408 |
+
|
| 409 |
+
def rdkit_mol_to_atom_info(mol: Chem.Mol) -> dict[str, Any]:
    """
    Convert RDKit Mol to atom_info dict.

    Args:
        mol (Chem.Mol): rdkit mol

    Returns:
        dict: info of atoms
        example: {
            "atom_array": biotite_AtomArray_object,
            "atom_map_to_atom_name": {1: "C2"},  # only for smiles
        }
    """
    map_num_to_name = {}
    idx_to_name = {}

    counts = Counter()
    for atom in mol.GetAtoms():
        symbol = atom.GetSymbol().upper()
        counts[symbol] += 1
        name = f"{symbol}{counts[symbol]}"
        atom.SetProp("name", name)
        map_num = atom.GetAtomMapNum()
        if map_num != 0:
            map_num_to_name[map_num] = name
        idx_to_name[atom.GetIdx()] = name

    # Prefer explicit atom-map numbers (SMILES input); otherwise fall back
    # to plain rdkit atom indices (file input).
    atom_info = {
        "atom_map_to_atom_name": map_num_to_name if map_num_to_name else idx_to_name,
        # Atom_array without hydrogens
        "atom_array": rdkit_mol_to_atom_array(mol, removeHs=True),
    }
    return atom_info
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
def lig_file_to_atom_info(lig_file_path: str) -> dict[str, Any]:
    """
    Convert ligand file to biotite AtomArray info.

    Args:
        lig_file_path (str): ligand file path with one of the following suffixes: [mol, mol2, sdf, pdb]

    Returns:
        dict: info of atoms
        example: {
            "atom_array": biotite_AtomArray_object,
            "atom_map_to_atom_name": {1: "C2"},  # only for smiles
        }

    Raises:
        ValueError: for an unsupported suffix, an unparsable file, or a
            molecule without a 3D conformer.
    """
    if lig_file_path.endswith(".mol"):
        mol = Chem.MolFromMolFile(lig_file_path)
    elif lig_file_path.endswith(".sdf"):
        # take the first record; default of None avoids a bare StopIteration
        # on an empty SDF file
        mol = next(Chem.SDMolSupplier(lig_file_path), None)
    elif lig_file_path.endswith(".pdb"):
        mol = Chem.MolFromPDBFile(lig_file_path)
    elif lig_file_path.endswith(".mol2"):
        mol = Chem.MolFromMol2File(lig_file_path)
    else:
        raise ValueError(f"Invalid ligand file type: .{lig_file_path.split('.')[-1]}")

    # Fix: explicit raises instead of assert — input validation must survive
    # `python -O`, which strips assert statements.
    if mol is None:
        raise ValueError(
            f"Failed to retrieve molecule from file, invalid ligand file: {lig_file_path}. "
            f"Please provide a file with one of the following suffixes: [mol, mol2, sdf, pdb]."
        )
    if not mol.GetConformer().Is3D():
        raise ValueError(f"3D conformer not found in ligand file: {lig_file_path}")
    atom_info = rdkit_mol_to_atom_info(mol)
    return atom_info
|
| 484 |
+
|
| 485 |
+
|
| 486 |
+
def smiles_to_atom_info(smiles: str) -> dict:
    """
    Convert smiles to atom_array, and atom_map_to_atom_name.

    Args:
        smiles (str): smiles string, like "CCCC", or "[C:1]NC(=O)" (use num to label covalent bond atom.)

    Returns:
        dict: info of atoms
        example: {
            "atom_array": biotite_AtomArray_object,
            "atom_map_to_atom_name": {1: "C2"},  # only for smiles
        }

    Raises:
        ValueError: if the SMILES cannot be parsed or conformer embedding fails.
    """
    mol = Chem.MolFromSmiles(smiles)
    # Fix: MolFromSmiles returns None on parse failure; fail loudly here
    # instead of crashing later inside AddHs.
    if mol is None:
        raise ValueError(f"Invalid SMILES string: {smiles}")
    mol = Chem.AddHs(mol)
    ret_code = AllChem.EmbedMolecule(mol)
    # Fix: explicit raise instead of assert (stripped under `python -O`).
    if ret_code != 0:
        raise ValueError(f"Conformer generation failed for input SMILES: {smiles}")
    atom_info = rdkit_mol_to_atom_info(mol)
    return atom_info
|
| 507 |
+
|
| 508 |
+
|
| 509 |
+
def build_ligand(entity_info: dict) -> dict:
    """
    Build a ligand from a ligand entity info dict
    example1: {
        "ligand": {
            "ligand": "CCD_ATP",
            "count": 1
        }
    },
    example2:{
        "ligand": {
            "ligand": "CCC=O",  # smiles
            "count": 1
        }
    },
    example3:{
        "ion": {
            "ion": "NA",
            "count": 3
        }
    },

    Args:
        entity_info (dict): ligand entity info

    Returns:
        dict: info of atoms
        example: {
            "atom_array": biotite_AtomArray_object,
            "index_to_atom_name": {1: "C2"},  # only for smiles
        }

    Raises:
        ValueError: if entity_info contains neither "ion" nor "ligand".
    """
    ligand_str = None
    if info := entity_info.get("ion"):
        ccd_code = [info["ion"]]
    elif info := entity_info.get("ligand"):
        ligand_str = info["ligand"]
        if ligand_str.startswith("CCD_"):
            # "CCD_ATP" or multi-component codes like "CCD_NAG_NAG"
            ccd_code = ligand_str[4:].split("_")
        else:
            ccd_code = None
    else:
        # Fix: previously fell through with ccd_code unbound -> NameError.
        raise ValueError('ligand entity_info must contain an "ion" or "ligand" entry')

    atom_info = {}
    if ccd_code is not None:
        atom_array = AtomArray(0)
        res_ids = []
        # one residue per CCD component, res_id 1-based
        for res_id, code in enumerate(ccd_code, start=1):
            ccd_atom_array = ccd.get_component_atom_array(
                code, keep_leaving_atoms=True, keep_hydrogens=False
            )
            atom_array += ccd_atom_array
            res_ids += [res_id] * len(ccd_atom_array)
        atom_info["atom_array"] = atom_array
        atom_info["atom_array"].res_id[:] = res_ids
    else:
        # non-CCD ligand: either a ligand file reference or a SMILES string
        if ligand_str.startswith("FILE_"):
            lig_file_path = ligand_str[5:]
            atom_info = lig_file_to_atom_info(lig_file_path)
        else:
            atom_info = smiles_to_atom_info(ligand_str)
        atom_info["atom_array"].res_id[:] = 1
    atom_info["atom_array"] = add_reference_features(atom_info["atom_array"])
    return atom_info
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
def add_entity_atom_array(single_job_dict: dict) -> dict:
    """
    Add atom_array to each entity in single_job_dict.

    Args:
        single_job_dict (dict): input job dict

    Returns:
        dict: deepcopy and updated job dict with atom_array

    Raises:
        ValueError: for an unknown entity type or more than 99 SMILES ligands.
    """
    single_job_dict = copy.deepcopy(single_job_dict)
    smiles_ligand_count = 0
    polymer_types = ("proteinChain", "dnaSequence", "rnaSequence")
    for entity_info in single_job_dict["sequences"]:
        # Consolidated the three identical polymer branches into one lookup.
        if info := next(
            (entity_info[t] for t in polymer_types if t in entity_info), None
        ):
            atom_info = build_polymer(entity_info)
        elif info := entity_info.get("ligand"):
            atom_info = build_ligand(entity_info)
            if not info["ligand"].startswith("CCD_"):
                smiles_ligand_count += 1
                # Fix: explicit raise instead of assert (stripped under -O).
                if smiles_ligand_count > 99:
                    raise ValueError("too many smiles ligands")
                # use lower case res_name (l01, l02, ..., l99) to avoid conflict with CCD code
                atom_info["atom_array"].res_name[:] = f"l{smiles_ligand_count:02d}"
        elif info := entity_info.get("ion"):
            atom_info = build_ligand(entity_info)
        else:
            raise ValueError(
                "entity type must be proteinChain, dnaSequence, rnaSequence, ligand or ion"
            )
        info.update(atom_info)
    return single_job_dict
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/json_to_feature.py
ADDED
|
@@ -0,0 +1,310 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import copy
|
| 16 |
+
import logging
|
| 17 |
+
|
| 18 |
+
import numpy as np
|
| 19 |
+
import torch
|
| 20 |
+
from biotite.structure import AtomArray
|
| 21 |
+
|
| 22 |
+
from protenix.data.featurizer import Featurizer
|
| 23 |
+
from protenix.data.json_parser import add_entity_atom_array, remove_leaving_atoms
|
| 24 |
+
from protenix.data.parser import AddAtomArrayAnnot
|
| 25 |
+
from protenix.data.tokenizer import AtomArrayTokenizer, TokenArray
|
| 26 |
+
from protenix.data.utils import int_to_letters
|
| 27 |
+
|
| 28 |
+
logger = logging.getLogger(__name__)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class SampleDictToFeatures:
    """
    Convert one input sample dict into model input features.

    The dict must contain a "sequences" list with one single-key dict per
    entity (e.g. ``{"proteinChain": {...}}``) and may contain a
    "covalent_bonds" list. ``add_entity_atom_array`` attaches a pre-built
    "atom_array" to each entity before assembly.
    """

    def __init__(self, single_sample_dict):
        self.single_sample_dict = single_sample_dict
        # Attaches an "atom_array" (and related metadata) to each entity.
        self.input_dict = add_entity_atom_array(single_sample_dict)
        self.entity_poly_type = self.get_entity_poly_type()

    def get_entity_poly_type(self) -> dict[str, str]:
        """
        Get the polymer entity type for each entity.

        Allowed values for "_entity_poly.type" (mmCIF) include:
            cyclic-pseudo-peptide, other, peptide nucleic acid,
            polydeoxyribonucleotide,
            polydeoxyribonucleotide/polyribonucleotide hybrid,
            polypeptide(D), polypeptide(L), polyribonucleotide.

        Returns:
            dict[str, str]: a dict of polymer entity id (1-based, as str)
                to entity poly type.
        """
        entity_type_mapping_dict = {
            "proteinChain": "polypeptide(L)",
            "dnaSequence": "polydeoxyribonucleotide",
            "rnaSequence": "polyribonucleotide",
        }
        entity_poly_type = {}
        for idx, type2entity_dict in enumerate(self.input_dict["sequences"]):
            assert len(type2entity_dict) == 1, "Only one entity type is allowed."
            for entity_type, entity in type2entity_dict.items():
                # Only polymer entities carry a "sequence" field; other
                # entity kinds (e.g. ligands) are skipped here.
                if "sequence" in entity:
                    assert entity_type in [
                        "proteinChain",
                        "dnaSequence",
                        "rnaSequence",
                    ], 'The "sequences" field accepts only these entity types: ["proteinChain", "dnaSequence", "rnaSequence"].'
                    entity_poly_type[str(idx + 1)] = entity_type_mapping_dict[
                        entity_type
                    ]
        return entity_poly_type

    def build_full_atom_array(self) -> AtomArray:
        """
        By assembling the AtomArray of each entity, a complete AtomArray is created.

        Each entity is replicated "count" times; every replica gets a fresh
        chain id (A, B, C, ...) and a 1-based "copy_id" annotation.

        Returns:
            AtomArray: Biotite Atom array.
        """
        atom_array = None
        asym_chain_idx = 0
        for idx, type2entity_dict in enumerate(self.input_dict["sequences"]):
            for entity_type, entity in type2entity_dict.items():
                entity_id = str(idx + 1)

                entity_atom_array = None
                for asym_chain_count in range(1, entity["count"] + 1):
                    asym_id_str = int_to_letters(asym_chain_idx + 1)
                    # Each copy must be an independent AtomArray.
                    asym_chain = copy.deepcopy(entity["atom_array"])
                    chain_id = [asym_id_str] * len(asym_chain)
                    copy_id = [asym_chain_count] * len(asym_chain)
                    asym_chain.set_annotation("label_asym_id", chain_id)
                    asym_chain.set_annotation("auth_asym_id", chain_id)
                    asym_chain.set_annotation("chain_id", chain_id)
                    asym_chain.set_annotation("label_seq_id", asym_chain.res_id)
                    asym_chain.set_annotation("copy_id", copy_id)
                    if entity_atom_array is None:
                        entity_atom_array = asym_chain
                    else:
                        entity_atom_array += asym_chain
                    asym_chain_idx += 1

                entity_atom_array.set_annotation(
                    "label_entity_id", [entity_id] * len(entity_atom_array)
                )

                # Polymers are ATOM records; everything else is HETATM.
                if entity_type in ["proteinChain", "dnaSequence", "rnaSequence"]:
                    entity_atom_array.hetero[:] = False
                else:
                    entity_atom_array.hetero[:] = True

                if atom_array is None:
                    atom_array = entity_atom_array
                else:
                    atom_array += entity_atom_array
        return atom_array

    @staticmethod
    def get_a_bond_atom(
        atom_array: AtomArray,
        entity_id: int,
        position: int,
        atom_name: str,
        copy_id: "int | None" = None,
    ) -> np.ndarray:
        """
        Get the atom index of a bond atom.

        Args:
            atom_array (AtomArray): Biotite Atom array.
            entity_id (int): Entity id.
            position (int): Residue index of the atom.
            atom_name (str): Atom name.
            copy_id (int | None): An asym chain id in N copies of an entity;
                None matches the atom on every copy of the entity.

        Returns:
            np.ndarray: Array of indices for specified atoms on each asym chain.
        """
        entity_mask = atom_array.label_entity_id == str(entity_id)
        position_mask = atom_array.res_id == int(position)
        atom_name_mask = atom_array.atom_name == str(atom_name)

        if copy_id is not None:
            copy_mask = atom_array.copy_id == int(copy_id)
            mask = entity_mask & position_mask & atom_name_mask & copy_mask
        else:
            mask = entity_mask & position_mask & atom_name_mask
        atom_indices = np.where(mask)[0]
        return atom_indices

    def add_bonds_between_entities(self, atom_array: AtomArray) -> AtomArray:
        """
        Based on the information in the "covalent_bonds",
        add a bond between specified atoms on each pair of asymmetric chains of the two entities.
        Note that this requires the number of asymmetric chains in both entities to be equal.

        Args:
            atom_array (AtomArray): Biotite Atom array.

        Returns:
            AtomArray: Biotite Atom array with bonds added.
        """
        if "covalent_bonds" not in self.input_dict:
            return atom_array

        # Per-atom bond multiplicity; used to strip leaving atoms afterwards.
        bond_count = {}
        for bond_info_dict in self.input_dict["covalent_bonds"]:
            bond_atoms = []
            for i in ["left", "right"]:
                entity_id = int(bond_info_dict[f"{i}_entity"])
                # "{side}_copy" is optional: when absent, the bond is applied
                # to every copy of the entity (get_a_bond_atom treats None as
                # "all copies"). The original int(dict.get(...)) raised
                # TypeError whenever the key was missing.
                raw_copy_id = bond_info_dict.get(f"{i}_copy")
                copy_id = None if raw_copy_id is None else int(raw_copy_id)
                position = int(bond_info_dict[f"{i}_position"])
                atom_name = bond_info_dict[f"{i}_atom"]

                if isinstance(atom_name, str) and atom_name.isdigit():
                    # Convert SMILES atom index to int
                    atom_name = int(atom_name)

                if isinstance(atom_name, int):
                    # Convert AtomMap in SMILES to atom name in AtomArray.
                    # Each "sequences" entry is a single-key dict, so unpack
                    # its sole entity dict. (The original kept the
                    # dict_values view and subscripted it, which raises
                    # TypeError.)
                    (entity_dict,) = self.input_dict["sequences"][
                        entity_id - 1
                    ].values()
                    assert "atom_map_to_atom_name" in entity_dict
                    atom_name = entity_dict["atom_map_to_atom_name"][atom_name]

                # Get bond atoms by entity_id, position, atom_name
                atom_indices = self.get_a_bond_atom(
                    atom_array, entity_id, position, atom_name, copy_id
                )
                assert (
                    atom_indices.size > 0
                ), f"No atom found for {atom_name} in entity {entity_id} at position {position}."
                bond_atoms.append(atom_indices)

            assert len(bond_atoms[0]) == len(
                bond_atoms[1]
            ), f'Can not create bonds because the "count" of entity {bond_info_dict["left_entity"]} \
            and {bond_info_dict["right_entity"]} are not equal. '

            # Create bond between each asym chain pair
            for atom_idx1, atom_idx2 in zip(bond_atoms[0], bond_atoms[1]):
                atom_array.bonds.add_bond(atom_idx1, atom_idx2, 1)
                bond_count[atom_idx1] = bond_count.get(atom_idx1, 0) + 1
                bond_count[atom_idx2] = bond_count.get(atom_idx2, 0) + 1

        atom_array = remove_leaving_atoms(atom_array, bond_count)

        return atom_array

    @staticmethod
    def add_atom_array_attributes(
        atom_array: AtomArray, entity_poly_type: dict[str, str]
    ) -> AtomArray:
        """
        Add attributes to the Biotite AtomArray.

        Args:
            atom_array (AtomArray): Biotite Atom array.
            entity_poly_type (dict[str, str]): a dict of polymer entity id to entity type.

        Returns:
            AtomArray: Biotite Atom array with attributes added.
        """
        atom_array = AddAtomArrayAnnot.add_token_mol_type(atom_array, entity_poly_type)
        atom_array = AddAtomArrayAnnot.add_centre_atom_mask(atom_array)
        atom_array = AddAtomArrayAnnot.add_atom_mol_type_mask(atom_array)
        atom_array = AddAtomArrayAnnot.add_distogram_rep_atom_mask(atom_array)
        atom_array = AddAtomArrayAnnot.add_plddt_m_rep_atom_mask(atom_array)
        atom_array = AddAtomArrayAnnot.add_cano_seq_resname(atom_array)
        atom_array = AddAtomArrayAnnot.add_tokatom_idx(atom_array)
        atom_array = AddAtomArrayAnnot.add_modified_res_mask(atom_array)
        atom_array = AddAtomArrayAnnot.unique_chain_and_add_ids(atom_array)
        atom_array = AddAtomArrayAnnot.find_equiv_mol_and_assign_ids(
            atom_array, check_final_equiv=False
        )
        atom_array = AddAtomArrayAnnot.add_ref_space_uid(atom_array)
        return atom_array

    @staticmethod
    def mse_to_met(atom_array: AtomArray) -> AtomArray:
        """
        Ref: AlphaFold3 SI chapter 2.1
        MSE residues are converted to MET residues.

        Args:
            atom_array (AtomArray): Biotite AtomArray object.

        Returns:
            AtomArray: Biotite AtomArray object after converted MSE to MET.
        """
        mse = atom_array.res_name == "MSE"
        se = mse & (atom_array.atom_name == "SE")
        # Selenium -> sulfur; SD is the MET sulfur atom name.
        atom_array.atom_name[se] = "SD"
        atom_array.element[se] = "S"
        atom_array.res_name[mse] = "MET"
        atom_array.hetero[mse] = False
        return atom_array

    def get_atom_array(self) -> AtomArray:
        """
        Create a Biotite AtomArray and add attributes from the input dict.

        Returns:
            AtomArray: Biotite Atom array.
        """
        atom_array = self.build_full_atom_array()
        atom_array = self.add_bonds_between_entities(atom_array)
        atom_array = self.mse_to_met(atom_array)
        atom_array = self.add_atom_array_attributes(atom_array, self.entity_poly_type)
        return atom_array

    def get_feature_dict(self) -> tuple[dict[str, torch.Tensor], AtomArray, TokenArray]:
        """
        Generates a feature dictionary from the input sample dictionary.

        Returns:
            A tuple containing:
                - A dictionary of features.
                - An AtomArray object.
                - A TokenArray object.
        """
        atom_array = self.get_atom_array()

        aa_tokenizer = AtomArrayTokenizer(atom_array)
        token_array = aa_tokenizer.get_token_array()

        featurizer = Featurizer(token_array, atom_array)
        feature_dict = featurizer.get_all_input_features()

        token_array_with_frame = featurizer.get_token_frame(
            token_array=token_array,
            atom_array=atom_array,
            ref_pos=feature_dict["ref_pos"],
            ref_mask=feature_dict["ref_mask"],
        )

        # [N_token]
        feature_dict["has_frame"] = torch.Tensor(
            token_array_with_frame.get_annotation("has_frame")
        ).long()

        # [N_token, 3]
        feature_dict["frame_atom_index"] = torch.Tensor(
            token_array_with_frame.get_annotation("frame_atom_index")
        ).long()
        return feature_dict, atom_array, token_array
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/msa_featurizer.py
ADDED
|
@@ -0,0 +1,1162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import json
|
| 16 |
+
import os
|
| 17 |
+
import shutil
|
| 18 |
+
from abc import ABC, abstractmethod
|
| 19 |
+
from collections import defaultdict
|
| 20 |
+
from copy import deepcopy
|
| 21 |
+
from os.path import exists as opexists
|
| 22 |
+
from os.path import join as opjoin
|
| 23 |
+
from typing import Any, Mapping, Optional, Sequence, Union
|
| 24 |
+
|
| 25 |
+
import numpy as np
|
| 26 |
+
import torch
|
| 27 |
+
from biotite.structure import AtomArray
|
| 28 |
+
|
| 29 |
+
from protenix.data.constants import STD_RESIDUES, rna_order_with_x
|
| 30 |
+
from protenix.data.msa_utils import (
|
| 31 |
+
PROT_TYPE_NAME,
|
| 32 |
+
FeatureDict,
|
| 33 |
+
add_assembly_features,
|
| 34 |
+
clip_msa,
|
| 35 |
+
convert_monomer_features,
|
| 36 |
+
get_identifier_func,
|
| 37 |
+
load_and_process_msa,
|
| 38 |
+
make_sequence_features,
|
| 39 |
+
merge_features_from_prot_rna,
|
| 40 |
+
msa_parallel,
|
| 41 |
+
pair_and_merge,
|
| 42 |
+
rna_merge,
|
| 43 |
+
)
|
| 44 |
+
from protenix.data.tokenizer import TokenArray
|
| 45 |
+
from protenix.utils.logger import get_logger
|
| 46 |
+
|
| 47 |
+
logger = get_logger(__name__)
|
| 48 |
+
|
| 49 |
+
# Per-database cap on the number of MSA sequences to keep; -1 means no limit.
# Keys name the search databases (protein: uniref100/mmseqs_other/uniclust30;
# RNA: rfam/rnacentral/nucleotide) — presumably consumed as the default for
# the featurizers' `seq_limits` argument; TODO confirm against callers.
SEQ_LIMITS = {
    "uniref100": -1,
    "mmseqs_other": -1,
    "uniclust30": -1,
    "rfam": 10000,
    "rnacentral": 10000,
    "nucleotide": 10000,
}
# Default maximum number of rows in a merged MSA (matches the featurizers'
# `max_size` default of 16384).
MSA_MAX_SIZE = 16384
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class BaseMSAFeaturizer(ABC):
    """
    Common machinery for MSA featurizers.

    Subclasses implement database-specific MSA path lookup
    (``get_msa_path``) and per-sequence processing
    (``process_single_sequence``); this base class handles entity/chain
    bookkeeping and the merge pipeline.
    """

    def __init__(
        self,
        indexing_method: str = "sequence",
        merge_method: str = "dense_max",
        seq_limits: Optional[dict[str, int]] = None,
        max_size: int = 16384,
        **kwargs,
    ):
        """
        Initializes the BaseMSAFeaturizer with the specified parameters.

        Args:
            indexing_method (str): The method used for indexing the MSA. Defaults to "sequence".
            merge_method (str): The method used for merging MSA features. Defaults to "dense_max".
            seq_limits (Optional[dict[str, int]]): Dictionary specifying sequence limits for
                different databases. Defaults to None, which is treated as an empty dict.
                (A literal ``{}`` default would be shared across all instances — the
                mutable-default-argument pitfall.)
            max_size (int): The maximum size of the MSA. Defaults to 16384.
            **kwargs: Additional keyword arguments (accepted for config passthrough; unused here).

        Raises:
            AssertionError: If the provided `merge_method` or `indexing_method` is not valid.
        """
        assert merge_method in ["dense_max", "dense_min", "sparse"]
        assert indexing_method in [
            "sequence",
            "pdb_id",
            "pdb_id_entity_id",
        ], f"Unknown indexing method: {indexing_method}"
        self.indexing_method = indexing_method
        self.merge_method = merge_method
        # Fresh dict per instance when no limits are supplied.
        self.seq_limits = {} if seq_limits is None else seq_limits
        self.max_size = max_size

    @abstractmethod
    def get_msa_path(self):
        """Return the path of an MSA file (database-specific)."""

    @abstractmethod
    def process_single_sequence(self):
        """Build MSA features for a single sequence (database-specific)."""

    def get_entity_ids(
        self, bioassembly_dict: Mapping[str, Any], msa_entity_type: str = "prot"
    ) -> set[str]:
        """
        Extracts the entity IDs that match the specified MSA entity type from the bioassembly dictionary.

        Args:
            bioassembly_dict (Mapping[str, Any]): The bioassembly dictionary containing entity information.
            msa_entity_type (str): The type of MSA entity to filter by. Defaults to "prot".

        Returns:
            set[str]: A set of entity IDs that match the specified MSA entity type.

        Raises:
            AssertionError: If the provided `msa_entity_type` is not "prot" or "rna".
        """
        assert msa_entity_type in ["prot", "rna"], "only protein and rna might have msa"
        poly_type_mapping = {
            "prot": "polypeptide",
            "rna": "polyribonucleotide",
            "dna": "polydeoxyribonucleotide",
        }
        entity_poly_type = bioassembly_dict["entity_poly_type"]

        # Substring match: e.g. "polypeptide" matches both "polypeptide(L)"
        # and "polypeptide(D)".
        entity_ids: set[str] = {
            entity_id
            for entity_id, poly_type in entity_poly_type.items()
            if poly_type_mapping[msa_entity_type] in poly_type
        }
        return entity_ids

    def get_selected_asym_ids(
        self,
        bioassembly_dict: Mapping[str, Any],
        entity_to_asym_id_int: Mapping[str, Sequence[int]],
        selected_token_indices: Optional[torch.Tensor],
        entity_ids: set[str],
    ) -> tuple[set[int], set[int], dict[int, str], dict[int, str], dict[str, str]]:
        """
        Extracts the selected asym IDs based on the provided bioassembly dictionary and entity IDs.

        Args:
            bioassembly_dict (Mapping[str, Any]): The bioassembly dictionary containing entity information.
            entity_to_asym_id_int (Mapping[str, Sequence[int]]): Mapping from entity ID to asym ID integers.
            selected_token_indices (Optional[torch.Tensor]): Indices of selected tokens;
                None means all tokens are considered.
            entity_ids (set[str]): Set of entity IDs to consider.

        Returns:
            tuple: A tuple containing:
                - selected_asym_ids (set[int]): Set of selected asym IDs.
                - asym_id_ints (set[int]): Set of asym ID integers.
                - asym_to_entity_id (dict[int, str]): Mapping from asym ID integers to entity IDs.
                - asym_id_int_to_sequence (dict[int, str]): Mapping from asym ID integers to sequences.
                - entity_id_to_sequence (dict[str, str]): Mapping from entity IDs to sequences.
        """
        asym_to_entity_id: dict[int, str] = {}
        # Only count the selected Prot/RNA entities, many-to-one mapping
        for entity_id, asym_id_int_list in entity_to_asym_id_int.items():
            if entity_id in entity_ids:
                for asym_id_int in asym_id_int_list:
                    asym_to_entity_id[asym_id_int] = entity_id
        entity_id_to_sequence = {
            k: v
            for (k, v) in bioassembly_dict["sequences"].items()
            if k in entity_ids and k in entity_to_asym_id_int
        }
        asym_id_ints = set(
            [
                asym_id_int
                for (asym_id_int, entity_id) in asym_to_entity_id.items()
                if entity_id in entity_ids
            ]
        )
        # Only count Prot/RNA chains, many-to-one mapping
        asym_id_int_to_sequence = {
            asym_id_int: entity_id_to_sequence[entity_id]
            for (asym_id_int, entity_id) in asym_to_entity_id.items()
        }
        atom_array = bioassembly_dict["atom_array"]
        token_array = bioassembly_dict["token_array"]

        # Chains that actually contribute tokens (after optional cropping).
        if selected_token_indices is None:
            selected_asym_ids = set(
                [
                    atom_array[idx].asym_id_int
                    for idx in token_array.get_annotation("centre_atom_index")
                ]
            )
        else:
            selected_asym_ids = set(
                [
                    atom_array[idx].asym_id_int
                    for idx in token_array[selected_token_indices].get_annotation(
                        "centre_atom_index"
                    )
                ]
            )
        return (
            selected_asym_ids,
            asym_id_ints,
            asym_to_entity_id,
            asym_id_int_to_sequence,
            entity_id_to_sequence,
        )

    def get_msa_pipeline(
        self,
        is_homomer_or_monomer: bool,
        selected_asym_ids: set[int],
        asym_to_entity_id: dict[int, str],
        asym_id_int_to_sequence: dict[int, str],
        entity_id_to_sequence: dict[str, str],
        bioassembly_dict: Mapping[str, Any],
        entity_to_asym_id_int: Mapping[str, Sequence[int]],
        msa_entity_type="prot",
    ) -> Optional[dict[str, np.ndarray]]:
        """
        Processes the MSA pipeline for the given bioassembly dictionary and selected asym IDs.

        Args:
            is_homomer_or_monomer (bool): Indicates if the sequence is a homomer or monomer.
            selected_asym_ids (set[int]): Set of selected asym IDs.
            asym_to_entity_id (dict[int, str]): Mapping from asym ID integers to entity IDs.
            asym_id_int_to_sequence (dict[int, str]): Mapping from asym ID integers to sequences.
            entity_id_to_sequence (dict[str, str]): Mapping from entity IDs to sequences.
            bioassembly_dict (Mapping[str, Any]): The bioassembly dictionary containing entity information.
            entity_to_asym_id_int (Mapping[str, Sequence[int]]): Mapping from entity ID to asym ID integers.
            msa_entity_type (str): The type of MSA entity to process. Defaults to "prot".

        Returns:
            Optional[dict[str, np.ndarray]]: A dictionary containing the processed MSA features,
                or None if no features are processed.

        Raises:
            AssertionError: If `msa_entity_type` is "rna" and `is_homomer_or_monomer` is False.
        """
        if msa_entity_type == "rna":
            assert is_homomer_or_monomer, "RNA MSAs do not pairing"
        pdb_id = bioassembly_dict["pdb_id"]
        sequence_to_features: dict[str, dict[str, Any]] = {}

        for entity_id, sequence in entity_id_to_sequence.items():
            if sequence in sequence_to_features:
                # It is possible that different entity ids correspond to the same sequence
                continue

            if all(
                [
                    asym_id_int not in selected_asym_ids
                    for asym_id_int in entity_to_asym_id_int[entity_id]
                ]
            ):
                # All chains corresponding to this entity are not selected
                continue

            sequence_feat = self.process_single_sequence(
                pdb_name=f"{pdb_id}_{entity_id}",
                sequence=sequence,
                pdb_id=pdb_id,
                is_homomer_or_monomer=is_homomer_or_monomer,
            )
            sequence_feat = convert_monomer_features(sequence_feat)
            sequence_to_features[sequence] = sequence_feat

        # Fan the per-sequence features out to every selected chain; deepcopy
        # so that per-chain mutation downstream cannot alias.
        all_chain_features = {
            asym_id_int: deepcopy(sequence_to_features[seq])
            for asym_id_int, seq in asym_id_int_to_sequence.items()
            if seq in sequence_to_features
        }
        del sequence_to_features

        if len(all_chain_features) == 0:
            return None
        # NOTE(review): merge_all_chain_features is expected to be defined
        # elsewhere in this module.
        np_example = merge_all_chain_features(
            pdb_id=pdb_id,
            all_chain_features=all_chain_features,
            asym_to_entity_id=asym_to_entity_id,
            is_homomer_or_monomer=is_homomer_or_monomer,
            merge_method=self.merge_method,
            max_size=self.max_size,
            msa_entity_type=msa_entity_type,
        )
        return np_example
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
class PROTMSAFeaturizer(BaseMSAFeaturizer):
    """Protein MSA featurizer for training.

    Locates pre-computed MSA search results (jackhmmer / mmseqs over PDB and
    distillation data), builds per-sequence MSA features and merges them into
    assembly-level features.
    """

    def __init__(
        self,
        dataset_name: str = "",
        seq_to_pdb_idx_path: str = "",
        distillation_index_file: str = None,
        indexing_method: str = "sequence",
        pairing_db: Optional[str] = "",
        non_pairing_db: str = "mmseqs_all",
        merge_method: str = "dense_max",
        seq_limits: Optional[dict[str, int]] = {},
        max_size: int = 16384,
        pdb_jackhmmer_dir: str = None,
        pdb_mmseqs_dir: str = None,
        distillation_mmseqs_dir: str = None,
        distillation_uniclust_dir: str = None,
        **kwargs,
    ):
        """
        Args:
            dataset_name (str): name of the training dataset.
            seq_to_pdb_idx_path (str): JSON file mapping sequence -> pdb index.
            distillation_index_file (str): optional JSON file mapping
                pdb_id -> relative MSA dir for distillation data.
            indexing_method (str): "sequence" or "pdb_id"; how MSA files are
                located on disk.
            pairing_db (Optional[str]): database used for MSA pairing;
                empty string or None disables pairing.
            non_pairing_db (str): comma-separated database names, or the
                shorthand "mmseqs_all".
            merge_method (str): strategy used when merging chain MSAs.
            seq_limits (Optional[dict[str, int]]): per-database depth limits.
            max_size (int): maximum number of MSA rows after merging.
            pdb_jackhmmer_dir (str): root of jackhmmer search results for PDB.
            pdb_mmseqs_dir (str): root of mmseqs search results for PDB.
            distillation_mmseqs_dir (str): root of mmseqs results for
                distillation data.
            distillation_uniclust_dir (str): root of uniclust results for
                distillation data.
        """
        super().__init__(
            indexing_method=indexing_method,
            merge_method=merge_method,
            seq_limits=seq_limits,
            max_size=max_size,
            **kwargs,
        )
        self.dataset_name = dataset_name
        self.pdb_jackhmmer_dir = pdb_jackhmmer_dir
        self.pdb_mmseqs_dir = pdb_mmseqs_dir
        self.distillation_mmseqs_dir = distillation_mmseqs_dir
        self.distillation_uniclust_dir = distillation_uniclust_dir
        # Fix: the annotation allows pairing_db=None, but the original
        # `len(pairing_db) > 0` raised TypeError for None. Treat None and ""
        # uniformly as "pairing disabled".
        self.pairing_db = pairing_db if pairing_db else None

        if non_pairing_db == "mmseqs_all":
            self.non_pairing_db = ["uniref100", "mmseqs_other"]
        else:
            self.non_pairing_db = non_pairing_db.split(",")

        with open(seq_to_pdb_idx_path, "r") as f:
            self.seq_to_pdb_idx = json.load(f)
        # If distillation data is available
        if distillation_index_file is not None:
            with open(distillation_index_file, "r") as f:
                self.distillation_pdb_id_to_msa_dir = json.load(f)
        else:
            self.distillation_pdb_id_to_msa_dir = None

    def get_msa_path(self, db_name: str, sequence: str, pdb_id: str) -> str:
        """
        Get the path of an MSA file.

        Args:
            db_name (str): name of genomics database
            sequence (str): input sequence
            pdb_id (str): pdb_id of input sequence

        Returns:
            str: file path (not guaranteed to exist)
        """
        if self.indexing_method == "pdb_id" and self.distillation_pdb_id_to_msa_dir:
            rel_path = self.distillation_pdb_id_to_msa_dir[pdb_id]

            if db_name == "uniclust30":
                msa_dir_path = opjoin(self.distillation_uniclust_dir, rel_path)
            elif db_name in ["uniref100", "mmseqs_other"]:
                msa_dir_path = opjoin(self.distillation_mmseqs_dir, rel_path)
            else:
                raise ValueError(
                    f"Indexing with {self.indexing_method} is not supported for {db_name}"
                )

            # Prefer the "<db>_hits.a3m" naming; fall back to "<db>.a3m".
            if opexists(msa_path := opjoin(msa_dir_path, f"{db_name}_hits.a3m")):
                return msa_path
            else:
                return opjoin(msa_dir_path, f"{db_name}.a3m")
        else:
            # indexing_method == "sequence"
            pdb_index = self.seq_to_pdb_idx[sequence]
            if db_name in ["uniref100", "mmseqs_other"]:
                return opjoin(
                    self.pdb_mmseqs_dir, str(pdb_index), f"{db_name}_hits.a3m"
                )
            else:
                return opjoin(
                    self.pdb_jackhmmer_dir,
                    f"pdb_on_{db_name}",
                    "results",
                    f"{pdb_index}.a3m",
                )

    def process_single_sequence(
        self,
        pdb_name: str,
        sequence: str,
        pdb_id: str,
        is_homomer_or_monomer: bool,
    ) -> dict[str, np.ndarray]:
        """
        Get basic MSA features for a single sequence.

        Args:
            pdb_name (str): f"{pdb_id}_{entity_id}" of the input entity
            sequence (str): input sequence
            pdb_id (str): pdb_id of input sequence
            is_homomer_or_monomer (bool): True if the input sequence is a
                homomer or a monomer

        Returns:
            Dict[str, np.ndarray]: the basic MSA features of the input sequence

        Raises:
            ValueError: if pairing is required but no pairing MSA exists.
        """
        raw_msa_paths, seq_limits = [], []
        for db_name in self.non_pairing_db:
            if opexists(
                path := self.get_msa_path(db_name, sequence, pdb_id)
            ) and path.endswith(".a3m"):
                raw_msa_paths.append(path)
                seq_limits.append(self.seq_limits.get(db_name, SEQ_LIMITS[db_name]))

        # Get sequence and non-pairing msa features
        sequence_features = process_single_sequence(
            pdb_name=pdb_name,
            sequence=sequence,
            raw_msa_paths=raw_msa_paths,
            seq_limits=seq_limits,
            msa_entity_type="prot",
            msa_type="non_pairing",
        )

        # Get pairing msa features
        if not is_homomer_or_monomer:
            # Separately process the MSA needed for pairing
            raw_msa_paths, seq_limits = [], []
            if opexists(
                path := self.get_msa_path(self.pairing_db, sequence, pdb_id)
            ) and path.endswith(".a3m"):
                raw_msa_paths = [
                    path,
                ]
                seq_limits.append(
                    self.seq_limits.get(self.pairing_db, SEQ_LIMITS[self.pairing_db])
                )

            if len(raw_msa_paths) == 0:
                raise ValueError(f"{pdb_name} does not have MSA for pairing")

            all_seq_msa_features = load_and_process_msa(
                pdb_name=pdb_name,
                msa_type="pairing",
                raw_msa_paths=raw_msa_paths,
                seq_limits=seq_limits,
                identifier_func=get_identifier_func(pairing_db=self.pairing_db),
                handle_empty="raise_error",
            )
            sequence_features.update(all_seq_msa_features)

        return sequence_features

    def get_msa_features_for_assembly(
        self,
        bioassembly_dict: Mapping[str, Any],
        entity_to_asym_id_int: Mapping[str, Sequence[int]],
        selected_token_indices: Optional[torch.Tensor],
    ) -> dict[str, np.ndarray]:
        """
        Get MSA features for the bioassembly.

        Args:
            bioassembly_dict (Mapping[str, Any]): the bioassembly dict with
                sequence, atom_array and token_array.
            entity_to_asym_id_int (Mapping[str, Sequence[int]]): mapping from
                entity_id to asym_id_int.
            selected_token_indices (torch.Tensor): Cropped token indices.

        Returns:
            Dict[str, np.ndarray]: the basic MSA features of the bioassembly,
            or None if the assembly contains no protein entity.
        """
        protein_entity_ids = self.get_entity_ids(
            bioassembly_dict, msa_entity_type="prot"
        )
        if len(protein_entity_ids) == 0:
            return None
        (
            selected_asym_ids,
            asym_id_ints,
            asym_to_entity_id,
            asym_id_int_to_sequence,
            entity_id_to_sequence,
        ) = self.get_selected_asym_ids(
            bioassembly_dict=bioassembly_dict,
            entity_to_asym_id_int=entity_to_asym_id_int,
            selected_token_indices=selected_token_indices,
            entity_ids=protein_entity_ids,
        )
        # No pairing_db specified (all proteins are treated as monomers) or
        # only one distinct selected sequence.
        is_homomer_or_monomer = (self.pairing_db is None) or (
            len(
                set(
                    [
                        asym_id_int_to_sequence[asym_id_int]
                        for asym_id_int in selected_asym_ids
                        if asym_id_int in asym_id_ints
                    ]
                )
            )
            == 1
        )
        np_example = self.get_msa_pipeline(
            is_homomer_or_monomer=is_homomer_or_monomer,
            selected_asym_ids=selected_asym_ids,
            asym_to_entity_id=asym_to_entity_id,
            asym_id_int_to_sequence=asym_id_int_to_sequence,
            entity_id_to_sequence=entity_id_to_sequence,
            bioassembly_dict=bioassembly_dict,
            entity_to_asym_id_int=entity_to_asym_id_int,
            msa_entity_type="prot",
        )
        return np_example
|
| 500 |
+
|
| 501 |
+
|
| 502 |
+
class RNAMSAFeaturizer(BaseMSAFeaturizer):
    """MSA featurizer for RNA chains.

    RNA MSAs are never paired: every RNA entity is featurized independently
    from the non-pairing databases (Rfam, RNAcentral, nucleotide).
    """

    def __init__(
        self,
        seq_to_pdb_idx_path: str = "",
        indexing_method: str = "sequence",
        merge_method: str = "dense_max",
        seq_limits: Optional[dict[str, int]] = {},
        max_size: int = 16384,
        rna_msa_dir: str = None,
        **kwargs,
    ) -> None:
        super().__init__(
            indexing_method=indexing_method,
            merge_method=merge_method,
            seq_limits=seq_limits,
            max_size=max_size,
            **kwargs,
        )
        # By default, use all the databases described in the paper.
        self.rna_msa_dir = rna_msa_dir
        self.non_pairing_db = ["rfam", "rnacentral", "nucleotide"]
        with open(seq_to_pdb_idx_path, "r") as f:
            # Maps an RNA sequence to the list of pdb entries that contain it.
            self.seq_to_pdb_idx = json.load(f)

    def get_msa_path(
        self, db_name: str, sequence: str, pdb_id_entity_id: str, reduced: bool = True
    ) -> str:
        """
        Locate the .sto MSA file for one RNA sequence.

        Args:
            db_name (str): genetics database for RNA chains
            sequence (str): input sequence
            pdb_id_entity_id (str): pdb_id_entity_id of input sequence
            reduced (bool): whether to use the depth-reduced ("max 1w", i.e.
                max 10k) MSA directories

        Returns:
            str: file path (not guaranteed to exist)
        """
        assert self.indexing_method in [
            "pdb_id_entity_id",
            "sequence",
        ], "use the pdb_id_entity_id or sequence to search msa dir"
        suffix = "_max_1w" if reduced else ""
        if self.indexing_method == "sequence":
            # Only the first pdb entry of a sequence stores the RNA MSA.
            if sequence in self.seq_to_pdb_idx:
                pdb_id_entity_id = self.seq_to_pdb_idx[sequence][0]
            else:
                logger.info(f"{pdb_id_entity_id} not in seq_to_pdb_idx")
                pdb_id_entity_id = "not_exist"

        rel_path = f"{pdb_id_entity_id}/{db_name}.sto"
        return opjoin(f"{self.rna_msa_dir}{suffix}", rel_path)

    def process_single_sequence(
        self,
        pdb_name: str,
        sequence: str,
        pdb_id: str,
        is_homomer_or_monomer: bool,
    ) -> dict[str, np.ndarray]:
        """
        Get basic MSA features for a single RNA sequence.

        Args:
            pdb_name (str): f"{pdb_id}_{entity_id}" of the input entity
            sequence (str): input sequence
            pdb_id (str): pdb_id of input sequence
            is_homomer_or_monomer (bool): True if the input sequence is a
                homomer or a monomer (unused for RNA, kept for interface parity)

        Returns:
            Dict[str, np.ndarray]: the basic MSA features of the input sequence
        """
        found_paths = []
        depth_limits = []
        for db_name in self.non_pairing_db:
            candidate = self.get_msa_path(db_name, sequence, pdb_name)
            if opexists(candidate) and candidate.endswith(".sto"):
                found_paths.append(candidate)
                depth_limits.append(
                    self.seq_limits.get(db_name, SEQ_LIMITS[db_name])
                )

        return process_single_sequence(
            pdb_name=pdb_name,
            sequence=sequence,
            raw_msa_paths=found_paths,
            seq_limits=depth_limits,
            msa_entity_type="rna",
            msa_type="non_pairing",
        )

    def get_msa_features_for_assembly(
        self,
        bioassembly_dict: Mapping[str, Any],
        entity_to_asym_id_int: Mapping[str, Sequence[int]],
        selected_token_indices: Optional[torch.Tensor],
    ) -> dict[str, np.ndarray]:
        """
        Get MSA features for the RNA entities of the bioassembly.

        Args:
            bioassembly_dict (Mapping[str, Any]): the bioassembly dict with
                sequence, atom_array and token_array.
            entity_to_asym_id_int (Mapping[str, Sequence[int]]): mapping from
                entity_id to asym_id_int.
            selected_token_indices (torch.Tensor): Cropped token indices.

        Returns:
            Dict[str, np.ndarray]: the basic MSA features of the bioassembly,
            or None if it contains no RNA entity.
        """
        rna_entity_ids = self.get_entity_ids(bioassembly_dict, msa_entity_type="rna")
        if len(rna_entity_ids) == 0:
            return None

        (
            selected_asym_ids,
            asym_id_ints,
            asym_to_entity_id,
            asym_id_int_to_sequence,
            entity_id_to_sequence,
        ) = self.get_selected_asym_ids(
            bioassembly_dict=bioassembly_dict,
            entity_to_asym_id_int=entity_to_asym_id_int,
            selected_token_indices=selected_token_indices,
            entity_ids=rna_entity_ids,
        )
        # RNA MSAs are never paired, so every assembly is handled as a
        # homomer/monomer.
        return self.get_msa_pipeline(
            is_homomer_or_monomer=True,
            selected_asym_ids=selected_asym_ids,
            asym_to_entity_id=asym_to_entity_id,
            asym_id_int_to_sequence=asym_id_int_to_sequence,
            entity_id_to_sequence=entity_id_to_sequence,
            bioassembly_dict=bioassembly_dict,
            entity_to_asym_id_int=entity_to_asym_id_int,
            msa_entity_type="rna",
        )
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
class MSAFeaturizer:
    """Top-level MSA featurizer combining protein and (optional) RNA MSAs."""

    def __init__(
        self,
        prot_msa_args: Optional[dict] = None,
        rna_msa_args: Optional[dict] = None,
        enable_rna_msa: bool = False,
    ):
        """
        Args:
            prot_msa_args (Optional[dict]): kwargs forwarded to
                PROTMSAFeaturizer.
            rna_msa_args (Optional[dict]): kwargs forwarded to
                RNAMSAFeaturizer (only used when enable_rna_msa is True).
            enable_rna_msa (bool): whether to also featurize RNA MSAs.
        """
        # Fix: the original defaults were mutable dicts ({}), which are shared
        # across all instances; use None sentinels instead (backward
        # compatible: callers passing dicts are unaffected).
        self.prot_msa_featurizer = PROTMSAFeaturizer(**(prot_msa_args or {}))
        self.enable_rna_msa = enable_rna_msa
        if self.enable_rna_msa:
            self.rna_msa_featurizer = RNAMSAFeaturizer(**(rna_msa_args or {}))

    def __call__(
        self,
        bioassembly_dict: dict[str, Any],
        selected_indices: np.ndarray,
        entity_to_asym_id_int: Mapping[str, int],
    ) -> Optional[dict[str, np.ndarray]]:
        """
        Processes the bioassembly dictionary to generate MSA features for both
        protein and RNA entities, if enabled.

        Args:
            bioassembly_dict (dict[str, Any]): The bioassembly dictionary
                containing entity information.
            selected_indices (np.ndarray): Indices of selected tokens.
            entity_to_asym_id_int (Mapping[str, int]): Mapping from entity ID
                to asym ID integers.

        Returns:
            Optional[dict[str, np.ndarray]]: A dictionary containing the merged
            MSA features for the bioassembly, or None if no features are
            generated.
        """
        prot_msa_feats = self.prot_msa_featurizer.get_msa_features_for_assembly(
            bioassembly_dict=bioassembly_dict,
            entity_to_asym_id_int=entity_to_asym_id_int,
            selected_token_indices=selected_indices,
        )
        if self.enable_rna_msa:
            rna_msa_feats = self.rna_msa_featurizer.get_msa_features_for_assembly(
                bioassembly_dict=bioassembly_dict,
                entity_to_asym_id_int=entity_to_asym_id_int,
                selected_token_indices=selected_indices,
            )
        else:
            rna_msa_feats = None

        np_chains_list = [
            feats for feats in (prot_msa_feats, rna_msa_feats) if feats is not None
        ]
        if len(np_chains_list) == 0:
            return None

        msa_feats = merge_features_from_prot_rna(np_chains_list)
        msa_feats = self.tokenize(
            msa_feats=msa_feats,
            token_array=bioassembly_dict["token_array"],
            atom_array=bioassembly_dict["atom_array"],
        )
        return msa_feats

    def tokenize(
        self,
        msa_feats: Mapping[str, np.ndarray],
        token_array: TokenArray,
        atom_array: AtomArray,
    ) -> dict[str, np.ndarray]:
        """
        Tokenize raw MSA features.

        Args:
            msa_feats (Dict[str, np.ndarray]): raw MSA features.
            token_array (TokenArray): token array of this bioassembly
            atom_array (AtomArray): atom array of this bioassembly

        Returns:
            Dict[str, np.ndarray]: the tokenized MSA features of the
            bioassembly, restricted to the model-facing keys.
        """
        msa_feats = tokenize_msa(
            msa_feats=msa_feats, token_array=token_array, atom_array=atom_array
        )
        # Add alignment-count features for MSA analysis tracking; default to 0
        # when a modality produced no alignments.
        tracking_keys = [
            "prot_pair_num_alignments",
            "prot_unpair_num_alignments",
            "rna_pair_num_alignments",
            "rna_unpair_num_alignments",
        ]
        for key in tracking_keys:
            msa_feats[key] = msa_feats.get(key, np.asarray(0, dtype=np.int32))

        keep = (
            ["msa", "has_deletion", "deletion_value", "deletion_mean", "profile"]
            + tracking_keys
        )
        return {k: v for (k, v) in msa_feats.items() if k in keep}
|
| 753 |
+
|
| 754 |
+
|
| 755 |
+
# Common function for train and inference
|
| 756 |
+
def process_single_sequence(
    pdb_name: str,
    sequence: str,
    raw_msa_paths: Optional[list[str]],
    seq_limits: Optional[list[str]],
    msa_entity_type: str = "prot",
    msa_type: str = "non_pairing",
) -> FeatureDict:
    """
    Build sequence and MSA features for a single entity sequence.

    Args:
        pdb_name (str): The name of the PDB entry.
        sequence (str): The input sequence.
        raw_msa_paths (Optional[list[str]]): List of paths to raw MSA files.
        seq_limits (Optional[list[str]]): List of sequence limits for
            different databases.
        msa_entity_type (str): The type of MSA entity, either "prot" or "rna".
            Defaults to "prot".
        msa_type (str): The type of MSA, either "non_pairing" or "pairing".
            Defaults to "non_pairing".

    Returns:
        FeatureDict: A dictionary containing the sequence and MSA features.

    Raises:
        AssertionError: If `msa_entity_type` is not "prot" or "rna".
    """
    assert msa_entity_type in ["prot", "rna"]

    # Residue-type vocabulary differs between proteins and RNA; RNA uses the
    # RNA ordering with "N" as the unknown token.
    seq_kwargs = {"sequence": sequence, "num_res": len(sequence)}
    if msa_entity_type == "rna":
        seq_kwargs.update(mapping=rna_order_with_x, x_token="N")
    features = make_sequence_features(**seq_kwargs)

    features.update(
        load_and_process_msa(
            pdb_name=pdb_name,
            msa_type=msa_type,
            raw_msa_paths=raw_msa_paths,
            seq_limits=seq_limits,
            input_sequence=sequence,
            msa_entity_type=msa_entity_type,
        )
    )
    return features
|
| 807 |
+
|
| 808 |
+
|
| 809 |
+
# Common function for train and inference
|
| 810 |
+
def tokenize_msa(
    msa_feats: Mapping[str, np.ndarray],
    token_array: TokenArray,
    atom_array: AtomArray,
) -> dict[str, np.ndarray]:
    """
    Tokenize raw MSA features.

    Maps residue-level MSA columns onto the token grid of the bioassembly.
    Tokens with no matching MSA column (e.g. non-amino-acid tokens) receive
    their own restype in "msa", zeros in the deletion features, and a one-hot
    distribution in "profile".

    Args:
        msa_feats (Dict[str, np.ndarray]): raw MSA features; mutated in place.
        token_array (TokenArray): token array of this bioassembly
        atom_array (AtomArray): atom array of this bioassembly

    Returns:
        Dict[str, np.ndarray]: the tokenized MSA features of the bioassembly.
    """
    center_atom_idxs = token_array.get_annotation("centre_atom_index")

    # res_id: (asym_id, residue_index).
    # Maps each residue to its column number in the raw MSA array.
    msa_col_of_res = {
        (msa_feats["asym_id"][j], msa_feats["residue_index"][j]): j
        for j in range(msa_feats["msa"].shape[1])
    }

    token_restypes = []
    src_cols = []  # columns in the raw MSA
    dst_cols = []  # corresponding columns in the tokenized MSA
    for tok_idx, atom_idx in enumerate(center_atom_idxs):
        token_restypes.append(
            STD_RESIDUES[atom_array.cano_seq_resname[atom_idx]]
        )
        center_atom = atom_array[atom_idx]
        res_key = (center_atom.asym_id_int, center_atom.res_id)
        if res_key in msa_col_of_res:
            src_cols.append(msa_col_of_res[res_key])
            dst_cols.append(tok_idx)

    n_rows = msa_feats["msa"].shape[0]
    n_tokens = len(center_atom_idxs)

    token_restypes = np.array(token_restypes)
    dst_cols = np.array(dst_cols)
    src_cols = np.array(src_cols)

    # msa: for non-amino-acid tokens, copy the token's own restype into
    # every alignment row.
    tokenized_msa = np.repeat(token_restypes[None, ...], n_rows, axis=0)
    tokenized_msa[:, dst_cols] = msa_feats["msa"][:, src_cols]
    msa_feats["msa"] = tokenized_msa

    # has_deletion, deletion_value: assign 0 to non-amino-acid tokens.
    for key_2d in ["has_deletion", "deletion_value"]:
        buf = np.zeros((n_rows, n_tokens), dtype=msa_feats[key_2d].dtype)
        buf[:, dst_cols] = msa_feats[key_2d][:, src_cols]
        msa_feats[key_2d] = buf

    # deletion_mean: assign 0 to non-amino-acid tokens.
    mean_buf = np.zeros((n_tokens,))
    mean_buf[dst_cols] = msa_feats["deletion_mean"][src_cols]
    msa_feats["deletion_mean"] = mean_buf

    # profile: one-hot distribution on the token's restype for
    # non-amino-acid tokens.
    profile_buf = np.zeros((n_tokens, 32))
    profile_buf[np.arange(n_tokens), token_restypes] = 1
    profile_buf[dst_cols, :] = msa_feats["profile"][src_cols, :]
    msa_feats["profile"] = profile_buf
    return msa_feats
|
| 884 |
+
|
| 885 |
+
|
| 886 |
+
# Common function for train and inference
|
| 887 |
+
def merge_all_chain_features(
    pdb_id: str,
    all_chain_features: dict[str, FeatureDict],
    asym_to_entity_id: dict,
    is_homomer_or_monomer: bool = False,
    merge_method: str = "dense_max",
    max_size: int = 16384,
    msa_entity_type: str = "prot",
) -> dict[str, np.ndarray]:
    """
    Merges features from all chains in the bioassembly.

    Args:
        pdb_id (str): The PDB ID of the bioassembly.
        all_chain_features (dict[str, FeatureDict]): Features for each chain
            in the bioassembly.
        asym_to_entity_id (dict): Mapping from asym ID to entity ID.
        is_homomer_or_monomer (bool): Indicates if the bioassembly is a
            homomer or monomer. Defaults to False.
        merge_method (str): Method used for merging features. Defaults to
            "dense_max".
        max_size (int): Maximum size of the MSA. Defaults to 16384.
        msa_entity_type (str): Type of MSA entity, either "prot" or "rna".
            Defaults to "prot".

    Returns:
        dict[str, np.ndarray]: Merged features for the bioassembly.

    Raises:
        ValueError: If `msa_entity_type` is neither "prot" nor "rna".
    """
    all_chain_features = add_assembly_features(
        pdb_id,
        all_chain_features,
        asym_to_entity_id=asym_to_entity_id,
    )
    if msa_entity_type == "rna":
        np_example = rna_merge(
            is_homomer_or_monomer=is_homomer_or_monomer,
            all_chain_features=all_chain_features,
            merge_method=merge_method,
            msa_crop_size=max_size,
        )
    elif msa_entity_type == "prot":
        np_example = pair_and_merge(
            is_homomer_or_monomer=is_homomer_or_monomer,
            all_chain_features=all_chain_features,
            merge_method=merge_method,
            msa_crop_size=max_size,
        )
    else:
        # Fix: an unknown type previously fell through and crashed with an
        # UnboundLocalError on np_example; fail with an explicit error instead.
        raise ValueError(f"Unsupported msa_entity_type: {msa_entity_type}")
    np_example = clip_msa(np_example, max_num_msa=max_size)
    return np_example
|
| 932 |
+
|
| 933 |
+
|
| 934 |
+
class InferenceMSAFeaturizer(object):
    # Now we only support protein msa in inference

    @staticmethod
    def process_prot_single_sequence(
        sequence: str,
        description: str,
        is_homomer_or_monomer: bool,
        msa_dir: Union[str, None],
        pairing_db: str,
    ) -> FeatureDict:
        """
        Processes a single protein sequence to generate sequence and MSA features.

        Args:
            sequence (str): The input protein sequence.
            description (str): Description of the sequence, typically the PDB name.
            is_homomer_or_monomer (bool): Indicates if the sequence is a homomer
                or monomer.
            msa_dir (Union[str, None]): Directory containing the MSA files, or
                None if no pre-computed MSA is available (e.g. the search failed).
            pairing_db (str): Database used for pairing.

        Returns:
            FeatureDict: A dictionary containing the sequence and MSA features.

        Raises:
            AssertionError: If the pairing MSA file does not exist when
                `is_homomer_or_monomer` is False.
        """
        # For non-pairing MSA
        if msa_dir is None:
            # No pre-computed MSA was provided, and the MSA search failed
            raw_msa_paths = []
        else:
            raw_msa_paths = [opjoin(msa_dir, "non_pairing.a3m")]
        pdb_name = description

        sequence_features = process_single_sequence(
            pdb_name=pdb_name,
            sequence=sequence,
            raw_msa_paths=raw_msa_paths,
            seq_limits=[-1],
            msa_entity_type="prot",
            msa_type="non_pairing",
        )
        if not is_homomer_or_monomer:
            # Separately process the pairing MSA
            assert opexists(
                raw_msa_path := opjoin(msa_dir, "pairing.a3m")
            ), f"No pairing-MSA of {pdb_name} (please check {raw_msa_path})"

            all_seq_msa_features = load_and_process_msa(
                pdb_name=pdb_name,
                msa_type="pairing",
                raw_msa_paths=[raw_msa_path],
                seq_limits=[-1],
                identifier_func=get_identifier_func(
                    pairing_db=pairing_db,
                ),
                handle_empty="raise_error",
            )
            sequence_features.update(all_seq_msa_features)

        return sequence_features

    @staticmethod
    def get_inference_prot_msa_features_for_assembly(
        bioassembly: Sequence[Mapping[str, Mapping[str, Any]]],
        entity_to_asym_id: Mapping[str, set[int]],
    ) -> FeatureDict:
        """
        Processes the bioassembly to generate MSA features for protein entities
        in inference mode.

        Args:
            bioassembly (Sequence[Mapping[str, Mapping[str, Any]]]): The
                bioassembly containing entity information.
            entity_to_asym_id (Mapping[str, set[int]]): Mapping from entity ID
                to asym ID integers.

        Returns:
            FeatureDict: A dictionary containing the MSA features for the
            protein entities, or None if there is no protein entity.

        Raises:
            AssertionError: If the provided precomputed MSA path does not exist.
        """
        entity_to_asym_id_int = dict(entity_to_asym_id)
        asym_to_entity_id = {}
        # In inference mode, the keys in bioassembly is different from training.
        # Only contains protein entity; many-to-one mapping.
        entity_id_to_sequence = {}
        for i, entity_info_wrapper in enumerate(bioassembly):
            entity_id = str(i + 1)
            entity_type = list(entity_info_wrapper.keys())[0]
            entity_info = entity_info_wrapper[entity_type]

            if entity_type == PROT_TYPE_NAME:
                # Update entity_id_to_sequence
                entity_id_to_sequence[entity_id] = entity_info["sequence"]

                # Update asym_to_entity_id
                for asym_id_int in entity_to_asym_id_int[entity_id]:
                    asym_to_entity_id[asym_id_int] = entity_id
        if len(entity_id_to_sequence) == 0:
            # No protein entity
            return None
        is_homomer_or_monomer = (
            len(set(entity_id_to_sequence.values())) == 1
        )  # Only one protein sequence
        sequence_to_entity = defaultdict(list)
        for entity_id, seq in entity_id_to_sequence.items():
            sequence_to_entity[seq].append(entity_id)

        sequence_to_features: dict[str, dict[str, Any]] = {}
        msa_sequences = {}
        msa_dirs = {}
        for idx, (sequence, entity_id_list) in enumerate(sequence_to_entity.items()):
            msa_info = bioassembly[int(entity_id_list[0]) - 1][PROT_TYPE_NAME]["msa"]
            msa_dir = msa_info.get("precomputed_msa_dir", None)
            if msa_dir is not None:
                assert opexists(
                    msa_dir
                ), f"The provided precomputed MSA path of entities {entity_id_list} does not exists: \n{msa_dir}"
                msa_dirs[idx] = msa_dir
            else:
                pairing_db_fpath = msa_info.get("pairing_db_fpath", None)
                non_pairing_db_fpath = msa_info.get("non_pairing_db_fpath", None)
                assert (
                    pairing_db_fpath is not None
                ), "Path of pairing MSA database is not given."
                assert (
                    non_pairing_db_fpath is not None
                ), "Path of non-pairing MSA database is not given."
                assert msa_info["pairing_db"] in ["uniprot", "", None], (
                    f"Using {msa_info['pairing_db']} as the source for MSA pairing "
                    f"is not supported in online MSA searching."
                )

                msa_info["pairing_db"] = "uniprot"
                msa_sequences[idx] = (sequence, pairing_db_fpath, non_pairing_db_fpath)
        if len(msa_sequences) > 0:
            msa_dirs.update(msa_parallel(msa_sequences))

        for idx, (sequence, entity_id_list) in enumerate(sequence_to_entity.items()):

            if len(entity_id_list) > 1:
                logger.info(
                    f"Entities {entity_id_list} correspond to the same sequence."
                )
            msa_info = bioassembly[int(entity_id_list[0]) - 1][PROT_TYPE_NAME]["msa"]
            # Fix: use .get() — when the online MSA search failed for this
            # sequence, idx is absent from msa_dirs and the original indexing
            # raised KeyError; process_prot_single_sequence explicitly
            # supports msa_dir=None for that case.
            msa_dir = msa_dirs.get(idx)

            description = f"entity_{'_'.join(map(str, entity_id_list))}"
            sequence_feat = InferenceMSAFeaturizer.process_prot_single_sequence(
                sequence=sequence,
                description=description,
                is_homomer_or_monomer=is_homomer_or_monomer,
                msa_dir=msa_dir,
                pairing_db=msa_info["pairing_db"],
            )
            sequence_feat = convert_monomer_features(sequence_feat)
            sequence_to_features[sequence] = sequence_feat
            # Only clean up / persist directories produced by the online search.
            if msa_dir and opexists(msa_dir) and idx in msa_sequences:
                if (msa_save_dir := msa_info.get("msa_save_dir", None)) is not None:
                    if opexists(dst_dir := opjoin(msa_save_dir, str(idx + 1))):
                        shutil.rmtree(dst_dir)
                    shutil.copytree(msa_dir, dst_dir)
                    # Keep only the .a3m alignment files in the saved copy.
                    for fname in os.listdir(dst_dir):
                        if not fname.endswith(".a3m"):
                            os.remove(opjoin(dst_dir, fname))
                else:
                    shutil.rmtree(msa_dir)

        # Fix: the original comprehension filtered on `seq`, a stale leftover
        # loop variable (always the last sequence iterated above); filter on
        # the sequence that actually belongs to each chain's entity.
        all_chain_features = {
            asym_id_int: deepcopy(
                sequence_to_features[entity_id_to_sequence[entity_id]]
            )
            for asym_id_int, entity_id in asym_to_entity_id.items()
            if entity_id_to_sequence[entity_id] in sequence_to_features
        }
        if len(all_chain_features) == 0:
            return None

        np_example = merge_all_chain_features(
            pdb_id="test_assembly",
            all_chain_features=all_chain_features,
            asym_to_entity_id=asym_to_entity_id,
            is_homomer_or_monomer=is_homomer_or_monomer,
            merge_method="dense_max",
            max_size=MSA_MAX_SIZE,
            msa_entity_type="prot",
        )

        return np_example
|
| 1124 |
+
|
| 1125 |
+
def make_msa_feature(
    bioassembly: Sequence[Mapping[str, Mapping[str, Any]]],
    entity_to_asym_id: Mapping[str, Sequence[str]],
    token_array: TokenArray,
    atom_array: AtomArray,
) -> Optional[dict[str, np.ndarray]]:
    """
    Generate tokenized MSA features for the protein entities of a bioassembly (inference mode).

    Args:
        bioassembly (Sequence[Mapping[str, Mapping[str, Any]]]): The bioassembly containing entity information.
        entity_to_asym_id (Mapping[str, Sequence[str]]): Mapping from entity ID to asym ID strings.
        token_array (TokenArray): Token array of the bioassembly.
        atom_array (AtomArray): Atom array of the bioassembly.

    Returns:
        Optional[dict[str, np.ndarray]]: The tokenized MSA features restricted to the
        keys the model consumes, or an empty dictionary if no features are generated.
    """
    raw_feats = InferenceMSAFeaturizer.get_inference_prot_msa_features_for_assembly(
        bioassembly=bioassembly,
        entity_to_asym_id=entity_to_asym_id,
    )
    if raw_feats is None:
        # No protein entity produced MSA features.
        return {}

    tokenized = tokenize_msa(
        msa_feats=raw_feats,
        token_array=token_array,
        atom_array=atom_array,
    )
    # Keep only the features the downstream model actually reads.
    kept_keys = ("msa", "has_deletion", "deletion_value", "deletion_mean", "profile")
    return {name: feat for name, feat in tokenized.items() if name in kept_keys}
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/msa_utils.py
ADDED
|
@@ -0,0 +1,1416 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import logging
|
| 16 |
+
import os
|
| 17 |
+
import shutil
|
| 18 |
+
import string
|
| 19 |
+
import subprocess
|
| 20 |
+
import time
|
| 21 |
+
import uuid
|
| 22 |
+
from collections import OrderedDict, defaultdict
|
| 23 |
+
from os.path import exists as opexists
|
| 24 |
+
from typing import (
|
| 25 |
+
Any,
|
| 26 |
+
Callable,
|
| 27 |
+
Iterable,
|
| 28 |
+
Mapping,
|
| 29 |
+
MutableMapping,
|
| 30 |
+
Optional,
|
| 31 |
+
Sequence,
|
| 32 |
+
Union,
|
| 33 |
+
)
|
| 34 |
+
|
| 35 |
+
import numpy as np
|
| 36 |
+
|
| 37 |
+
from protenix.data.constants import (
|
| 38 |
+
PRO_STD_RESIDUES,
|
| 39 |
+
PROT_STD_RESIDUES_ONE_TO_THREE,
|
| 40 |
+
RNA_ID_TO_NT,
|
| 41 |
+
RNA_NT_TO_ID,
|
| 42 |
+
RNA_STD_RESIDUES,
|
| 43 |
+
)
|
| 44 |
+
from protenix.openfold_local.data import parsers
|
| 45 |
+
from protenix.openfold_local.data.msa_identifiers import (
|
| 46 |
+
Identifiers,
|
| 47 |
+
_extract_sequence_identifier,
|
| 48 |
+
_parse_sequence_identifier,
|
| 49 |
+
)
|
| 50 |
+
from protenix.openfold_local.data.msa_pairing import (
|
| 51 |
+
CHAIN_FEATURES,
|
| 52 |
+
MSA_FEATURES,
|
| 53 |
+
MSA_GAP_IDX,
|
| 54 |
+
MSA_PAD_VALUES,
|
| 55 |
+
SEQ_FEATURES,
|
| 56 |
+
block_diag,
|
| 57 |
+
create_paired_features,
|
| 58 |
+
deduplicate_unpaired_sequences,
|
| 59 |
+
)
|
| 60 |
+
from protenix.openfold_local.np import residue_constants
|
| 61 |
+
from protenix.utils.distributed import DIST_WRAPPER
|
| 62 |
+
|
| 63 |
+
# FeatureDict, make_dummy_msa_obj, convert_monomer_features
|
| 64 |
+
# These are modified from openfold: data/data_pipeline
|
| 65 |
+
try:
|
| 66 |
+
from protenix.openfold_local.data.tools import jackhmmer
|
| 67 |
+
except ImportError:
|
| 68 |
+
print(
|
| 69 |
+
"Failed to import packages for searching MSA; can only run with precomputed MSA"
|
| 70 |
+
)
|
| 71 |
+
|
| 72 |
+
# Module-level logger; handler/level configuration is left to the application.
logger = logging.getLogger(__name__)
# Alias for a mutable mapping of feature name -> numpy array.
FeatureDict = MutableMapping[str, np.ndarray]


# NOTE(review): deliberately shadows the SEQ_FEATURES imported from
# msa_pairing above, extending it with the per-chain "profile" feature.
SEQ_FEATURES = list(SEQ_FEATURES) + ["profile"]

# Map HHblits amino-acid ids to this project's residue vocabulary.
# The gap letter "-" maps to index 31 — presumably the project's gap token;
# confirm against the residue vocabulary in constants.py.
HHBLITS_INDEX_TO_OUR_INDEX = {
    hhblits_index: (
        PRO_STD_RESIDUES[PROT_STD_RESIDUES_ONE_TO_THREE[hhblits_letter]]
        if hhblits_letter != "-"
        else 31
    )
    for hhblits_index, hhblits_letter in residue_constants.ID_TO_HHBLITS_AA.items()
}
# Dense permutation table: NEW_ORDER_LIST[hhblits_id] -> project residue id.
NEW_ORDER_LIST = tuple(
    HHBLITS_INDEX_TO_OUR_INDEX[idx] for idx in range(len(HHBLITS_INDEX_TO_OUR_INDEX))
)

# Same remapping for RNA nucleotide ids; the gap also maps to 31.
RNA_ID_TO_OUR_INDEX = {
    key: RNA_STD_RESIDUES[value] if value != "-" else 31
    for key, value in RNA_ID_TO_NT.items()
}
RNA_NEW_ORDER_LIST = tuple(
    RNA_ID_TO_OUR_INDEX[idx] for idx in range(len(RNA_ID_TO_OUR_INDEX))
)

# Id of the gap symbol in the RNA nucleotide vocabulary.
RNA_MSA_GAP_IDX = RNA_NT_TO_ID["-"]

# Pad values used when stacking RNA MSA features of different widths;
# RNA counterpart of the imported MSA_PAD_VALUES, using the RNA gap id.
RNA_MSA_PAD_VALUES = {
    "msa_all_seq": RNA_MSA_GAP_IDX,
    "msa_mask_all_seq": 1,
    "deletion_matrix_all_seq": 0,
    "deletion_matrix_int_all_seq": 0,
    "msa": RNA_MSA_GAP_IDX,
    "msa_mask": 1,
    "deletion_matrix": 0,
    "deletion_matrix_int": 0,
}

# Feature names that must survive merging into the final per-assembly dict.
REQUIRED_FEATURES = frozenset(
    {
        "asym_id",
        "entity_id",
        "sym_id",
        "has_deletion",
        "deletion_mean",
        "deletion_value",
        "msa",
        "profile",
        "num_alignments",
        "residue_index",
        "prot_pair_num_alignments",
        "prot_unpair_num_alignments",
        "rna_pair_num_alignments",
        "rna_unpair_num_alignments",
    }
)

PROT_TYPE_NAME = "proteinChain"  # inference protein name in json
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def make_dummy_msa_obj(input_sequence) -> parsers.Msa:
    """Build a single-row Msa containing only *input_sequence* itself.

    Used as a placeholder alignment when no real MSA is available; the single
    row has no deletions and the description "dummy".
    """
    zero_deletions = [[0] * len(input_sequence)]
    return parsers.Msa(
        sequences=[input_sequence],
        deletion_matrix=zero_deletions,
        descriptions=["dummy"],
    )
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def convert_monomer_features(monomer_features: FeatureDict) -> FeatureDict:
    """Reshape and modify monomer features for multimer models.

    Drops the redundant leading dimension from scalar-like features and
    collapses one-hot ``aatype`` to integer residue indices.
    """
    squeeze_keys = {"sequence", "domain_name", "num_alignments", "seq_length"}
    converted: FeatureDict = {}
    for name, value in monomer_features.items():
        if name in squeeze_keys:
            # asarray keeps the result an ndarray after indexing away dim 0.
            value = np.asarray(value[0], dtype=value.dtype)
        elif name == "aatype":
            # The multimer model performs the one-hot operation itself.
            value = np.argmax(value, axis=-1).astype(np.int32)
        converted[name] = value
    return converted
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def make_sequence_features(
    sequence: str,
    num_res: int,
    mapping: dict = residue_constants.restype_order_with_x,
    x_token: str = "X",
) -> FeatureDict:
    """
    Construct a feature dict of basic sequence features.

    Args:
        sequence (str): input sequence
        num_res (int): number of residues in the input sequence
        mapping (dict): residue letter -> index table used for one-hot encoding
        x_token (str): token that unknown residues are mapped to

    Returns:
        FeatureDict: basic features of the input sequence
    """
    aatype = residue_constants.sequence_to_onehot(
        sequence=sequence,
        mapping=mapping,
        map_unknown_to_x=True,
        x_token=x_token,
    )
    return {
        "aatype": aatype,
        "between_segment_residues": np.zeros((num_res,), dtype=np.int32),
        # 1-based residue numbering.
        "residue_index": np.arange(1, num_res + 1, dtype=np.int32),
        # Sequence length broadcast to every residue position.
        "seq_length": np.full((num_res,), num_res, dtype=np.int32),
        "sequence": np.array([sequence.encode("utf-8")], dtype=object),
    }
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def make_msa_features(
    msas: Sequence[parsers.Msa],
    identifier_func: Callable,
    mapping: tuple[dict] = (
        residue_constants.HHBLITS_AA_TO_ID,
        residue_constants.ID_TO_HHBLITS_AA,
    ),
) -> FeatureDict:
    """
    Constructs a feature dict of MSA features.

    Sequences are deduplicated across all input MSAs (first occurrence wins),
    encoded to integer ids via ``mapping[0]``, and a per-position residue
    profile is computed over ``len(mapping[1])`` residue types.

    Args:
        msas (Sequence[parsers.Msa]): input MSA arrays
        identifier_func (Callable): the function extracting species identifier from MSA
        mapping (tuple[dict]): pair of (letter -> id, id -> letter) residue
            tables; defaults to the HHblits amino-acid tables.

    Returns:
        FeatureDict: raw MSA features ("msa", "deletion_matrix_int",
        "num_alignments", "msa_species_identifiers", "profile")

    Raises:
        ValueError: if ``msas`` is empty, or an MSA contains no sequence.
    """
    if not msas:
        raise ValueError("At least one MSA must be provided.")

    int_msa = []
    deletion_matrix = []
    species_ids = []
    seen_sequences = set()
    for msa_index, msa in enumerate(msas):
        if not msa:
            raise ValueError(f"MSA {msa_index} must contain at least one sequence.")
        for sequence_index, sequence in enumerate(msa.sequences):
            # Deduplicate across all MSAs; keep the first occurrence only.
            if sequence in seen_sequences:
                continue
            seen_sequences.add(sequence)
            int_msa.append([mapping[0][res] for res in sequence])
            deletion_matrix.append(msa.deletion_matrix[sequence_index])

            # Extract the species id from the sequence description for pairing.
            identifiers = identifier_func(
                msa.descriptions[sequence_index],
            )

            species_ids.append(identifiers.species_id.encode("utf-8"))

    # residue type from HHBLITS_AA_TO_ID
    num_res = len(msas[0].sequences[0])
    num_alignments = len(int_msa)
    features = {}
    features["deletion_matrix_int"] = np.array(deletion_matrix, dtype=np.int32)
    features["msa"] = np.array(int_msa, dtype=np.int32)
    features["num_alignments"] = np.array([num_alignments] * num_res, dtype=np.int32)
    features["msa_species_identifiers"] = np.array(species_ids, dtype=np.object_)
    features["profile"] = _make_msa_profile(
        msa=features["msa"], dict_size=len(mapping[1])
    )  # [num_res, 27]
    return features
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
def _make_msa_profile(msa: np.ndarray, dict_size: int) -> np.ndarray:
|
| 248 |
+
"""
|
| 249 |
+
Make MSA profile (distribution over residues)
|
| 250 |
+
|
| 251 |
+
Args:
|
| 252 |
+
msas (Sequence[parsers.Msa]): input MSA arrays
|
| 253 |
+
dict_size (int): number of residue types
|
| 254 |
+
|
| 255 |
+
Returns:
|
| 256 |
+
np.array: MSA profile
|
| 257 |
+
"""
|
| 258 |
+
num_seqs = msa.shape[0]
|
| 259 |
+
all_res_types = np.arange(dict_size)
|
| 260 |
+
res_type_hits = msa[..., None] == all_res_types[None, ...]
|
| 261 |
+
res_type_counts = res_type_hits.sum(axis=0)
|
| 262 |
+
profile = res_type_counts / num_seqs
|
| 263 |
+
return profile
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def parse_a3m(path: str, seq_limit: int) -> tuple[list[str], list[str]]:
    """
    Parse a .a3m file.

    Args:
        path (str): file path
        seq_limit (int): the max number of MSA sequences read from the file
            seq_limit > 0: read at most seq_limit sequences
            seq_limit = 0: return empty results
            seq_limit < 0: no limit, return all results

    Returns:
        tuple[list[str], list[str]]: parsed MSA sequences and their corresponding descriptions
    """
    sequences, descriptions = [], []
    if seq_limit == 0:
        return sequences, descriptions

    index = -1
    with open(path, "r") as f:
        for line in f:
            line = line.strip()
            if line.startswith(">"):
                # BUGFIX: use ">=" so the result never exceeds seq_limit
                # entries (">" previously allowed seq_limit + 1 sequences).
                if seq_limit > 0 and len(sequences) >= seq_limit:
                    break
                index += 1
                descriptions.append(line[1:])  # Remove the '>' at the beginning.
                sequences.append("")
                continue
            elif line.startswith("#"):
                continue
            elif not line:
                continue  # Skip blank lines.
            # Sequences may wrap over multiple lines; append to the current one.
            sequences[index] += line

    return sequences, descriptions
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def calc_stockholm_RNA_msa(
    name_to_sequence: OrderedDict, query: Optional[str]
) -> Union[parsers.Msa, OrderedDict]:
    """
    Build an RNA MSA from sequences parsed out of a Stockholm alignment.

    Columns that are gapped in the query row are dropped from every sequence,
    remaining residues are upper-cased, and a per-sequence deletion vector
    (residues deleted w.r.t. the query before each kept column) is computed.

    Args:
        name_to_sequence (OrderedDict): mapping of sequence name -> aligned
            sequence, e.g. as returned by ``parse_sto``. The first entry is
            treated as the query row.
        query (Optional[str]): un-aligned query sequence. If given, it is
            aligned to the first Stockholm row and prepended under the name
            "query".

    Returns:
        parsers.Msa: the processed alignment; an empty OrderedDict is returned
        instead when ``query`` is given but ``name_to_sequence`` is empty.
    """
    msa = []
    deletion_matrix = []

    if query is not None:
        # Add query string to the alignment
        if len(name_to_sequence.keys()) == 0:
            # name_to_sequence = OrderedDict({"query": query})
            return OrderedDict()
        else:
            query = align_query_to_sto(query, list(name_to_sequence.values())[0])
            new_name_to_sequence = OrderedDict({"query": query})
            new_name_to_sequence.update(name_to_sequence)
            name_to_sequence = new_name_to_sequence

    keep_columns = []
    for seq_index, sequence in enumerate(name_to_sequence.values()):
        if seq_index == 0:
            # Gather the columns with gaps from the query
            query = sequence
            keep_columns = [i for i, res in enumerate(query) if res != "-"]

        # Pad short rows with gaps so they span the full query length.
        if len(sequence) < len(query):
            sequence = sequence + "-" * (len(query) - len(sequence))

        # Remove the columns with gaps in the query from all sequences.
        aligned_sequence = "".join([sequence[c] for c in keep_columns])
        # Convert lower case letter to upper case
        aligned_sequence = aligned_sequence.upper()
        msa.append(aligned_sequence)

        # Count the number of deletions w.r.t. query.
        deletion_vec = []
        deletion_count = 0
        for seq_res, query_res in zip(sequence, query):
            # Positions gapped in both rows carry no information and are skipped.
            if seq_res not in ["-", "."] or query_res != "-":
                if query_res == "-":
                    deletion_count += 1
                else:
                    deletion_vec.append(deletion_count)
                    deletion_count = 0
        deletion_matrix.append(deletion_vec)

    return parsers.Msa(
        sequences=msa,
        deletion_matrix=deletion_matrix,
        descriptions=list(name_to_sequence.keys()),
    )
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
def align_query_to_sto(query: str, sto_sequence: str):
    """
    Align the query sequence to a Stockholm row by inserting gaps.

    Each position of *sto_sequence* that marks an insertion (a lower-case
    letter or '.') produces a '-' in the aligned query; every other position
    consumes the next query residue. Any query residues left over once the
    Stockholm row is exhausted are appended unchanged.

    Args:
        query (str): The query sequence to be aligned.
        sto_sequence (str): The Stockholm sequence to which the query is aligned.

    Returns:
        str: The aligned query sequence.
    """
    query = query.strip()
    sto_sequence = sto_sequence.strip()

    pieces = []
    consumed = 0
    for ref_char in sto_sequence:
        if ref_char.islower() or ref_char == ".":
            pieces.append("-")
        else:
            pieces.append(query[consumed])
            consumed += 1

    aligned_query = "".join(pieces)
    if consumed < len(query):
        # Stockholm row ended before the query did; keep the tail as-is.
        aligned_query += query[consumed:]

    return aligned_query
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
def parse_sto(path: str) -> OrderedDict:
    """
    Parse a Stockholm file into an ordered name -> sequence mapping.

    Wrapped alignments (the same name appearing on several lines) are
    concatenated; annotation lines ("#...") and the "//" terminator are
    skipped.

    Args:
        path (str): The path to the Stockholm file.

    Returns:
        OrderedDict: keys are sequence names, values are the concatenated
        aligned sequences, in order of first appearance.
    """
    alignment = OrderedDict()
    with open(path, "r") as handle:
        for raw_line in handle:
            raw_line = raw_line.strip()
            if not raw_line or raw_line.startswith(("#", "//")):
                continue
            name, chunk = raw_line.split()
            alignment.setdefault(name, "")
            alignment[name] += chunk
    return alignment
|
| 426 |
+
|
| 427 |
+
|
| 428 |
+
def parse_msa_data(
    raw_msa_paths: Sequence[str],
    seq_limits: Sequence[int],
    msa_entity_type: str,
    query: Optional[str] = None,
) -> dict[str, parsers.Msa]:
    """
    Parses MSA data based on the entity type (protein or RNA).

    Args:
        raw_msa_paths (Sequence[str]): Paths to the MSA files.
        seq_limits (Sequence[int]): Limits on the number of sequences to read from each file.
        msa_entity_type (str): Type of MSA entity, either "prot" for protein or "rna" for RNA.
        query (Optional[str]): The query sequence for RNA MSA parsing. Defaults to None.

    Returns:
        dict[str, parsers.Msa]: A dictionary containing the parsed MSA data.

    Raises:
        ValueError: If `msa_entity_type` is not "prot" or "rna".
    """
    if msa_entity_type == "prot":
        return parse_prot_msa_data(raw_msa_paths, seq_limits)

    if msa_entity_type == "rna":
        return parse_rna_msa_data(raw_msa_paths, seq_limits, query=query)

    # BUGFIX: previously returned [] here, violating the declared dict return
    # type and the documented ValueError; callers would later crash on
    # dict-style access anyway, so fail loudly at the source.
    raise ValueError(
        f"Unknown msa_entity_type: {msa_entity_type!r}; expected 'prot' or 'rna'."
    )
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
def parse_rna_msa_data(
    raw_msa_paths: Sequence[str],
    seq_limits: Sequence[int],
    query: Optional[str] = None,
) -> dict[str, parsers.Msa]:
    """
    Parse RNA MSAs (Stockholm format) for a sequence.

    Args:
        raw_msa_paths (Sequence[str]): Paths of MSA files
        seq_limits (Sequence[int]): The max number of MSA sequences read from
            each file. Not applied here: the .sto files are assumed to have
            been truncated to at most seq_limit rows already.
        query (Optional[str]): un-aligned query sequence prepended to each MSA

    Returns:
        Dict[str, parsers.Msa]: MSAs parsed from each file, keyed by path;
        empty alignments are dropped.
    """
    parsed = {}
    for path, _seq_limit in zip(raw_msa_paths, seq_limits):
        # The sto file has been truncated to a maximum length of seq_limit.
        msa = calc_stockholm_RNA_msa(
            name_to_sequence=parse_sto(path),
            query=query,
        )
        if len(msa) > 0:
            parsed[path] = msa
    return parsed
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
def parse_prot_msa_data(
|
| 486 |
+
raw_msa_paths: Sequence[str],
|
| 487 |
+
seq_limits: Sequence[int],
|
| 488 |
+
) -> dict[str, parsers.Msa]:
|
| 489 |
+
"""
|
| 490 |
+
Parse MSAs for a sequence
|
| 491 |
+
|
| 492 |
+
Args:
|
| 493 |
+
raw_msa_paths (Sequence[str]): Paths of MSA files
|
| 494 |
+
seq_limits (Sequence[int]): The max number of MSA sequences read from each file
|
| 495 |
+
|
| 496 |
+
Returns:
|
| 497 |
+
Dict[str, parsers.Msa]: MSAs parsed from each file
|
| 498 |
+
"""
|
| 499 |
+
msa_data = {}
|
| 500 |
+
for path, seq_limit in zip(raw_msa_paths, seq_limits):
|
| 501 |
+
sequences, descriptions = parse_a3m(path, seq_limit)
|
| 502 |
+
|
| 503 |
+
deletion_matrix = []
|
| 504 |
+
for msa_sequence in sequences:
|
| 505 |
+
deletion_vec = []
|
| 506 |
+
deletion_count = 0
|
| 507 |
+
for j in msa_sequence:
|
| 508 |
+
if j.islower():
|
| 509 |
+
deletion_count += 1
|
| 510 |
+
else:
|
| 511 |
+
deletion_vec.append(deletion_count)
|
| 512 |
+
deletion_count = 0
|
| 513 |
+
deletion_matrix.append(deletion_vec)
|
| 514 |
+
|
| 515 |
+
# Make the MSA matrix out of aligned (deletion-free) sequences.
|
| 516 |
+
deletion_table = str.maketrans("", "", string.ascii_lowercase)
|
| 517 |
+
aligned_sequences = [s.translate(deletion_table) for s in sequences]
|
| 518 |
+
assert all([len(seq) == len(aligned_sequences[0]) for seq in aligned_sequences])
|
| 519 |
+
assert all([len(vec) == len(deletion_matrix[0]) for vec in deletion_matrix])
|
| 520 |
+
|
| 521 |
+
if len(aligned_sequences) > 0:
|
| 522 |
+
# skip empty file
|
| 523 |
+
msa = parsers.Msa(
|
| 524 |
+
sequences=aligned_sequences,
|
| 525 |
+
deletion_matrix=deletion_matrix,
|
| 526 |
+
descriptions=descriptions,
|
| 527 |
+
)
|
| 528 |
+
msa_data[path] = msa
|
| 529 |
+
|
| 530 |
+
return msa_data
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
def load_and_process_msa(
    pdb_name: str,
    msa_type: str,
    raw_msa_paths: Sequence[str],
    seq_limits: Sequence[int],
    identifier_func: Optional[Callable] = lambda x: Identifiers(),
    input_sequence: Optional[str] = None,
    handle_empty: str = "return_self",
    msa_entity_type: str = "prot",
) -> dict[str, Any]:
    """
    Load and process MSA features of a single sequence.

    Args:
        pdb_name (str): f"{pdb_id}_{entity_id}" of the input entity
        msa_type (str): Type of MSA ("pairing" or "non_pairing")
        raw_msa_paths (Sequence[str]): Paths of MSA files
        seq_limits (Sequence[int]): The max number of MSA sequences read from each file
        identifier_func (Optional[Callable]): The function extracting species identifier from MSA
        input_sequence (Optional[str]): The input sequence
        handle_empty (str): How to handle empty MSA ("return_self" or "raise_error")
        msa_entity_type (str): rna or prot

    Returns:
        Dict[str, Any]: processed MSA features

    Raises:
        ValueError: if no valid MSA was parsed and handle_empty == "raise_error"
        NotImplementedError: for unknown handle_empty or msa_type values
    """
    msa_data = parse_msa_data(
        raw_msa_paths, seq_limits, msa_entity_type=msa_entity_type, query=input_sequence
    )
    if len(msa_data) == 0:
        if handle_empty == "return_self":
            msa_data["dummy"] = make_dummy_msa_obj(input_sequence)
        elif handle_empty == "raise_error":
            # Bug fix: the exception was previously constructed but never
            # raised, so "raise_error" silently fell through with an empty MSA.
            raise ValueError(f"No valid {msa_type} MSA for {pdb_name}")
        else:
            raise NotImplementedError(
                f"Unimplemented empty-handling method: {handle_empty}"
            )
    msas = list(msa_data.values())

    # Residue <-> id mapping depends on the polymer type (shared by both
    # msa_type branches; previously duplicated).
    mapping = (
        (residue_constants.HHBLITS_AA_TO_ID, residue_constants.ID_TO_HHBLITS_AA)
        if msa_entity_type == "prot"
        else (RNA_NT_TO_ID, RNA_ID_TO_NT)
    )

    if msa_type == "non_pairing":
        return make_msa_features(
            msas=msas,
            identifier_func=identifier_func,
            mapping=mapping,
        )
    elif msa_type == "pairing":
        all_seq_features = make_msa_features(
            msas=msas,
            identifier_func=identifier_func,
            mapping=mapping,
        )
        # Paired features are suffixed with "_all_seq" downstream.
        valid_feats = MSA_FEATURES + ("msa_species_identifiers",)
        return {
            f"{k}_all_seq": v for k, v in all_seq_features.items() if k in valid_feats
        }
    else:
        # Bug fix: previously an unknown msa_type silently returned None.
        raise NotImplementedError(f"Unknown MSA type: {msa_type}")
|
| 596 |
+
|
| 597 |
+
|
| 598 |
+
def add_assembly_features(
|
| 599 |
+
pdb_id: str,
|
| 600 |
+
all_chain_features: MutableMapping[str, FeatureDict],
|
| 601 |
+
asym_to_entity_id: Mapping[int, str],
|
| 602 |
+
) -> dict[str, FeatureDict]:
|
| 603 |
+
"""
|
| 604 |
+
Add features to distinguish between chains.
|
| 605 |
+
|
| 606 |
+
Args:
|
| 607 |
+
all_chain_features (MutableMapping[str, FeatureDict]): A dictionary which maps chain_id to a dictionary of features for each chain.
|
| 608 |
+
asym_to_entity_id (Mapping[int, str]): A mapping from asym_id_int to entity_id
|
| 609 |
+
|
| 610 |
+
Returns:
|
| 611 |
+
all_chain_features (MutableMapping[str, FeatureDict]): all_chain_features with assembly features added
|
| 612 |
+
"""
|
| 613 |
+
# Group the chains by entity
|
| 614 |
+
grouped_chains = defaultdict(list)
|
| 615 |
+
for asym_id_int, chain_features in all_chain_features.items():
|
| 616 |
+
entity_id = asym_to_entity_id[asym_id_int]
|
| 617 |
+
chain_features["asym_id"] = asym_id_int
|
| 618 |
+
grouped_chains[entity_id].append(chain_features)
|
| 619 |
+
|
| 620 |
+
new_all_chain_features = {}
|
| 621 |
+
for entity_id, group_chain_features in grouped_chains.items():
|
| 622 |
+
assert int(entity_id) >= 0
|
| 623 |
+
for sym_id, chain_features in enumerate(group_chain_features, start=1):
|
| 624 |
+
new_all_chain_features[f"{entity_id}_{sym_id}"] = chain_features
|
| 625 |
+
seq_length = chain_features["seq_length"]
|
| 626 |
+
chain_features["asym_id"] = (
|
| 627 |
+
chain_features["asym_id"] * np.ones(seq_length)
|
| 628 |
+
).astype(np.int64)
|
| 629 |
+
chain_features["sym_id"] = (sym_id * np.ones(seq_length)).astype(np.int64)
|
| 630 |
+
chain_features["entity_id"] = (int(entity_id) * np.ones(seq_length)).astype(
|
| 631 |
+
np.int64
|
| 632 |
+
)
|
| 633 |
+
chain_features["pdb_id"] = pdb_id
|
| 634 |
+
return new_all_chain_features
|
| 635 |
+
|
| 636 |
+
|
| 637 |
+
def process_unmerged_features(
    all_chain_features: MutableMapping[str, Mapping[str, np.ndarray]]
):
    """
    Post-process per-chain features in place before merging.

    Args:
        all_chain_features (MutableMapping[str, Mapping[str, np.ndarray]]): MSA features of all chains

    Returns:
        None; the per-chain dictionaries are mutated in place
    """
    for feats in all_chain_features.values():
        # Integer deletion counts -> float matrices expected downstream.
        feats["deletion_matrix"] = np.asarray(
            feats.pop("deletion_matrix_int"), dtype=np.float32
        )
        if "deletion_matrix_int_all_seq" in feats:
            feats["deletion_matrix_all_seq"] = np.asarray(
                feats.pop("deletion_matrix_int_all_seq"), dtype=np.float32
            )

        # Per-column mean deletion count across the MSA rows.
        feats["deletion_mean"] = np.mean(feats["deletion_matrix"], axis=0)
|
| 662 |
+
|
| 663 |
+
|
| 664 |
+
def pair_and_merge(
    is_homomer_or_monomer: bool,
    all_chain_features: MutableMapping[str, Mapping[str, np.ndarray]],
    merge_method: str,
    msa_crop_size: int,
) -> dict[str, np.ndarray]:
    """
    Augment, optionally pair, crop and merge per-chain protein MSA features.

    Args:
        is_homomer_or_monomer (bool): True if the bioassembly is a homomer or a
            monomer; cross-chain pairing is skipped in that case
        all_chain_features (MutableMapping[str, Mapping[str, np.ndarray]]):
            A map of per-chain feature dictionaries
        merge_method (str): How to merge unpaired MSA features
        msa_crop_size (int): Max number of MSA rows kept per chain

    Returns:
        Dict[str, np.ndarray]: merged features for the whole bioassembly
    """
    process_unmerged_features(all_chain_features)

    chain_feats = list(all_chain_features.values())

    do_pairing = not is_homomer_or_monomer
    if do_pairing:
        chain_feats = create_paired_features(chains=chain_feats)
        chain_feats = deduplicate_unpaired_sequences(chain_feats)

    chain_feats = crop_chains(
        chain_feats,
        msa_crop_size=msa_crop_size,
        pair_msa_sequences=do_pairing,
    )

    merged = merge_chain_features(
        np_chains_list=chain_feats,
        pair_msa_sequences=do_pairing,
        merge_method=merge_method,
        msa_entity_type="prot",
    )

    return process_prot_final(merged)
|
| 709 |
+
|
| 710 |
+
|
| 711 |
+
def rna_merge(
    all_chain_features: MutableMapping[str, Mapping[str, np.ndarray]],
    merge_method: str,
    msa_crop_size: int,
) -> dict[str, np.ndarray]:
    """
    Augment, crop and merge per-chain RNA MSA features (no cross-chain pairing).

    Args:
        all_chain_features (MutableMapping[str, Mapping[str, np.ndarray]]):
            A map of per-chain feature dictionaries
        merge_method (str): How to merge unpaired MSA features
        msa_crop_size (int): Max number of MSA rows kept per chain

    Returns:
        Dict[str, np.ndarray]: merged features for the whole bioassembly
    """
    process_unmerged_features(all_chain_features)

    chain_feats = list(all_chain_features.values())

    # RNA chains are never paired across chains.
    chain_feats = crop_chains(
        chain_feats,
        msa_crop_size=msa_crop_size,
        pair_msa_sequences=False,
    )
    merged = merge_chain_features(
        np_chains_list=chain_feats,
        pair_msa_sequences=False,
        merge_method=merge_method,
        msa_entity_type="rna",
    )

    return process_rna_final(merged)
|
| 743 |
+
|
| 744 |
+
|
| 745 |
+
def merge_chain_features(
    np_chains_list: list[Mapping[str, np.ndarray]],
    pair_msa_sequences: bool,
    merge_method: str,
    msa_entity_type: str = "prot",
) -> Mapping[str, np.ndarray]:
    """
    Merge per-chain feature dicts into a single FeatureDict for the bioassembly.

    Args:
        np_chains_list (List[Mapping[str, np.ndarray]]): List of FeatureDicts for each chain
        pair_msa_sequences (bool): Whether to merge paired ("_all_seq") MSAs
        merge_method (str): How to merge unpaired MSA features
        msa_entity_type (str): protein or rna

    Returns:
        Single FeatureDict for the entire bioassembly
    """
    # First collapse identical chains (same entity) into dense MSAs.
    np_chains_list = _merge_homomers_dense_features(
        np_chains_list, merge_method, msa_entity_type=msa_entity_type
    )

    # Unpaired MSA features are always block-diagonalised / dense-merged;
    # paired features are concatenated along the residue dimension.
    np_example = _merge_features_from_multiple_chains(
        np_chains_list,
        pair_msa_sequences=False,
        merge_method=merge_method,
        msa_entity_type=msa_entity_type,
    )

    if pair_msa_sequences:
        np_example = _concatenate_paired_and_unpaired_features(np_example)

    return _correct_post_merged_feats(np_example=np_example)
|
| 784 |
+
|
| 785 |
+
|
| 786 |
+
def _merge_homomers_dense_features(
    chains: Iterable[Mapping[str, np.ndarray]],
    merge_method: str,
    msa_entity_type: str = "prot",
) -> list[dict[str, np.ndarray]]:
    """
    Merge all identical chains, making the resulting MSA dense.

    Args:
        chains (Iterable[Mapping[str, np.ndarray]]): An iterable of per-chain features
        merge_method (str): How to merge unpaired MSA features
        msa_entity_type (str): protein or rna

    Returns:
        List[Dict[str, np.ndarray]]: One feature dict per entity. Chains sharing
        an entity_id are merged, with MSA features concatenated along the
        num_res dimension (dense).
    """
    # Group chains by entity id (scalar is the first element of the
    # per-residue entity_id array).
    by_entity = defaultdict(list)
    for chain in chains:
        by_entity[chain["entity_id"][0]].append(chain)

    # Merge each entity group; iterate entities in sorted order for determinism.
    return [
        _merge_features_from_multiple_chains(
            by_entity[entity_id],
            pair_msa_sequences=True,
            merge_method=merge_method,
            msa_entity_type=msa_entity_type,
        )
        for entity_id in sorted(by_entity)
    ]
|
| 821 |
+
|
| 822 |
+
|
| 823 |
+
def _merge_msa_features(
    *feats: np.ndarray,
    feature_name: str,
    merge_method: str,
    msa_entity_type: str = "prot",
) -> np.ndarray:
    """
    Merge unpaired MSA features from multiple chains into one array.

    Args:
        feats (np.ndarray): per-chain feature arrays (num_msa, num_res)
        feature_name (str): feature name, used to pick the pad value
        merge_method (str): how to merge ("sparse", "dense_min" or "dense_max")
        msa_entity_type (str): protein or rna

    Returns:
        np.ndarray: merged feature

    Raises:
        NotImplementedError: for an unknown merge_method
    """
    assert msa_entity_type in ["prot", "rna"]
    # Pad values differ between protein and RNA alphabets.
    mapping = MSA_PAD_VALUES if msa_entity_type == "prot" else RNA_MSA_PAD_VALUES

    if merge_method == "sparse":
        # Block-diagonal layout: each chain's MSA occupies its own rows.
        merged_feature = block_diag(*feats, pad_value=mapping[feature_name])
    elif merge_method == "dense_min":
        merged_feature = truncate_at_min(*feats)
    elif merge_method == "dense_max":
        merged_feature = pad_to_max(*feats, pad_value=mapping[feature_name])
    else:
        # Bug fix: the message previously ended at "are: " and listed nothing.
        raise NotImplementedError(
            f"Unknown merge method {merge_method}! Allowed merge methods are: "
            "'sparse', 'dense_min', 'dense_max'"
        )
    return merged_feature
|
| 856 |
+
|
| 857 |
+
|
| 858 |
+
def _merge_features_from_multiple_chains(
    chains: Sequence[Mapping[str, np.ndarray]],
    pair_msa_sequences: bool,
    merge_method: str,
    msa_entity_type: str = "prot",
) -> dict[str, np.ndarray]:
    """
    Merge features from multiple chains into a single feature dict.

    Args:
        chains (Sequence[Mapping[str, np.ndarray]]):
            A list of feature dictionaries to merge
        pair_msa_sequences (bool): Whether to concatenate MSA features along the
            num_res dimension (if True), or to block-diagonalize / dense-merge
            them (if False)
        merge_method (str): How to merge unpaired MSA features
        msa_entity_type (str): protein or rna

    Returns:
        Dict[str, np.ndarray]: A feature dictionary for the merged example
    """
    merged = {}
    for name in chains[0]:
        per_chain = [chain[name] for chain in chains]
        base_name = name.split("_all_seq")[0]

        if base_name in MSA_FEATURES:
            if pair_msa_sequences or "_all_seq" in name:
                # Paired rows: concatenate along residues.
                merged[name] = np.concatenate(per_chain, axis=1)
            else:
                merged[name] = _merge_msa_features(
                    *per_chain,
                    merge_method=merge_method,
                    feature_name=name,
                    msa_entity_type=msa_entity_type,
                )
        elif base_name in SEQ_FEATURES:
            merged[name] = np.concatenate(per_chain, axis=0)
        elif base_name in CHAIN_FEATURES:
            # Scalar per-chain counters are summed.
            merged[name] = np.sum(per_chain).astype(np.int32)
        else:
            # Anything else is assumed identical across chains: keep the first.
            merged[name] = per_chain[0]
    return merged
|
| 898 |
+
|
| 899 |
+
|
| 900 |
+
def merge_features_from_prot_rna(
    chains: Sequence[Mapping[str, np.ndarray]],
) -> dict[str, np.ndarray]:
    """
    Merge MSA features coming from protein and RNA chains.

    Args:
        chains (Sequence[Mapping[str, np.ndarray]]):
            One or two feature dictionaries to merge

    Returns:
        Dict[str, np.ndarray]: A feature dictionary for the merged example
    """
    if len(chains) == 1:  # only prot or rna msa exists
        return chains[0]

    # Pad values used when stacking MSAs of different depth.
    final_msa_pad_values = {
        "msa": 31,
        "has_deletion": False,
        "deletion_value": 0,
    }
    tracking_keys = [
        "prot_pair_num_alignments",
        "prot_unpair_num_alignments",
        "rna_pair_num_alignments",
        "rna_unpair_num_alignments",
    ]

    merged = {}
    for name in set(chains[0].keys()).union(chains[1].keys()):
        values = [chain[name] for chain in chains if name in chain]
        if name in SEQ_FEATURES:
            # e.g. residue_index, profile, asym_id, sym_id, entity_id, deletion_mean
            merged[name] = np.concatenate(values, axis=0)
        elif name in ["msa", "has_deletion", "deletion_value"]:
            merged[name] = pad_to_max(
                *values, pad_value=final_msa_pad_values[name]
            )
        elif name in tracking_keys:
            # Unmerged bookkeeping counters are kept as-is for tracking.
            merged[name] = values[0]
        else:
            continue

    merged["num_alignments"] = np.asarray(merged["msa"].shape[0], dtype=np.int32)
    return merged
|
| 944 |
+
|
| 945 |
+
|
| 946 |
+
def _concatenate_paired_and_unpaired_features(
    np_example: Mapping[str, np.ndarray]
) -> dict[str, np.ndarray]:
    """
    Stack paired ("_all_seq") MSA rows on top of the unpaired rows.

    Args:
        np_example (Mapping[str, np.ndarray]): input features (mutated in place)

    Returns:
        Dict[str, np.ndarray]: features with paired and unpaired rows concatenated
    """
    for name in MSA_FEATURES:
        if name in np_example:
            # Paired rows come first, followed by the unpaired block.
            np_example[name] = np.concatenate(
                [np_example[name + "_all_seq"], np_example[name]], axis=0
            )

    np_example["num_alignments"] = np.array(np_example["msa"].shape[0], dtype=np.int32)
    return np_example
|
| 967 |
+
|
| 968 |
+
|
| 969 |
+
def _correct_post_merged_feats(
|
| 970 |
+
np_example: Mapping[str, np.ndarray],
|
| 971 |
+
) -> dict[str, np.ndarray]:
|
| 972 |
+
"""
|
| 973 |
+
Adds features that need to be computed/recomputed post merging
|
| 974 |
+
|
| 975 |
+
Args:
|
| 976 |
+
np_example (Mapping[str, np.ndarray]): input features
|
| 977 |
+
|
| 978 |
+
Returns:
|
| 979 |
+
Dict[str, np.ndarray]: processed features
|
| 980 |
+
"""
|
| 981 |
+
|
| 982 |
+
np_example["seq_length"] = np.asarray(np_example["aatype"].shape[0], dtype=np.int32)
|
| 983 |
+
np_example["num_alignments"] = np.asarray(
|
| 984 |
+
np_example["msa"].shape[0], dtype=np.int32
|
| 985 |
+
)
|
| 986 |
+
return np_example
|
| 987 |
+
|
| 988 |
+
|
| 989 |
+
def _add_msa_num_alignment(
|
| 990 |
+
np_example: Mapping[str, np.ndarray], msa_entity_type: str = "prot"
|
| 991 |
+
) -> dict[str, np.ndarray]:
|
| 992 |
+
"""
|
| 993 |
+
Adds pair and unpair msa alignments num
|
| 994 |
+
|
| 995 |
+
Args:
|
| 996 |
+
np_example (Mapping[str, np.ndarray]): input features
|
| 997 |
+
|
| 998 |
+
Returns:
|
| 999 |
+
Dict[str, np.ndarray]: processed features
|
| 1000 |
+
"""
|
| 1001 |
+
assert msa_entity_type in ["prot", "rna"]
|
| 1002 |
+
if "msa_all_seq" in np_example:
|
| 1003 |
+
pair_num_alignments = np.asarray(
|
| 1004 |
+
np_example["msa_all_seq"].shape[0], dtype=np.int32
|
| 1005 |
+
)
|
| 1006 |
+
else:
|
| 1007 |
+
pair_num_alignments = np.asarray(0, dtype=np.int32)
|
| 1008 |
+
np_example[f"{msa_entity_type}_pair_num_alignments"] = pair_num_alignments
|
| 1009 |
+
np_example[f"{msa_entity_type}_unpair_num_alignments"] = (
|
| 1010 |
+
np_example["num_alignments"] - pair_num_alignments
|
| 1011 |
+
)
|
| 1012 |
+
return np_example
|
| 1013 |
+
|
| 1014 |
+
|
| 1015 |
+
def process_prot_final(np_example: Mapping[str, np.ndarray]) -> dict[str, np.ndarray]:
    """
    Final protein pipeline steps, after merging and pairing.

    Args:
        np_example (Mapping[str, np.ndarray]): input features

    Returns:
        Dict[str, np.ndarray]: processed features restricted to REQUIRED_FEATURES
    """
    # Remap restypes, derive deletion features, record alignment counts,
    # then keep only the required features.
    np_example = correct_msa_restypes(np_example)
    np_example = final_transform(np_example)
    np_example = _add_msa_num_alignment(np_example, msa_entity_type="prot")
    return filter_features(np_example)
|
| 1031 |
+
|
| 1032 |
+
|
| 1033 |
+
def correct_msa_restypes(np_example: Mapping[str, np.ndarray]) -> dict[str, np.ndarray]:
    """
    Remap protein MSA restype ids to the residue_constants ordering.

    Args:
        np_example (Mapping[str, np.ndarray]): input features (mutated in place)

    Returns:
        Dict[str, np.ndarray]: features with "msa" and "profile" remapped
    """
    # Remap msa ids through the NEW_ORDER_LIST lookup table.
    np_example["msa"] = np.take(NEW_ORDER_LIST, np_example["msa"], axis=0).astype(
        np.int32
    )

    # Scatter the profile columns into the remapped 32-wide layout.
    seq_len, profile_dim = np_example["profile"].shape
    assert profile_dim == len(NEW_ORDER_LIST)
    remapped_profile = np.zeros((seq_len, 32))
    remapped_profile[:, np.array(NEW_ORDER_LIST)] = np_example["profile"]
    np_example["profile"] = remapped_profile

    return np_example
|
| 1054 |
+
|
| 1055 |
+
|
| 1056 |
+
def process_rna_final(np_example: Mapping[str, np.ndarray]) -> dict[str, np.ndarray]:
    """
    Final RNA pipeline steps, after merging and pairing.

    Args:
        np_example (Mapping[str, np.ndarray]): input features

    Returns:
        Dict[str, np.ndarray]: processed features restricted to REQUIRED_FEATURES
    """
    # Remap restypes, derive deletion features, record alignment counts,
    # then keep only the required features.
    np_example = correct_rna_msa_restypes(np_example)
    np_example = final_transform(np_example)
    np_example = _add_msa_num_alignment(np_example, msa_entity_type="rna")
    return filter_features(np_example)
|
| 1072 |
+
|
| 1073 |
+
|
| 1074 |
+
def correct_rna_msa_restypes(
    np_example: Mapping[str, np.ndarray]
) -> dict[str, np.ndarray]:
    """
    Remap RNA MSA restype ids to the residue_constants ordering.

    Args:
        np_example (Mapping[str, np.ndarray]): input features (mutated in place)

    Returns:
        Dict[str, np.ndarray]: features with "msa" and "profile" remapped
    """
    # Remap msa ids through the RNA_NEW_ORDER_LIST lookup table.
    np_example["msa"] = np.take(RNA_NEW_ORDER_LIST, np_example["msa"], axis=0).astype(
        np.int32
    )

    # Scatter the profile columns into the remapped 32-wide layout.
    seq_len, profile_dim = np_example["profile"].shape
    assert profile_dim == len(RNA_NEW_ORDER_LIST)
    remapped_profile = np.zeros((seq_len, 32))
    remapped_profile[:, np.array(RNA_NEW_ORDER_LIST)] = np_example["profile"]
    np_example["profile"] = remapped_profile

    return np_example
|
| 1097 |
+
|
| 1098 |
+
|
| 1099 |
+
def final_transform(np_example: Mapping[str, np.ndarray]) -> dict[str, np.ndarray]:
    """
    Derive has_deletion / deletion_value from the raw deletion matrix.

    Args:
        np_example (Mapping[str, np.ndarray]): input features; "deletion_matrix"
            is consumed (popped) by this transform

    Returns:
        Dict[str, np.ndarray]: features with deletion-derived fields added
    """
    deletions = np_example.pop("deletion_matrix")

    # Boolean flag: any deletion preceding this position?
    np_example["has_deletion"] = np.clip(deletions, a_min=0, a_max=1).astype(np.bool_)

    # Squash raw counts into [0, 1) with 2/pi * arctan(d / 3).
    np_example["deletion_value"] = (2 / np.pi) * np.arctan(deletions / 3)
    assert np.all(-1e-5 < np_example["deletion_value"]) and np.all(
        np_example["deletion_value"] < (1 + 1e-5)
    )
    return np_example
|
| 1119 |
+
|
| 1120 |
+
|
| 1121 |
+
def filter_features(np_example: Mapping[str, np.ndarray]) -> dict[str, np.ndarray]:
    """
    Keep only the features listed in REQUIRED_FEATURES.

    Args:
        np_example (Mapping[str, np.ndarray]): input features

    Returns:
        Dict[str, np.ndarray]: the subset of features that are required
    """
    return {name: feat for name, feat in np_example.items() if name in REQUIRED_FEATURES}
|
| 1132 |
+
|
| 1133 |
+
|
| 1134 |
+
def crop_chains(
    chains_list: Sequence[Mapping[str, np.ndarray]],
    msa_crop_size: int,
    pair_msa_sequences: bool,
) -> list[Mapping[str, np.ndarray]]:
    """
    Crop the MSAs of every chain in a set.

    Args:
        chains_list (Sequence[Mapping[str, np.ndarray]]): Chains to crop
        msa_crop_size (int): Total number of MSA rows to keep
        pair_msa_sequences (bool): Whether we are in sequence-pairing mode

    Returns:
        List[Mapping[str, np.ndarray]]: The cropped chains
    """
    return [
        _crop_single_chain(
            chain,
            msa_crop_size=msa_crop_size,
            pair_msa_sequences=pair_msa_sequences,
        )
        for chain in chains_list
    ]
|
| 1162 |
+
|
| 1163 |
+
|
| 1164 |
+
def _crop_single_chain(
    chain: Mapping[str, np.ndarray],
    msa_crop_size: int,  # 2048
    pair_msa_sequences: bool,
) -> dict[str, np.ndarray]:
    """
    Crop one chain's MSA features to at most msa_crop_size rows.

    Args:
        chain (Mapping[str, np.ndarray]): The chain to crop (mutated in place)
        msa_crop_size (int): Total number of MSA rows to keep
        pair_msa_sequences (bool): Whether we are in sequence-pairing mode

    Returns:
        Dict[str, np.ndarray]: The cropped chain
    """
    unpaired_size = chain["num_alignments"]

    if pair_msa_sequences:
        # Paired MSA gets at most half of the budget.
        paired_size = chain["num_alignments_all_seq"]
        paired_crop = np.minimum(paired_size, msa_crop_size // 2)

        # Reduce the unpaired budget by the number of paired rows that
        # actually contain this chain (i.e. are not all-gap), keeping the
        # total MSA size per chain roughly constant.
        paired_block = chain["msa_all_seq"][:paired_crop, :]
        non_gapped = np.sum(np.any(paired_block != MSA_GAP_IDX, axis=1))
        non_gapped = np.minimum(non_gapped, paired_crop)

        # Paired + unpaired together must not exceed msa_crop_size.
        unpaired_budget = np.maximum(msa_crop_size - non_gapped, 0)
        unpaired_crop = np.minimum(unpaired_size, unpaired_budget)
    else:
        unpaired_crop = np.minimum(unpaired_size, msa_crop_size)

    # Truncate every MSA-shaped feature to its crop size.
    for key in chain:
        base_key = key.split("_all_seq")[0]
        if base_key in MSA_FEATURES:
            if "_all_seq" in key and pair_msa_sequences:
                chain[key] = chain[key][:paired_crop, :]
            else:
                chain[key] = chain[key][:unpaired_crop, :]

    chain["num_alignments"] = np.asarray(unpaired_crop, dtype=np.int32)
    if pair_msa_sequences:
        chain["num_alignments_all_seq"] = np.asarray(paired_crop, dtype=np.int32)
    return chain
|
| 1214 |
+
|
| 1215 |
+
|
| 1216 |
+
def truncate_at_min(*arrs: np.ndarray) -> np.ndarray:
    """
    Dense-merge unpaired features by truncating all arrays to the shortest depth.

    Args:
        arrs (np.ndarray): input feature arrays of shape (num_msa_i, num_res_i)

    Returns:
        np.ndarray: arrays truncated to min(num_msa_i) and concatenated along axis 1
    """
    shortest = min(arr.shape[0] for arr in arrs)
    return np.concatenate([arr[:shortest, :] for arr in arrs], axis=1)
|
| 1230 |
+
|
| 1231 |
+
|
| 1232 |
+
def pad_to_max(*arrs: np.ndarray, pad_value: float = 0.0) -> np.ndarray:
    """
    Dense-merge unpaired features by padding all arrays to the deepest MSA.

    Args:
        arrs (np.ndarray): input feature arrays of shape (num_msa_i, num_res_i)
        pad_value (float): value used to fill the missing rows

    Returns:
        np.ndarray: arrays padded to max(num_msa_i) and concatenated along axis 1
    """
    deepest = max(arr.shape[0] for arr in arrs)
    padded = [
        np.pad(
            arr,
            ((0, deepest - arr.shape[0]), (0, 0)),
            mode="constant",
            constant_values=pad_value,
        )
        for arr in arrs
    ]
    return np.concatenate(padded, axis=1)
|
| 1254 |
+
|
| 1255 |
+
|
| 1256 |
+
def clip_msa(
    np_example: Mapping[str, np.ndarray], max_num_msa: int
) -> dict[str, np.ndarray]:
    """
    Truncate MSA features to at most max_num_msa rows.

    Args:
        np_example (Mapping[str, np.ndarray]): input MSA features (mutated in place)
        max_num_msa (int): maximum number of MSA rows to keep

    Returns:
        Dict[str, np.ndarray]: clipped MSA features
    """
    if np_example["msa"].shape[0] > max_num_msa:
        for key in ["msa", "has_deletion", "deletion_value"]:
            np_example[key] = np_example[key][:max_num_msa, :]
        np_example["num_alignments"] = max_num_msa

    # num_alignments must stay consistent with the msa depth.
    assert np_example["num_alignments"] == np_example["msa"].shape[0]
    return np_example
|
| 1275 |
+
|
| 1276 |
+
|
| 1277 |
+
def get_identifier_func(pairing_db: str) -> Callable:
    """
    Build the function that extracts a species identifier from an MSA
    sequence description, specialized for the pairing database.

    Args:
        pairing_db (str): the database from which MSAs for pairing are searched

    Returns:
        Callable: maps a description string to an Identifiers instance

    Raises:
        NotImplementedError: if no extractor exists for pairing_db
    """
    if pairing_db.startswith("uniprot"):

        def func(description: str) -> Identifiers:
            seq_id = _extract_sequence_identifier(description)
            if seq_id is None:
                return Identifiers()
            return _parse_sequence_identifier(seq_id)

        return func

    if pairing_db.startswith("uniref100"):

        def func(description: str) -> Identifiers:
            # Expects headers shaped like "UniRef100_X_SPECIES/range"
            # (two underscores before the "/"); the species tag is the last
            # "_"-separated token — presumably a taxonomy/mnemonic id.
            if description.startswith("UniRef100") and "/" in description:
                first_comp = description.split("/")[0]
                if first_comp.count("_") == 2:
                    return Identifiers(species_id=first_comp.split("_")[-1])
            return Identifiers()

        return func

    raise NotImplementedError(f"Identifier func for {pairing_db} is not implemented")
|
| 1316 |
+
|
| 1317 |
+
|
| 1318 |
+
def run_msa_tool(
    msa_runner,
    fasta_path: str,
    msa_out_path: str,
    msa_format: str,
    max_sto_sequences: Optional[int] = None,
) -> Mapping[str, Any]:
    """Run an MSA search tool and write its result to ``msa_out_path``.

    Args:
        msa_runner: object exposing a ``query`` method (e.g. a
            ``jackhmmer.Jackhmmer`` instance) returning a list of result dicts.
        fasta_path (str): path of the query FASTA file.
        msa_out_path (str): output path; its file extension must equal
            ``msa_format``.
        msa_format (str): key of the desired output format in the result
            dict (e.g. "sto", "a3m").
        max_sto_sequences (Optional[int]): cap on the number of sequences
            kept; only honored for the "sto" format.

    Returns:
        Mapping[str, Any]: the first result dict returned by ``msa_runner.query``.

    Raises:
        ValueError: if the extension of ``msa_out_path`` does not match
            ``msa_format``.
    """
    if msa_format == "sto" and max_sto_sequences is not None:
        result = msa_runner.query(fasta_path, max_sto_sequences)[0]
    else:
        result = msa_runner.query(fasta_path)[0]

    # Validate explicitly instead of `assert`, which is stripped under `python -O`.
    if msa_out_path.split(".")[-1] != msa_format:
        raise ValueError(
            f"Extension of {msa_out_path} does not match msa_format {msa_format}"
        )
    with open(msa_out_path, "w") as f:
        f.write(result[msa_format])

    return result
|
| 1336 |
+
|
| 1337 |
+
|
| 1338 |
+
def search_msa(sequence: str, db_fpath: str, res_fpath: str = ""):
    """Search an MSA for ``sequence`` with jackhmmer and reformat it to .a3m.

    Args:
        sequence (str): query sequence to search.
        db_fpath (str): path of the sequence database to search against.
        res_fpath (str): destination path of the reformatted .a3m file;
            when empty, a fresh temporary directory under /tmp is used.

    Returns:
        None. On failure (search or reformat), the problem is logged and the
        function returns early; callers detect success by checking that
        ``res_fpath`` exists afterwards.
    """
    assert opexists(
        db_fpath
    ), f"Database path for MSA searching does not exist:\n{db_fpath}"
    # Random hex name avoids collisions between concurrent searches.
    seq_name = uuid.uuid4().hex
    db_name = os.path.basename(db_fpath)
    jackhmmer_binary_path = shutil.which("jackhmmer")
    msa_runner = jackhmmer.Jackhmmer(
        binary_path=jackhmmer_binary_path,
        database_path=db_fpath,
        n_cpu=2,
    )
    if res_fpath == "":
        tmp_dir = f"/tmp/{uuid.uuid4().hex}"
        res_fpath = os.path.join(tmp_dir, f"{seq_name}.a3m")
    else:
        tmp_dir = os.path.dirname(res_fpath)
    os.makedirs(tmp_dir, exist_ok=True)
    output_sto_path = os.path.join(tmp_dir, f"{seq_name}.sto")
    with open((tmp_fasta_path := f"{tmp_dir}/{seq_name}_{db_name}.fasta"), "w") as f:
        f.write(">query\n")  # plain string; was an f-string with no placeholder
        f.write(sequence)

    logger.info(f"Searching MSA for {seq_name}\n. Will be saved to {output_sto_path}")
    _ = run_msa_tool(msa_runner, tmp_fasta_path, output_sto_path, "sto")
    if not opexists(output_sto_path):
        logger.info(f"Failed to search MSA for {sequence} from the database {db_fpath}")
        return

    logger.info(f"Reformatting the MSA file. Will be saved to {res_fpath}")

    # NOTE(review): paths are interpolated into a shell command (shell=True);
    # this is safe only while they contain no spaces/shell metacharacters,
    # which holds for the uuid-based paths generated above — but db-derived
    # res_fpath values from callers should be kept shell-safe too.
    cmd = f"/opt/hhsuite/scripts/reformat.pl {output_sto_path} {res_fpath}"
    try:
        subprocess.check_call(cmd, shell=True, executable="/bin/bash")
    except Exception as e:
        # reformat.pl occasionally fails transiently; retry once after a pause.
        logger.info(f"Reformatting failed:\n {e}\nRetry {cmd}...")
        time.sleep(1)
        subprocess.check_call(cmd, shell=True, executable="/bin/bash")
    if not os.path.exists(res_fpath):
        logger.info(
            f"Failed to reformat the MSA file. Please check the validity of the .sto file{output_sto_path}"
        )
        return
|
| 1381 |
+
|
| 1382 |
+
|
| 1383 |
+
def search_msa_paired(
    sequence: str, pairing_db_fpath: str, non_pairing_db_fpath: str, idx: int = -1
) -> tuple[Union[str, None], int]:
    """Run the pairing and non-pairing MSA searches for one sequence.

    Args:
        sequence (str): query sequence.
        pairing_db_fpath (str): database used for the pairing MSA.
        non_pairing_db_fpath (str): database used for the non-pairing MSA.
        idx (int): caller-supplied index, echoed back so that results of
            parallel searches can be matched to their inputs.

    Returns:
        tuple[Union[str, None], int]: (directory holding "pairing.a3m" and
            "non_pairing.a3m", idx) on success, or (None, idx) if either
            search produced no output file.
    """
    # Unique per-call scratch directory: uuid + timestamp + dist rank + idx.
    unique = f"{uuid.uuid4().hex}_{str(time.time()).replace('.', '_')}"
    tmp_dir = f"/tmp/{unique}_{DIST_WRAPPER.rank}_{idx}"
    os.makedirs(tmp_dir, exist_ok=True)

    searches = (
        ("pairing.a3m", pairing_db_fpath),
        ("non_pairing.a3m", non_pairing_db_fpath),
    )
    for file_name, db_fpath in searches:
        out_file = os.path.join(tmp_dir, file_name)
        search_msa(sequence, db_fpath, out_file)
        if not os.path.exists(out_file):
            # search_msa signals failure by not producing the output file.
            return None, idx
    return tmp_dir, idx
|
| 1398 |
+
|
| 1399 |
+
|
| 1400 |
+
def msa_parallel(sequences: dict[int, tuple[str, str, str]]) -> dict[int, str]:
    """Search paired MSAs for several sequences concurrently.

    Args:
        sequences (dict[int, tuple[str, str, str]]): maps an index to
            (sequence, pairing_db_fpath, non_pairing_db_fpath).

    Returns:
        dict[int, str]: maps each index to the scratch directory returned by
            ``search_msa_paired`` (or None when that search failed).
    """
    from concurrent.futures import ThreadPoolExecutor

    max_workers = 4
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [
            executor.submit(search_msa_paired, seq[0], seq[1], seq[2], idx)
            for idx, seq in sequences.items()
        ]
        # future.result() blocks until the corresponding worker has finished.
        outcomes = [future.result() for future in futures]

    # Each outcome is (tmp_dir_or_None, idx); key the result map by idx.
    return {idx: tmp_dir for tmp_dir, idx in outcomes}
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/parser.py
ADDED
|
@@ -0,0 +1,1173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import copy
|
| 16 |
+
import functools
|
| 17 |
+
import gzip
|
| 18 |
+
import logging
|
| 19 |
+
from collections import Counter
|
| 20 |
+
from pathlib import Path
|
| 21 |
+
from typing import Optional, Union
|
| 22 |
+
|
| 23 |
+
import biotite.structure as struc
|
| 24 |
+
import biotite.structure.io.pdbx as pdbx
|
| 25 |
+
import numpy as np
|
| 26 |
+
import pandas as pd
|
| 27 |
+
from biotite.structure import AtomArray, get_chain_starts, get_residue_starts
|
| 28 |
+
from biotite.structure.io.pdbx import convert as pdbx_convert
|
| 29 |
+
from biotite.structure.molecules import get_molecule_indices
|
| 30 |
+
|
| 31 |
+
from protenix.data import ccd
|
| 32 |
+
from protenix.data.ccd import get_ccd_ref_info
|
| 33 |
+
from protenix.data.constants import (
|
| 34 |
+
DNA_STD_RESIDUES,
|
| 35 |
+
PROT_STD_RESIDUES_ONE_TO_THREE,
|
| 36 |
+
RES_ATOMS_DICT,
|
| 37 |
+
RNA_STD_RESIDUES,
|
| 38 |
+
STD_RESIDUES,
|
| 39 |
+
)
|
| 40 |
+
from protenix.data.utils import get_starts_by
|
| 41 |
+
|
| 42 |
+
logger = logging.getLogger(__name__)

# Drop "metalc" (metal coordination) from the bond types biotite extracts from
# _struct_conn, so inter-residue metal coordinate bonds are ignored globally.
# Membership-guarded so re-importing/reloading this module does not raise.
if "metalc" in pdbx_convert.PDBX_COVALENT_TYPES:  # for reload
    pdbx_convert.PDBX_COVALENT_TYPES.remove("metalc")
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class MMCIFParser:
    """Parse an mmCIF (optionally gzipped) file into a biotite AtomArray.

    Wraps :mod:`biotite.structure.io.pdbx` and adds post-processing used by
    the data pipeline: altloc filtering, MSE->MET conversion, polymer
    sequence extraction from ``_entity_poly_seq`` and bioassembly expansion.
    NOTE(review): relies on several private biotite helpers
    (``pdbx_convert._*``), so it is tied to the biotite version in use.
    """

    def __init__(self, mmcif_file: Union[str, Path]) -> None:
        # Parse eagerly; every other accessor reads from self.cif.
        self.cif = self._parse(mmcif_file=mmcif_file)

    def _parse(self, mmcif_file: Union[str, Path]) -> pdbx.CIFFile:
        """Read a plain or gzip-compressed mmCIF file into a CIFFile."""
        mmcif_file = Path(mmcif_file)
        if mmcif_file.suffix == ".gz":
            with gzip.open(mmcif_file, "rt") as f:
                cif_file = pdbx.CIFFile.read(f)
        else:
            with open(mmcif_file, "rt") as f:
                cif_file = pdbx.CIFFile.read(f)
        return cif_file

    def get_category_table(self, name: str) -> Union[pd.DataFrame, None]:
        """Return an mmCIF category as a string-typed DataFrame, or None if absent."""
        if name not in self.cif.block:
            return None
        category = self.cif.block[name]
        category_dict = {k: column.as_array() for k, column in category.items()}
        # dtype=str keeps all columns textual; callers convert as needed.
        return pd.DataFrame(category_dict, dtype=str)

    @staticmethod
    def mse_to_met(atom_array: AtomArray) -> AtomArray:
        """
        Ref: AlphaFold3 SI chapter 2.1
        MSE residues are converted to MET residues.

        Args:
            atom_array (AtomArray): Biotite AtomArray object.

        Returns:
            AtomArray: Biotite AtomArray object after converted MSE to MET.
        """
        mse = atom_array.res_name == "MSE"
        se = mse & (atom_array.atom_name == "SE")
        # Selenium atom of selenomethionine becomes the sulfur of methionine.
        atom_array.atom_name[se] = "SD"
        atom_array.element[se] = "S"
        atom_array.res_name[mse] = "MET"
        # MET is a standard residue, so clear the heteroatom flag.
        atom_array.hetero[mse] = False
        return atom_array

    @functools.cached_property
    def methods(self) -> list[str]:
        """the methods to get the structure

        most of the time, methods only has one method, such as 'X-RAY DIFFRACTION',
        but about 233 entries have multi methods, such as ['X-RAY DIFFRACTION', 'NEUTRON DIFFRACTION'].

        Allowed Values:
        https://mmcif.wwpdb.org/dictionaries/mmcif_pdbx_v50.dic/Items/_exptl.method.html

        Returns:
            list[str]: such as ['X-RAY DIFFRACTION'], ['ELECTRON MICROSCOPY'], ['SOLUTION NMR', 'THEORETICAL MODEL'],
            ['X-RAY DIFFRACTION', 'NEUTRON DIFFRACTION'], ['ELECTRON MICROSCOPY', 'SOLUTION NMR'], etc.
            (The non-empty case is actually a numpy str array; it supports the
            same iteration/membership uses as a list.)
        """
        if "exptl" not in self.cif.block:
            return []
        else:
            methods = self.cif.block["exptl"]["method"]
            return methods.as_array()

    def get_poly_res_names(
        self, atom_array: Optional[AtomArray] = None
    ) -> dict[str, list[str]]:
        """get 3-letter residue names by combining mmcif._entity_poly_seq and atom_array

        if ref_atom_array is None: keep first altloc residue of the same res_id based in mmcif._entity_poly_seq
        if ref_atom_array is provided: keep same residue of ref_atom_array.

        Returns
            dict[str, list[str]]: label_entity_id --> [res_ids, res_names]
        """
        entity_res_names = {}
        if atom_array is not None:
            # build entity_id -> res_id -> res_name for input atom array
            res_starts = struc.get_residue_starts(atom_array, add_exclusive_stop=False)
            for start in res_starts:
                entity_id = atom_array.label_entity_id[start]
                res_id = atom_array.res_id[start]
                res_name = atom_array.res_name[start]
                if entity_id in entity_res_names:
                    entity_res_names[entity_id][res_id] = res_name
                else:
                    entity_res_names[entity_id] = {res_id: res_name}

        # build reference entity atom array, including missing residues
        entity_poly_seq = self.get_category_table("entity_poly_seq")
        if entity_poly_seq is None:
            return {}

        poly_res_names = {}
        for entity_id, poly_type in self.entity_poly_type.items():
            chain_mask = entity_poly_seq.entity_id == entity_id
            seq_mon_ids = entity_poly_seq.mon_id[chain_mask].to_numpy(dtype=str)

            # replace all MSE to MET in _entity_poly_seq.mon_id
            seq_mon_ids[seq_mon_ids == "MSE"] = "MET"

            seq_nums = entity_poly_seq.num[chain_mask].to_numpy(dtype=int)

            if np.unique(seq_nums).size == seq_nums.size:
                # no altloc residues
                poly_res_names[entity_id] = seq_mon_ids
                continue

            # filter altloc residues, eg: 181 ALA (altloc A); 181 GLY (altloc B)
            select_mask = np.zeros(len(seq_nums), dtype=bool)
            # matching_res_id tracks the next expected (1-based, consecutive)
            # residue number; duplicates of an already-selected number are skipped.
            matching_res_id = seq_nums[0]
            for i, res_id in enumerate(seq_nums):
                if res_id != matching_res_id:
                    continue

                res_name_in_atom_array = entity_res_names.get(entity_id, {}).get(res_id)
                if res_name_in_atom_array is None:
                    # res_name is mssing in atom_array,
                    # keep first altloc residue of the same res_id
                    select_mask[i] = True
                else:
                    # keep match residue to atom_array
                    if res_name_in_atom_array == seq_mon_ids[i]:
                        select_mask[i] = True

                if select_mask[i]:
                    matching_res_id += 1

            seq_mon_ids = seq_mon_ids[select_mask]
            seq_nums = seq_nums[select_mask]
            # After de-duplication the kept numbering must be 1..N without gaps.
            assert len(seq_nums) == max(seq_nums)
            poly_res_names[entity_id] = seq_mon_ids
        return poly_res_names

    def get_sequences(self, atom_array: Optional[AtomArray] = None) -> dict:
        """get sequence by combining mmcif._entity_poly_seq and atom_array

        if ref_atom_array is None: keep first altloc residue of the same res_id based in mmcif._entity_poly_seq
        if ref_atom_array is provided: keep same residue of atom_array.

        Return
            Dict{str:str}: label_entity_id --> canonical_sequence
        """
        sequences = {}
        for entity_id, res_names in self.get_poly_res_names(atom_array).items():
            seq = ccd.res_names_to_sequence(res_names)
            sequences[entity_id] = seq
        return sequences

    @functools.cached_property
    def entity_poly_type(self) -> dict[str, str]:
        """
        Ref: https://mmcif.wwpdb.org/dictionaries/mmcif_pdbx_v50.dic/Items/_entity_poly.type.html
        Map entity_id to entity_poly_type.

        Allowed Value:
        · cyclic-pseudo-peptide
        · other
        · peptide nucleic acid
        · polydeoxyribonucleotide
        · polydeoxyribonucleotide/polyribonucleotide hybrid
        · polypeptide(D)
        · polypeptide(L)
        · polyribonucleotide

        Returns:
            Dict: a dict of label_entity_id --> entity_poly_type.
        """
        entity_poly = self.get_category_table("entity_poly")
        if entity_poly is None:
            return {}

        return {i: t for i, t in zip(entity_poly.entity_id, entity_poly.type)}

    def filter_altloc(self, atom_array: AtomArray, altloc: str = "first") -> AtomArray:
        """
        altloc: "first", "A", "B", "global_largest", etc

        Filter first alternate coformation (altloc) of a given AtomArray.
        - normally first altloc_id is 'A'
        - but in one case, first altloc_id is '1' in 6uwi.cif

        biotite v0.41 can not handle diff res_name at same res_id.
        For example, in 2pxs.cif, there are two res_name (XYG|DYG) at res_id 63,
        need to keep the first XYG.
        """
        if altloc == "all":
            return atom_array

        altloc_id = altloc
        if altloc == "first":
            letter_altloc_ids = np.unique(atom_array.label_alt_id)
            if len(letter_altloc_ids) == 1 and letter_altloc_ids[0] == ".":
                # "." means no alternate conformations at all; nothing to filter.
                return atom_array
            letter_altloc_ids = letter_altloc_ids[letter_altloc_ids != "."]
            # "first" = lexicographically smallest altloc id present.
            altloc_id = np.sort(letter_altloc_ids)[0]

        # Keep the chosen altloc plus all atoms without an altloc (".").
        return atom_array[np.isin(atom_array.label_alt_id, [altloc_id, "."])]

    @staticmethod
    def replace_auth_with_label(atom_array: AtomArray) -> AtomArray:
        """Replace author chain/residue numbering with label_* numbering.

        Ligand chains (label_seq_id == ".") get fresh consecutive residue ids.
        """
        # fix issue https://github.com/biotite-dev/biotite/issues/553
        atom_array.chain_id = atom_array.label_asym_id

        # reset ligand res_id
        res_id = copy.deepcopy(atom_array.label_seq_id)
        chain_starts = get_chain_starts(atom_array, add_exclusive_stop=True)
        for chain_start, chain_stop in zip(chain_starts[:-1], chain_starts[1:]):
            if atom_array.label_seq_id[chain_start] != ".":
                continue
            else:
                # Non-polymer chain: number its residues 1, 2, 3, ...
                res_starts = get_residue_starts(
                    atom_array[chain_start:chain_stop], add_exclusive_stop=True
                )
                num = 1
                for res_start, res_stop in zip(res_starts[:-1], res_starts[1:]):
                    res_id[chain_start:chain_stop][res_start:res_stop] = num
                    num += 1

        atom_array.res_id = res_id.astype(int)
        return atom_array

    def get_structure(
        self,
        altloc: str = "first",
        model: int = 1,
        bond_lenth_threshold: Union[float, None] = 2.4,
    ) -> AtomArray:
        """
        Get an AtomArray created by bioassembly of MMCIF.

        altloc: "first", "all", "A", "B", etc
        model: the model number of the structure.
        bond_lenth_threshold: the threshold of bond length. If None, no filter will be applied.
                              Default is 2.4 Angstroms.

        Returns:
            AtomArray: Biotite AtomArray object created by bioassembly of MMCIF.
        """
        use_author_fields = True
        extra_fields = ["label_asym_id", "label_entity_id", "auth_asym_id"]  # chain
        extra_fields += ["label_seq_id", "auth_seq_id"]  # residue
        atom_site_fields = {
            "occupancy": "occupancy",
            "pdbx_formal_charge": "charge",
            "B_iso_or_equiv": "b_factor",
            "label_alt_id": "label_alt_id",
        }  # atom
        # Only request annotation fields actually present in this file.
        for atom_site_name, alt_name in atom_site_fields.items():
            if atom_site_name in self.cif.block["atom_site"]:
                extra_fields.append(alt_name)

        block = self.cif.block

        extra_fields = set(extra_fields)

        atom_site = block.get("atom_site")

        models = atom_site["pdbx_PDB_model_num"].as_array(np.int32)
        model_starts = pdbx_convert._get_model_starts(models)
        model_count = len(model_starts)

        if model == 0:
            raise ValueError("The model index must not be 0")
        # Negative models mean model indexing starting from last model

        model = model_count + model + 1 if model < 0 else model
        if model > model_count:
            raise ValueError(
                f"The file has {model_count} models, "
                f"the given model {model} does not exist"
            )

        model_atom_site = pdbx_convert._filter_model(atom_site, model_starts, model)
        # Any field of the category would work here to get the length
        model_length = model_atom_site.row_count
        atoms = AtomArray(model_length)

        atoms.coord[:, 0] = model_atom_site["Cartn_x"].as_array(np.float32)
        atoms.coord[:, 1] = model_atom_site["Cartn_y"].as_array(np.float32)
        atoms.coord[:, 2] = model_atom_site["Cartn_z"].as_array(np.float32)

        atoms.box = pdbx_convert._get_box(block)

        # The below part is the same for both, AtomArray and AtomArrayStack
        pdbx_convert._fill_annotations(
            atoms, model_atom_site, extra_fields, use_author_fields
        )

        # Intra-residue bonds from CCD templates; inter-residue bonds come
        # from _struct_conn (length-filtered) and from ccd helpers below.
        bonds = struc.connect_via_residue_names(atoms, inter_residue=False)
        if "struct_conn" in block:
            conn_bonds = pdbx_convert._parse_inter_residue_bonds(
                model_atom_site, block["struct_conn"]
            )
            coord1 = atoms.coord[conn_bonds._bonds[:, 0]]
            coord2 = atoms.coord[conn_bonds._bonds[:, 1]]
            dist = np.linalg.norm(coord1 - coord2, axis=1)
            if bond_lenth_threshold is not None:
                # Drop implausibly long _struct_conn bonds.
                conn_bonds._bonds = conn_bonds._bonds[dist < bond_lenth_threshold]
            bonds = bonds.merge(conn_bonds)
        atoms.bonds = bonds

        atom_array = self.filter_altloc(atoms, altloc=altloc)

        # inference inter residue bonds based on res_id (auth_seq_id) and label_asym_id.
        atom_array = ccd.add_inter_residue_bonds(
            atom_array,
            exclude_struct_conn_pairs=True,
            remove_far_inter_chain_pairs=True,
        )

        # use label_seq_id to match seq and structure
        atom_array = self.replace_auth_with_label(atom_array)

        # inference inter residue bonds based on new res_id (label_seq_id).
        # the auth_seq_id is not reliable, some are discontinuous (8bvh), some with insertion codes (6ydy).
        atom_array = ccd.add_inter_residue_bonds(
            atom_array, exclude_struct_conn_pairs=True
        )
        return atom_array

    def expand_assembly(
        self, structure: AtomArray, assembly_id: str = "1"
    ) -> AtomArray:
        """
        Expand the given assembly to all chains
        copy from biotite.structure.io.pdbx.get_assembly

        Args:
            structure (AtomArray): The AtomArray of the structure to expand.
            assembly_id (str, optional): The assembly ID in mmCIF file. Defaults to "1".
                If assembly_id is "all", all assemblies will be returned.

        Returns:
            AtomArray: The assembly AtomArray.
        """
        block = self.cif.block

        try:
            assembly_gen_category = block["pdbx_struct_assembly_gen"]
        except KeyError:
            logging.info(
                "File has no 'pdbx_struct_assembly_gen' category, return original structure."
            )
            return structure

        try:
            struct_oper_category = block["pdbx_struct_oper_list"]
        except KeyError:
            logging.info(
                "File has no 'pdbx_struct_oper_list' category, return original structure."
            )
            return structure

        assembly_ids = assembly_gen_category["assembly_id"].as_array(str)

        if assembly_id != "all":
            if assembly_id is None:
                assembly_id = assembly_ids[0]
            elif assembly_id not in assembly_ids:
                raise KeyError(f"File has no Assembly ID '{assembly_id}'")

        ### Calculate all possible transformations
        transformations = pdbx_convert._get_transformations(struct_oper_category)

        ### Get transformations and apply them to the affected asym IDs
        assembly = None
        assembly_1_mask = []
        for id, op_expr, asym_id_expr in zip(
            assembly_gen_category["assembly_id"].as_array(str),
            assembly_gen_category["oper_expression"].as_array(str),
            assembly_gen_category["asym_id_list"].as_array(str),
        ):
            # Find the operation expressions for given assembly ID
            # We already asserted that the ID is actually present
            if assembly_id == "all" or id == assembly_id:
                operations = pdbx_convert._parse_operation_expression(op_expr)
                asym_ids = asym_id_expr.split(",")
                # Filter affected asym IDs
                sub_structure = copy.deepcopy(
                    structure[..., np.isin(structure.label_asym_id, asym_ids)]
                )
                sub_assembly = pdbx_convert._apply_transformations(
                    sub_structure, transformations, operations
                )
                # Merge the chains with asym IDs for this operation
                # with chains from other operations
                if assembly is None:
                    assembly = sub_assembly
                else:
                    assembly += sub_assembly

                # Track which atoms belong to assembly "1" so downstream code
                # can recover the primary assembly from an "all" expansion.
                if id == "1":
                    assembly_1_mask.extend([True] * len(sub_assembly))
                else:
                    assembly_1_mask.extend([False] * len(sub_assembly))

        if assembly_id == "1" or assembly_id == "all":
            assembly.set_annotation("assembly_1", np.array(assembly_1_mask))
        return assembly
|
| 446 |
+
|
| 447 |
+
|
| 448 |
+
class AddAtomArrayAnnot(object):
    """
    The methods in this class are all designed to add annotations to an AtomArray
    without altering the information in the original AtomArray.
    """

    @staticmethod
    def add_token_mol_type(
        atom_array: AtomArray, sequences: dict[str, str]
    ) -> AtomArray:
        """
        Add molecule types in atom_array.mol_type based on ccd pdbx_type.

        Args:
            atom_array (AtomArray): Biotite AtomArray object.
            sequences (dict[str, str]): A dict of label_entity_id --> canonical_sequence

        Returns:
            AtomArray: with atom_array.mol_type = "protein" | "rna" | "dna" | "ligand"
        """
        mol_types = np.zeros(len(atom_array), dtype="U7")
        starts = struc.get_residue_starts(atom_array, add_exclusive_stop=True)
        for start, stop in zip(starts[:-1], starts[1:]):
            entity_id = atom_array.label_entity_id[start]
            if entity_id not in sequences:
                # entities absent from the polymer sequences are ligands
                mol_types[start:stop] = "ligand"
                continue
            res_name = atom_array.res_name[start]

            mol_types[start:stop] = ccd.get_mol_type(res_name)

        atom_array.set_annotation("mol_type", mol_types)
        return atom_array

    @staticmethod
    def add_atom_mol_type_mask(atom_array: AtomArray) -> AtomArray:
        """
        Mask indicates is_protein / rna / dna / ligand.
        It is atom-level which is different with paper (token-level).
        The type of each atom is determined based on the most frequently
        occurring type in the chain to which it belongs.

        Args:
            atom_array (AtomArray): Biotite AtomArray object

        Returns:
            AtomArray: Biotite AtomArray object with
                       "is_ligand", "is_dna", "is_rna", "is_protein" annotation added.
        """
        # it should be called after mmcif_parser.add_token_mol_type
        chain_starts = struc.get_chain_starts(atom_array, add_exclusive_stop=True)
        chain_mol_type = []
        for start, end in zip(chain_starts[:-1], chain_starts[1:]):
            mol_types = atom_array.mol_type[start:end]
            mol_type_count = Counter(mol_types)
            # majority vote within the chain decides the chain-level type
            most_freq_mol_type = max(mol_type_count, key=mol_type_count.get)
            chain_mol_type.extend([most_freq_mol_type] * (end - start))
        atom_array.set_annotation("chain_mol_type", chain_mol_type)

        for type_str in ["ligand", "dna", "rna", "protein"]:
            mask = (atom_array.chain_mol_type == type_str).astype(int)
            atom_array.set_annotation(f"is_{type_str}", mask)
        return atom_array

    @staticmethod
    def add_modified_res_mask(atom_array: AtomArray) -> AtomArray:
        """
        Ref: AlphaFold3 SI Chapter 5.9.3

        Determine if an atom belongs to a modified residue,
        which is used to calculate the Modified Residue Scores in sample ranking:
        Modified residue scores are ranked according to the average pLDDT of the modified residue.

        Args:
            atom_array (AtomArray): Biotite AtomArray object

        Returns:
            AtomArray: Biotite AtomArray object with
                       "modified_res_mask" annotation added.
        """
        modified_res_mask = []
        starts = struc.get_residue_starts(atom_array, add_exclusive_stop=True)
        for start, stop in zip(starts[:-1], starts[1:]):
            res_name = atom_array.res_name[start]
            mol_type = atom_array.mol_type[start]
            res_atom_nums = stop - start
            # a polymer residue outside the standard residue set is "modified"
            if res_name not in STD_RESIDUES and mol_type != "ligand":
                modified_res_mask.extend([1] * res_atom_nums)
            else:
                modified_res_mask.extend([0] * res_atom_nums)
        atom_array.set_annotation("modified_res_mask", modified_res_mask)
        return atom_array

    @staticmethod
    def add_centre_atom_mask(atom_array: AtomArray) -> AtomArray:
        """
        Ref: AlphaFold3 SI Chapter 2.6
        • A standard amino acid residue (Table 13) is represented as a single token.
        • A standard nucleotide residue (Table 13) is represented as a single token.
        • A modified amino acid or nucleotide residue is tokenized per-atom (i.e. N tokens for an N-atom residue)
        • All ligands are tokenized per-atom
        For each token we also designate a token centre atom, used in various places below:
        • Cα for standard amino acids
        • C1′ for standard nucleotides
        • For other cases take the first and only atom as they are tokenized per-atom.

        Args:
            atom_array (AtomArray): Biotite AtomArray object

        Returns:
            AtomArray: Biotite AtomArray object with "centre_atom_mask" annotation added.
        """
        res_name = list(STD_RESIDUES.keys())
        std_res = np.isin(atom_array.res_name, res_name) & (
            atom_array.mol_type != "ligand"
        )
        # standard amino-acid CCD codes are 3 letters; nucleotides are 1-2 letters
        prot_res = np.char.str_len(atom_array.res_name) == 3
        prot_centre_atom = prot_res & (atom_array.atom_name == "CA")
        nuc_centre_atom = (~prot_res) & (atom_array.atom_name == r"C1'")
        not_std_res = ~std_res
        centre_atom_mask = (
            std_res & (prot_centre_atom | nuc_centre_atom)
        ) | not_std_res
        centre_atom_mask = centre_atom_mask.astype(int)
        atom_array.set_annotation("centre_atom_mask", centre_atom_mask)
        return atom_array

    @staticmethod
    def add_distogram_rep_atom_mask(atom_array: AtomArray) -> AtomArray:
        """
        Ref: AlphaFold3 SI Chapter 4.4
        the representative atom mask for each token for distogram head
        • Cβ for protein residues (Cα for glycine),
        • C4 for purines and C2 for pyrimidines.
        • All ligands already have a single atom per token.

        Due to the lack of explanation regarding the handling of "N" and "DN" in the article,
        it is impossible to determine the representative atom based on whether it is a purine or pyrimidine.
        Therefore, C1' is chosen as the representative atom for both "N" and "DN".

        Args:
            atom_array (AtomArray): Biotite AtomArray object

        Returns:
            AtomArray: Biotite AtomArray object with "distogram_rep_atom_mask" annotation added.
        """
        std_res = np.isin(atom_array.res_name, list(STD_RESIDUES.keys())) & (
            atom_array.mol_type != "ligand"
        )

        # for protein std res
        std_prot_res = std_res & (np.char.str_len(atom_array.res_name) == 3)
        gly = atom_array.res_name == "GLY"
        prot_cb = std_prot_res & (~gly) & (atom_array.atom_name == "CB")
        prot_gly_ca = gly & (atom_array.atom_name == "CA")

        # for nucleotide std res
        purines_c4 = np.isin(atom_array.res_name, ["DA", "DG", "A", "G"]) & (
            atom_array.atom_name == "C4"
        )
        pyrimidines_c2 = np.isin(atom_array.res_name, ["DC", "DT", "C", "U"]) & (
            atom_array.atom_name == "C2"
        )

        # for nucleotide unk res
        unk_nuc = np.isin(atom_array.res_name, ["DN", "N"]) & (
            atom_array.atom_name == r"C1'"
        )

        distogram_rep_atom_mask = (
            prot_cb | prot_gly_ca | purines_c4 | pyrimidines_c2 | unk_nuc
        ) | (~std_res)
        distogram_rep_atom_mask = distogram_rep_atom_mask.astype(int)

        atom_array.set_annotation("distogram_rep_atom_mask", distogram_rep_atom_mask)

        # one representative atom per token: counts must agree with centre atoms
        assert np.sum(atom_array.distogram_rep_atom_mask) == np.sum(
            atom_array.centre_atom_mask
        )

        return atom_array

    @staticmethod
    def add_plddt_m_rep_atom_mask(atom_array: AtomArray) -> AtomArray:
        """
        Ref: AlphaFold3 SI Chapter 4.3.1
        the representative atom for plddt loss
        • Atoms such that the distance in the ground truth between atom l and atom m is less than 15 Å
          if m is a protein atom or less than 30 Å if m is a nucleic acid atom.
        • Only atoms in polymer chains.
        • One atom per token - Cα for standard protein residues
          and C1′ for standard nucleic acid residues.

        Args:
            atom_array (AtomArray): Biotite AtomArray object

        Returns:
            AtomArray: Biotite AtomArray object with "plddt_m_rep_atom_mask" annotation added.
        """
        std_res = np.isin(atom_array.res_name, list(STD_RESIDUES.keys())) & (
            atom_array.mol_type != "ligand"
        )
        ca_or_c1 = (atom_array.atom_name == "CA") | (atom_array.atom_name == r"C1'")
        plddt_m_rep_atom_mask = (std_res & ca_or_c1).astype(int)
        atom_array.set_annotation("plddt_m_rep_atom_mask", plddt_m_rep_atom_mask)
        return atom_array

    @staticmethod
    def add_ref_space_uid(atom_array: AtomArray) -> AtomArray:
        """
        Ref: AlphaFold3 SI Chapter 2.8 Table 5
        Numerical encoding of the chain id and residue index associated with this reference conformer.
        Each (chain id, residue index) tuple is assigned an integer on first appearance.

        Args:
            atom_array (AtomArray): Biotite AtomArray object

        Returns:
            AtomArray: Biotite AtomArray object with "ref_space_uid" annotation added.
        """
        # [N_atom, 2]
        chain_res_id = np.vstack((atom_array.asym_id_int, atom_array.res_id)).T
        unique_id = np.unique(chain_res_id, axis=0)

        mapping_dict = {}
        for idx, chain_res_id_pair in enumerate(unique_id):
            asym_id_int, res_id = chain_res_id_pair
            mapping_dict[(asym_id_int, res_id)] = idx

        ref_space_uid = [
            mapping_dict[(asym_id_int, res_id)] for asym_id_int, res_id in chain_res_id
        ]
        atom_array.set_annotation("ref_space_uid", ref_space_uid)
        return atom_array

    @staticmethod
    def add_cano_seq_resname(atom_array: AtomArray) -> AtomArray:
        """
        Assign to each atom the three-letter residue name (resname)
        corresponding to its place in the canonical sequences.
        Non-standard residues are mapped to standard ones.
        Residues that cannot be mapped to standard residues and ligands are all labeled as "UNK".

        Note: Some CCD Codes in the canonical sequence are mapped to three letters. It is labeled as one "UNK".

        Args:
            atom_array (AtomArray): Biotite AtomArray object

        Returns:
            AtomArray: Biotite AtomArray object with "cano_seq_resname" annotation added.
        """
        cano_seq_resname = []
        starts = struc.get_residue_starts(atom_array, add_exclusive_stop=True)
        for start, stop in zip(starts[:-1], starts[1:]):
            res_atom_nums = stop - start
            mol_type = atom_array.mol_type[start]
            resname = atom_array.res_name[start]

            one_letter_code = ccd.get_one_letter_code(resname)
            if one_letter_code is None or len(one_letter_code) != 1:
                # Some non-standard residues cannot be mapped back to one standard residue.
                one_letter_code = "X" if mol_type == "protein" else "N"

            if mol_type == "protein":
                res_name_in_cano_seq = PROT_STD_RESIDUES_ONE_TO_THREE.get(
                    one_letter_code, "UNK"
                )
            elif mol_type == "dna":
                res_name_in_cano_seq = "D" + one_letter_code
                if res_name_in_cano_seq not in DNA_STD_RESIDUES:
                    res_name_in_cano_seq = "DN"
            elif mol_type == "rna":
                res_name_in_cano_seq = one_letter_code
                if res_name_in_cano_seq not in RNA_STD_RESIDUES:
                    res_name_in_cano_seq = "N"
            else:
                # some molecules attached to a polymer like ATP-RNA. e.g.
                res_name_in_cano_seq = "UNK"

            cano_seq_resname.extend([res_name_in_cano_seq] * res_atom_nums)

        atom_array.set_annotation("cano_seq_resname", cano_seq_resname)
        return atom_array

    @staticmethod
    def remove_bonds_between_polymer_chains(
        atom_array: AtomArray, entity_poly_type: dict[str, str]
    ) -> struc.BondList:
        """
        Remove bonds between polymer chains based on entity_poly_type

        Args:
            atom_array (AtomArray): Biotite AtomArray object
            entity_poly_type (dict[str, str]): entity_id to poly_type

        Returns:
            BondList: Biotite BondList object (copy) with bonds between polymer chains removed
        """
        copy = atom_array.bonds.copy()
        polymer_mask = np.isin(
            atom_array.label_entity_id, list(entity_poly_type.keys())
        )
        i = copy._bonds[:, 0]
        j = copy._bonds[:, 1]
        # drop a bond only when both endpoints are polymer atoms in different chains
        pp_bond_mask = polymer_mask[i] & polymer_mask[j]
        diff_chain_mask = atom_array.chain_id[i] != atom_array.chain_id[j]
        pp_bond_mask = pp_bond_mask & diff_chain_mask
        copy._bonds = copy._bonds[~pp_bond_mask]

        # post-process after modified bonds manually
        # due to the extraction of bonds using a mask, the lower one of the two atom indices is still in the first
        copy._remove_redundant_bonds()
        copy._max_bonds_per_atom = copy._get_max_bonds_per_atom()
        return copy

    @staticmethod
    def find_equiv_mol_and_assign_ids(
        atom_array: AtomArray,
        entity_poly_type: Optional[dict[str, str]] = None,
        check_final_equiv: bool = True,
    ) -> AtomArray:
        """
        Assign a unique integer to each molecule in the structure.
        All atoms connected by covalent bonds are considered as a molecule, with unique mol_id (int).
        different copies of same molecule will assign same entity_mol_id (int).
        for each mol, assign mol_atom_index starting from 0.

        Args:
            atom_array (AtomArray): Biotite AtomArray object
            entity_poly_type (Optional[dict[str, str]]): label_entity_id to entity.poly_type.
                                                         Defaults to None.
            check_final_equiv (bool, optional): check if the final mol_ids of same entity_mol_id are all equivalent.

        Returns:
            AtomArray: Biotite AtomArray object with new annotations
                - mol_id: atoms with covalent bonds connected, 0-based int
                - entity_mol_id: equivalent molecules will assign same entity_mol_id, 0-based int
                - mol_atom_index: per-molecule atom index, 0-based int
        """
        # Re-assign mol_id to AtomArray after break asym bonds
        if entity_poly_type is None:
            mol_indices: list[np.ndarray] = get_molecule_indices(atom_array)
        else:
            bonds_filtered = AddAtomArrayAnnot.remove_bonds_between_polymer_chains(
                atom_array, entity_poly_type
            )
            mol_indices: list[np.ndarray] = get_molecule_indices(bonds_filtered)

        # assign mol_id
        mol_ids = np.array([-1] * len(atom_array), dtype=np.int32)
        for mol_id, atom_indices in enumerate(mol_indices):
            mol_ids[atom_indices] = mol_id
        atom_array.set_annotation("mol_id", mol_ids)

        assert not np.isin(-1, atom_array.mol_id), "Some mol_id is not assigned."
        assert len(np.unique(atom_array.mol_id)) == len(
            mol_indices
        ), "Some mol_id is duplicated."

        # assign entity_mol_id
        # --------------------
        # first atom of mol with infos in attributes, eg: info.num_atoms, info.bonds, ...
        ref_mol_infos = []
        # perm for keep multiple chains in one mol are together and in same chain order
        new_atom_perm = []
        chain_starts = struc.get_chain_starts(atom_array, add_exclusive_stop=False)
        entity_mol_ids = np.zeros_like(mol_ids)
        for mol_id, atom_indices in enumerate(mol_indices):
            atom_indices = np.sort(atom_indices)
            # keep multiple chains-mol has same chain order in different copies
            chain_perm = np.argsort(
                atom_array.label_entity_id[atom_indices], kind="stable"
            )
            atom_indices = atom_indices[chain_perm]
            # save indices for finally re-ordering atom_array
            new_atom_perm.extend(atom_indices)

            # check mol equal, keep chain order consistent with atom_indices
            mol_chain_mask = np.isin(atom_indices, chain_starts)
            entity_ids = atom_array.label_entity_id[atom_indices][
                mol_chain_mask
            ].tolist()

            match_entity_mol_id = None
            for entity_mol_id, mol_info in enumerate(ref_mol_infos):
                # check mol equal
                # same entity_ids and same atom name will assign same entity_mol_id
                if entity_ids != mol_info.entity_ids:
                    continue

                if len(atom_indices) != len(mol_info.atom_name):
                    continue

                atom_name_not_equal = (
                    atom_array.atom_name[atom_indices] != mol_info.atom_name
                )
                if np.any(atom_name_not_equal):
                    diff_indices = np.where(atom_name_not_equal)[0]
                    query_atom = atom_array[atom_indices[diff_indices[0]]]
                    ref_atom = atom_array[mol_info.atom_indices[diff_indices[0]]]
                    logger.warning(
                        f"Two mols have same entity_ids and same number of atoms, but diff atom name:\n{query_atom=}\n{ref_atom=}"
                    )
                    continue

                # pass all checks, it is a match
                match_entity_mol_id = entity_mol_id
                break

            if match_entity_mol_id is None:  # not found match mol
                # use first atom as a placeholder for mol info.
                mol_info = atom_array[atom_indices[0]]
                mol_info.atom_indices = atom_indices
                mol_info.entity_ids = entity_ids
                mol_info.atom_name = atom_array.atom_name[atom_indices]
                mol_info.entity_mol_id = len(ref_mol_infos)
                ref_mol_infos.append(mol_info)
                match_entity_mol_id = mol_info.entity_mol_id

            entity_mol_ids[atom_indices] = match_entity_mol_id

        atom_array.set_annotation("entity_mol_id", entity_mol_ids)

        # re-order atom_array to make atoms with same mol_id together.
        atom_array = atom_array[new_atom_perm]

        # assign mol_atom_index
        mol_starts = get_starts_by(
            atom_array, by_annot="mol_id", add_exclusive_stop=True
        )
        mol_atom_index = np.zeros_like(atom_array.mol_id, dtype=np.int32)
        for start, stop in zip(mol_starts[:-1], mol_starts[1:]):
            mol_atom_index[start:stop] = np.arange(stop - start)
        atom_array.set_annotation("mol_atom_index", mol_atom_index)

        # check mol equivalence again
        if check_final_equiv:
            num_mols = len(mol_starts) - 1
            for i in range(num_mols):
                for j in range(i + 1, num_mols):
                    start_i, stop_i = mol_starts[i], mol_starts[i + 1]
                    start_j, stop_j = mol_starts[j], mol_starts[j + 1]
                    if (
                        atom_array.entity_mol_id[start_i]
                        != atom_array.entity_mol_id[start_j]
                    ):
                        continue
                    for key in ["res_name", "atom_name", "mol_atom_index"]:
                        # not check res_id for ligand may have different res_id
                        annot = getattr(atom_array, key)
                        assert np.all(
                            annot[start_i:stop_i] == annot[start_j:stop_j]
                        ), f"not equal {key} when find_equiv_mol_and_assign_ids()"

        return atom_array

    @staticmethod
    def add_tokatom_idx(atom_array: AtomArray) -> AtomArray:
        """
        Add a tokatom_idx corresponding to the residue and atom name for each atom.
        For non-standard residues or ligands, the tokatom_idx should be set to 0.

        Parameters:
            atom_array (AtomArray): The AtomArray object to which the annotation will be added.

        Returns:
            AtomArray: The AtomArray object with the 'tokatom_idx' annotation added.
        """
        # pre-defined atom name order for tokatom_idx
        tokatom_idx_list = []
        for atom in atom_array:
            atom_name_position = RES_ATOMS_DICT.get(atom.res_name, None)
            if atom.mol_type == "ligand" or atom_name_position is None:
                tokatom_idx = 0
            else:
                tokatom_idx = atom_name_position[atom.atom_name]
            tokatom_idx_list.append(tokatom_idx)
        atom_array.set_annotation("tokatom_idx", tokatom_idx_list)
        return atom_array

    @staticmethod
    def add_mol_id(atom_array: AtomArray) -> AtomArray:
        """
        Assign a unique integer to each molecule in the structure.

        Args:
            atom_array (AtomArray): Biotite AtomArray object
        Returns:
            AtomArray: Biotite AtomArray object with new annotations
                - mol_id: atoms with covalent bonds connected, 0-based int
        """
        mol_indices = get_molecule_indices(atom_array)

        # assign mol_id
        mol_ids = np.array([-1] * len(atom_array), dtype=np.int32)
        for mol_id, atom_indices in enumerate(mol_indices):
            mol_ids[atom_indices] = mol_id
        atom_array.set_annotation("mol_id", mol_ids)
        return atom_array

    @staticmethod
    def unique_chain_and_add_ids(atom_array: AtomArray) -> AtomArray:
        """
        Unique chain ID and add asym_id, entity_id, sym_id.
        Adds a number to the chain ID to make chain IDs in the assembly unique.
        Example: [A, B, A, B, C] ==> [A0, B0, A1, B1, C0]

        Args:
            atom_array (AtomArray): Biotite AtomArray object.

        Returns:
            AtomArray: Biotite AtomArray object with new annotations:
                - asym_id_int: np.array(int)
                - entity_id_int: np.array(int)
                - sym_id_int: np.array(int)
        """
        entity_id_uniq = np.sort(np.unique(atom_array.label_entity_id))
        entity_id_dict = {e: i for i, e in enumerate(entity_id_uniq)}
        asym_ids = np.zeros(len(atom_array), dtype=int)
        entity_ids = np.zeros(len(atom_array), dtype=int)
        sym_ids = np.zeros(len(atom_array), dtype=int)
        chain_ids = np.zeros(len(atom_array), dtype="U4")
        counter = Counter()
        start_indices = struc.get_chain_starts(atom_array, add_exclusive_stop=True)
        for i in range(len(start_indices) - 1):
            start_i = start_indices[i]
            stop_i = start_indices[i + 1]
            asym_ids[start_i:stop_i] = i

            entity_id = atom_array.label_entity_id[start_i]
            entity_ids[start_i:stop_i] = entity_id_dict[entity_id]

            # sym_id counts copies of the same entity seen so far
            sym_ids[start_i:stop_i] = counter[entity_id]
            counter[entity_id] += 1
            new_chain_id = f"{atom_array.chain_id[start_i]}{sym_ids[start_i]}"
            chain_ids[start_i:stop_i] = new_chain_id

        atom_array.set_annotation("asym_id_int", asym_ids)
        atom_array.set_annotation("entity_id_int", entity_ids)
        atom_array.set_annotation("sym_id_int", sym_ids)
        atom_array.chain_id = chain_ids
        return atom_array

    @staticmethod
    def add_int_id(atom_array: AtomArray) -> AtomArray:
        """
        Add integer asym_id, entity_id and sym_id annotations.
        Unlike unique_chain_and_add_ids, this method does NOT modify chain_id.

        Args:
            atom_array (AtomArray): Biotite AtomArray object.

        Returns:
            AtomArray: Biotite AtomArray object with new annotations:
                - asym_id_int: np.array(int)
                - entity_id_int: np.array(int)
                - sym_id_int: np.array(int)
        """
        entity_id_uniq = np.sort(np.unique(atom_array.label_entity_id))
        entity_id_dict = {e: i for i, e in enumerate(entity_id_uniq)}
        asym_ids = np.zeros(len(atom_array), dtype=int)
        entity_ids = np.zeros(len(atom_array), dtype=int)
        sym_ids = np.zeros(len(atom_array), dtype=int)
        counter = Counter()
        start_indices = struc.get_chain_starts(atom_array, add_exclusive_stop=True)
        for i in range(len(start_indices) - 1):
            start_i = start_indices[i]
            stop_i = start_indices[i + 1]
            asym_ids[start_i:stop_i] = i

            entity_id = atom_array.label_entity_id[start_i]
            entity_ids[start_i:stop_i] = entity_id_dict[entity_id]

            # sym_id counts copies of the same entity seen so far
            sym_ids[start_i:stop_i] = counter[entity_id]
            counter[entity_id] += 1

        atom_array.set_annotation("asym_id_int", asym_ids)
        atom_array.set_annotation("entity_id_int", entity_ids)
        atom_array.set_annotation("sym_id_int", sym_ids)
        return atom_array

    @staticmethod
    def add_ref_feat_info(
        atom_array: AtomArray,
    ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Get info of reference structure of atoms based on the atom array.

        Args:
            atom_array (AtomArray): The atom array.

        Returns:
            tuple:
                ref_pos (numpy.ndarray): Atom positions in the reference conformer,
                                         with a random rotation and translation applied.
                                         Atom positions are given in Å. Shape=(num_atom, 3).
                ref_charge (numpy.ndarray): Charge for each atom in the reference conformer. Shape=(num_atom)
                ref_mask (numpy.ndarray): Mask indicating which atom slots are used in the reference conformer. Shape=(num_atom)
        """
        info_dict = {}
        for ccd_id in np.unique(atom_array.res_name):
            # create ref conformer for each CCD ID
            ref_result = get_ccd_ref_info(ccd_id)
            if not ref_result:
                # get conformer failed will result in an empty dictionary
                continue
            for space_uid in np.unique(
                atom_array[atom_array.res_name == ccd_id].ref_space_uid
            ):
                info_dict[space_uid] = [
                    ref_result["atom_map"],
                    ref_result["coord"],
                    ref_result["charge"],
                    ref_result["mask"],
                ]

        ref_mask = []  # [N_atom]
        ref_pos = []  # [N_atom, 3]
        ref_charge = []  # [N_atom]
        for atom in atom_array:
            ref_result = info_dict.get(atom.ref_space_uid)
            if ref_result is None:
                # get conformer failed: emit zero placeholders with mask 0
                ref_mask.append(0)
                ref_pos.append([0.0, 0.0, 0.0])
                ref_charge.append(0)

            else:
                atom_map, coord, charge, mask = ref_result
                atom_sub_idx = atom_map[atom.atom_name]
                ref_mask.append(mask[atom_sub_idx])
                ref_pos.append(coord[atom_sub_idx])
                ref_charge.append(charge[atom_sub_idx])

        ref_pos = np.array(ref_pos)
        ref_charge = np.array(ref_charge).astype(int)
        ref_mask = np.array(ref_mask).astype(int)
        return ref_pos, ref_charge, ref_mask

    @staticmethod
    def add_res_perm(
        atom_array: AtomArray,
    ) -> list[list[int]]:
        """
        Get permutations of each atom within the residue.

        Args:
            atom_array (AtomArray): biotite AtomArray object.

        Returns:
            list[list[int]]: 2D list of (N_atom, N_perm)
        """
        starts = get_residue_starts(atom_array, add_exclusive_stop=True)
        res_perm = []
        for start, stop in zip(starts[:-1], starts[1:]):
            res_atom = atom_array[start:stop]
            curr_res_atom_idx = list(range(len(res_atom)))

            res_dict = get_ccd_ref_info(ccd_code=res_atom.res_name[0])
            if not res_dict:
                # no reference info: each atom only permutes with itself
                res_perm.extend([[i] for i in curr_res_atom_idx])
                continue

            perm_array = res_dict["perm"]  # [N_atoms, N_perm]
            perm_atom_idx_in_res_order = [
                res_dict["atom_map"][i] for i in res_atom.atom_name
            ]
            perm_idx_to_present_atom_idx = dict(
                zip(perm_atom_idx_in_res_order, curr_res_atom_idx)
            )

            # keep only rows/columns whose atoms are actually present in this residue
            present_row_mask = np.isin(perm_array[:, 0], perm_atom_idx_in_res_order)
            perm_array_row_filtered = perm_array[present_row_mask]

            present_col_mask = np.isin(
                perm_array_row_filtered, perm_atom_idx_in_res_order
            ).all(axis=0)
            perm_array_filtered = perm_array_row_filtered[:, present_col_mask]

            # replace the elem in new_perm_array according to the perm_idx_to_present_atom_idx dict
            new_perm_array = np.vectorize(perm_idx_to_present_atom_idx.get)(
                perm_array_filtered
            )

            assert (
                new_perm_array.shape[1] <= 1000
                and new_perm_array.shape[1] <= perm_array.shape[1]
            )
            res_perm.extend(new_perm_array.tolist())
        return res_perm

    @staticmethod
    def add_ref_info_and_res_perm(atom_array: AtomArray) -> AtomArray:
        """
        Add info of reference structure of atoms to the atom array.

        Args:
            atom_array (AtomArray): The atom array.

        Returns:
            AtomArray: The atom array with the 'ref_pos', 'ref_charge', 'ref_mask', 'res_perm' annotations added.
        """
        ref_pos, ref_charge, ref_mask = AddAtomArrayAnnot.add_ref_feat_info(atom_array)
        res_perm = AddAtomArrayAnnot.add_res_perm(atom_array)

        str_res_perm = []  # encode [N_atom, N_perm] -> list[str]
        for i in res_perm:
            str_res_perm.append("_".join([str(j) for j in i]))

        assert (
            len(atom_array)
            == len(ref_pos)
            == len(ref_charge)
            == len(ref_mask)
            == len(res_perm)
        ), f"{len(atom_array)=}, {len(ref_pos)=}, {len(ref_charge)=}, {len(ref_mask)=}, {len(str_res_perm)=}"

        atom_array.set_annotation("ref_pos", ref_pos)
        atom_array.set_annotation("ref_charge", ref_charge)
        atom_array.set_annotation("ref_mask", ref_mask)
        atom_array.set_annotation("res_perm", str_res_perm)
        return atom_array
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/substructure_perms.py
ADDED
|
@@ -0,0 +1,257 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import itertools
|
| 16 |
+
from collections import defaultdict
|
| 17 |
+
|
| 18 |
+
import numpy as np
|
| 19 |
+
from rdkit import Chem
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def neutralize_atoms(mol: Chem.Mol):
    """Neutralize charged atoms of *mol* in place and return it.

    Uses the RDKit-cookbook SMARTS for atoms carrying a formal charge that is
    not balanced by a neighbouring counter-charge; each hit has its charge
    zeroed and its explicit-H count adjusted accordingly.
    """
    charged_pattern = Chem.MolFromSmarts(
        "[+1!h0!$([*]~[-1,-2,-3,-4]),-1!#4!#5!$([*]~[+1,+2,+3,+4])]"
    )
    # GetSubstructMatches returns 1-tuples here; the loop is simply empty when
    # there is nothing to neutralize.
    for match in mol.GetSubstructMatches(charged_pattern):
        atom = mol.GetAtomWithIdx(match[0])
        charge = atom.GetFormalCharge()
        h_count = atom.GetTotalNumHs()
        atom.SetFormalCharge(0)
        atom.SetNumExplicitHs(h_count - charge)
        atom.UpdatePropertyCache()
    return mol
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def recursive_permutation(atom_inds, permutation_list, res):
    """
    Expand every combination of the given permutation groups over *atom_inds*.

    Each element of *permutation_list* is a list of alternative orderings for
    one group of atom indices; one permuted copy of *atom_inds* is appended to
    *res* for every combination across groups.
    """

    def _apply_ordering(order, values):
        # Overwrite, left to right, the slots whose current value belongs to
        # `values` with the values in the requested order.
        slots = [pos for pos, val in enumerate(order) if val in values]
        for k, pos in enumerate(slots):
            order[pos] = values[k]
        return order

    if not permutation_list:
        res.append(atom_inds)
        return
    remaining = permutation_list.copy()
    first_group = remaining.pop(0)
    for ordering in first_group:
        recursive_permutation(_apply_ordering(atom_inds.copy(), ordering), remaining, res)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def augment_atom_maps_with_conjugate_terminal_groups(
    original_maps, atomic_number_mapping, terminal_group_tuples, MaxMatches=1e6
):
    """
    Augment atom maps from GetSubstructMatches with extra symmetry from conjugated terminal groups.

    Parameters
    --------------
    original_maps: Tuple(Tuples), all possible atom index mappings; the mappings must range from 0 to n_heavy_atom-1 (i.e. no gap in indexing)
    atomic_number_mapping: dict, mapping from atom (positional) indices to atomic numbers, used for splitting/removing different types of atoms in each terminal group
    terminal_group_tuples: Tuple(Tuples), pairs of atoms whose bonds match the SMARTS string. Ex: ((0, 1), (2, 1), (10, 9), (11, 9), (12, 9), (14, 13), (15, 13))
    MaxMatches: int, cutoff for total number of matches (n_original_perm * n_conjugate_perm)

    Returns
    --------------
    augmented_maps: Tuple(Tuples), original_maps augmented by multiplying in the permutations induced by terminal_group_tuples.
    """

    def _terminal_atom_cluster_from_pairs(edges):
        # Build an undirected adjacency map; terminal atoms bonded to the same
        # centre atom become neighbours of that centre.
        graph = defaultdict(set)
        for u, v in edges:
            graph[u].add(v)
            graph[v].add(u)
        return graph

    def _split_sets_by_mapped_values(list_of_sets, mapping):
        # Partition each set by the mapped value (atomic number) of its elements,
        # so only same-element terminal atoms are treated as interchangeable.
        result = []
        for s in list_of_sets:
            mapped_sets = {}
            for elem in s:
                mapped_value = mapping.get(elem)
                if mapped_value not in mapped_sets:
                    mapped_sets[mapped_value] = set()
                mapped_sets[mapped_value].add(elem)
            result.extend(mapped_sets.values())
        return result

    # group terminal group tuples with common atom indices: [{0, 2}, {10, 11, 12}, {14, 15}]
    terminal_atom_clusters = _terminal_atom_cluster_from_pairs(terminal_group_tuples)
    # Each kept group multiplies the count by at least 2 (typically <= 3! = 6);
    # log base 3 bounds how many groups fit under MaxMatches.
    MaxTerminalGroups = max(
        1, int(np.ceil(np.emath.logn(3, MaxMatches / len(original_maps))))
    )
    # if MaxTerminalGroups is less than the total number of terminal groups, keep the first {MaxTerminalGroups} groups (to remove randomness)

    perm_groups = sorted(
        [
            atom_inds
            for common_id, atom_inds in terminal_atom_clusters.items()
            if len(atom_inds) > 1
        ]
    )[: min(MaxTerminalGroups, len(terminal_atom_clusters))]

    # within each terminal group, if there are different atom types, split by atom type (if only one atom is left, discard)
    perm_groups = _split_sets_by_mapped_values(perm_groups, atomic_number_mapping)
    perm_groups = [p for p in perm_groups if len(p) > 1]

    # all permutations according to symmetric conjugate terminal atoms: [[(0, 2), (2, 0)], [(10, 11, 12), (10, 12, 11), (11, 10, 12), (11, 12, 10), (12, 10, 11), (12, 11, 10)], [(14, 15), (15, 14)]]
    perm_groups = [sorted(list(itertools.permutations(g))) for g in perm_groups]

    # recursively permute the original mappings
    augmented_maps = []
    for initial_mapping in original_maps:
        recursive_permutation(list(initial_mapping), perm_groups, augmented_maps)

    # Convert to the same data type as in original_maps
    augmented_maps = tuple(tuple(a) for a in augmented_maps)
    # Remove duplicates: original_maps might have already permuted some of the conjugate terminal group indices
    return tuple(set(augmented_maps))
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def _get_substructure_perms(
    mol: Chem.Mol,
    Neutralize: bool = False,
    CheckStereochem: bool = True,
    SymmetrizeConjugatedTerminal: bool = True,
    MaxMatches: int = 512,
) -> np.ndarray:
    """
    Compute symmetry-equivalent atom index permutations via self substructure matching.

    Args:
        mol: RDKit molecule; may contain explicit hydrogens.
        CheckStereochem: whether to assure stereochem does not change after permutation
        Neutralize: if true, neutralize the mol before computing the permutations
        SymmetrizeConjugatedTerminal: if true, consider symmetrization of conjugated terminal groups
        MaxMatches: int, cutoff for total number of matches

    Returns:
        np.ndarray, shape=[num_perms, num_atoms]; hydrogen columns map to themselves.
    """
    # Record each atom's original index (hydrogens included) before RemoveHs,
    # both as an atom property and as the full index list.
    ori_idx_w_h = []
    for atom in mol.GetAtoms():
        atom.SetProp("ori_idx_w_h", str(atom.GetIdx()))
        ori_idx_w_h.append(atom.GetIdx())

    # Attention !!!
    # Remove Hs; Otherwise, there will be too many matches.
    mol = Chem.RemoveHs(mol)
    if Neutralize:
        mol = neutralize_atoms(mol)

    # Get substructure matches (matching the molecule against itself yields
    # the automorphisms of the heavy-atom graph)
    base_perms = np.array(
        mol.GetSubstructMatches(mol, uniquify=False, maxMatches=MaxMatches)
    )
    assert len(base_perms) > 0, "no matches found, error"
    # Check stereochem: keep only permutations that preserve the canonical
    # atom ranking computed with stereochemistry taken into account.
    if CheckStereochem:
        chem_order = np.array(
            list(Chem.rdmolfiles.CanonicalRankAtoms(mol, breakTies=False))
        )
        perms_mask = (chem_order[base_perms] == chem_order[None]).sum(
            -1
        ) == mol.GetNumAtoms()
        base_perms = base_perms[perms_mask]

    # Add terminal conjugate groups (degree-1 O/N attached to a conjugated centre)
    sma = "[O,N;D1;$([O,N;D1]-[*]=[O,N;D1]),$([O,N;D1]=[*]-[O,N;D1])]~[*]"
    patt = Chem.MolFromSmarts(sma)
    terminal_group_tuples = mol.GetSubstructMatches(patt)
    if (
        len(terminal_group_tuples) > 0 and SymmetrizeConjugatedTerminal
    ):  # Only augment if there exist conjugate pairs and the user enables it
        atomic_number_mapping = {
            i: atom.GetAtomicNum() for i, atom in enumerate(mol.GetAtoms())
        }
        base_perms = augment_atom_maps_with_conjugate_terminal_groups(
            tuple(tuple(a) for a in base_perms),
            atomic_number_mapping,
            terminal_group_tuples,
            MaxMatches,
        )
        base_perms = np.array(base_perms)

    if len(base_perms) > MaxMatches:
        base_perms = base_perms[:MaxMatches]

    # Build maps between heavy-atom indices (post-RemoveHs) and original indices.
    new_to_ori_idx_map = {}
    ori_to_new_idx_map = {}
    for atom in mol.GetAtoms():
        ori_idx = int(atom.GetProp("ori_idx_w_h"))
        new_idx = atom.GetIdx()
        new_to_ori_idx_map[new_idx] = ori_idx
        ori_to_new_idx_map[ori_idx] = new_idx

    # Re-express the permutations in the original (with-hydrogen) numbering.
    base_perms = np.vectorize(new_to_ori_idx_map.get)(base_perms)
    perms = np.zeros(shape=(base_perms.shape[0], len(ori_idx_w_h)))
    for i in range(len(ori_idx_w_h)):
        if i in ori_to_new_idx_map:
            perms[:, i] = base_perms[:, ori_to_new_idx_map[i]]
        else:
            # The position of the H atom will not be exchanged.
            perms[:, i] = i
    return perms
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
def get_substructure_perms(
    mol: Chem.Mol,
    CheckStereochem: bool = True,
    SymmetrizeConjugatedTerminal: bool = True,
    MaxMatches: int = 512,
    KeepProtonation: bool = False,
) -> np.ndarray:
    """
    Public wrapper computing symmetry-equivalent atom permutations of *mol*.

    Args:
        mol: input RDKit molecule.
        CheckStereochem: whether to assure stereochem does not change after permutation.
        SymmetrizeConjugatedTerminal: if true, consider symmetrization of conjugated terminal groups.
        MaxMatches: cutoff for the total number of permutations returned.
        KeepProtonation: if true, use only the given protonation state;
            otherwise also merge in permutations of the neutralized molecule.

    Returns:
        np.ndarray, shape=[num_perms, num_atoms].
    """
    kwargs = {
        "CheckStereochem": CheckStereochem,
        "SymmetrizeConjugatedTerminal": SymmetrizeConjugatedTerminal,
        "MaxMatches": MaxMatches,
    }

    if KeepProtonation:
        perms = _get_substructure_perms(mol, Neutralize=False, **kwargs)
    else:
        # Have to deduplicate permutations across the two protonation states.
        # np.vstack replaces np.row_stack, which was removed in NumPy 2.0
        # (row_stack was an alias of vstack).
        perms = np.unique(
            np.vstack(
                (
                    _get_substructure_perms(mol, Neutralize=False, **kwargs),
                    _get_substructure_perms(mol, Neutralize=True, **kwargs),
                )
            ),
            axis=0,
        )

    nperm = len(perms)
    if nperm > MaxMatches:
        # NOTE(review): unseeded np.random makes this subset non-deterministic
        # across calls — confirm whether determinism is required by callers.
        perms = perms[np.random.choice(range(nperm), MaxMatches, replace=False)]
    return perms
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
def test():
    """Smoke test: print permutation shapes for a few drug-like SMILES."""
    examples = (
        "C1=CC=CC=C1",
        "CC(=O)OC1=CC=CC=C1C(=O)O",
        "C[C@H](CCC(=O)O)[C@H]1CC[C@@H]2[C@@]1(CC[C@H]3[C@H]2CC=C4[C@@]3(CC[C@@H](C4)O)C)C",
        "CN1C=NC2=C1C(=O)N(C(=O)N2C)C",
    )
    for smiles in examples:
        print(smiles)
        molecule = Chem.MolFromSmiles(smiles)
        perms = get_substructure_perms(molecule)
        print(perms.shape)
        print(perms.T)
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
if __name__ == "__main__":
    # Manual smoke test: run this module directly to inspect the permutations.
    test()
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/tokenizer.py
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import biotite.structure as struc
|
| 16 |
+
import numpy as np
|
| 17 |
+
from biotite.structure import AtomArray
|
| 18 |
+
|
| 19 |
+
from protenix.data.constants import ELEMS, STD_RESIDUES
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class Token(object):
    """
    Container for a token value plus arbitrary named annotations.

    Annotations are kept in an internal dict and exposed as plain attributes;
    any attribute other than ``value`` is routed into that dict.

    Example:
        >>> token = Token(1)
        >>> token.value
        1
        >>> token.atom_indices = [1, 2, 3]
    """

    def __init__(self, value, **kwargs):
        self.value = value
        self._annot = dict(kwargs)

    def __getattr__(self, attr):
        # Only consulted when normal attribute lookup fails; fall back to the
        # annotation dict (fetched via the base class to avoid recursion).
        annot = super().__getattribute__("_annot")
        if attr in annot:
            return annot[attr]
        raise AttributeError(
            f"'{type(self).__name__}' object has no attribute '{attr}'"
        )

    def __repr__(self):
        annots = ",".join(f"{k}={v}" for k, v in self._annot.items())
        return f"Token({self.value}, {annots})"

    def __setattr__(self, attr, value):
        # 'value' and the annotation store itself are real attributes;
        # everything else becomes an annotation.
        if attr in ("_annot", "value"):
            super().__setattr__(attr, value)
        else:
            self._annot[attr] = value
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class TokenArray(object):
    """
    An ordered collection of Token objects supporting batch annotation access.
    """

    def __init__(self, tokens: list[Token]):
        self.tokens = tokens

    def __repr__(self):
        body = "".join(f"\t{tok}\n" for tok in self.tokens)
        return f"TokenArray(\n{body})"

    def __len__(self):
        return len(self.tokens)

    def __iter__(self):
        return iter(self.tokens)

    def __getitem__(self, index):
        # An int yields a single Token; any iterable of indices yields a
        # new TokenArray (fancy indexing).
        if isinstance(index, int):
            return self.tokens[index]
        return TokenArray([self.tokens[i] for i in index])

    def get_annotation(self, category):
        """Return the given annotation of every token, in order."""
        return [tok._annot[category] for tok in self.tokens]

    def set_annotation(self, category, values):
        """Assign one annotation value per token; lengths must match."""
        assert len(values) == len(
            self.tokens
        ), "Length of values must match the number of tokens"
        for tok, val in zip(self.tokens, values):
            tok._annot[category] = val

    def get_values(self):
        """Return the value of every token, in order."""
        return [tok.value for tok in self.tokens]
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
class AtomArrayTokenizer(object):
    """
    Tokenize an AtomArray object into a list of Token objects.

    Standard polymer residues produce one token per residue; ligand atoms and
    non-standard residues produce one token per atom.
    """

    def __init__(self, atom_array: AtomArray):
        # Expects per-atom annotations: res_name, mol_type, atom_name, element,
        # and (for get_token_array) centre_atom_mask.
        self.atom_array = atom_array

    def tokenize(self) -> list[Token]:
        """
        Ref: AlphaFold3 SI Chapter 2.6
        Tokenize an AtomArray object into a list of Token objects.

        Returns:
            list : a list of Token objects.
        """
        tokens = []
        total_atom_num = 0
        for res in struc.residue_iter(self.atom_array):
            atom_num = len(res)
            first_atom = res[0]
            res_name = first_atom.res_name
            mol_type = first_atom.mol_type
            res_token = STD_RESIDUES.get(res_name, None)
            if res_token is not None and mol_type != "ligand":
                # for std residues: a single token covering the whole residue
                token = Token(res_token)
                atom_indices = [
                    i for i in range(total_atom_num, total_atom_num + atom_num)
                ]
                atom_names = [self.atom_array[i].atom_name for i in atom_indices]
                token.atom_indices = atom_indices
                token.atom_names = atom_names
                tokens.append(token)
                total_atom_num += atom_num
            else:
                # for ligand and non-std residues: one token per atom
                for atom in res:
                    atom_elem = atom.element
                    atom_token = ELEMS.get(atom_elem, None)
                    if atom_token is None:
                        raise ValueError(f"Unknown atom element: {atom_elem}")
                    token = Token(atom_token)
                    token.atom_indices = [total_atom_num]
                    token.atom_names = [
                        self.atom_array[token.atom_indices[0]].atom_name
                    ]
                    tokens.append(token)
                    total_atom_num += 1

        # Every atom must have been assigned to exactly one token.
        assert total_atom_num == len(self.atom_array)
        return tokens

    def _set_token_annotations(self, token_array: TokenArray) -> TokenArray:
        """
        Set annotations for the token_array.

        The annotations include:
            - centre_atom_index: the centre atom's index in the atom array

        Args:
            token_array (TokenArray): TokenArray object created by tokenizing a bioassembly AtomArray.

        Returns:
            TokenArray: TokenArray object with annotations.
        """
        centre_atom_indices = np.where(self.atom_array.centre_atom_mask == 1)[0]
        token_array.set_annotation("centre_atom_index", centre_atom_indices)
        # Exactly one centre atom per token is required.
        assert len(token_array) == len(centre_atom_indices)
        return token_array

    def get_token_array(self) -> TokenArray:
        """
        Get TokenArray object with annotations (atom_indices, centre_atom_index).

        Returns:
            TokenArray: The TokenArray object with annotations, e.g.
            TokenArray(
                Token(1, atom_indices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],centre_atom_index=2,
                    atom_names=['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'NE', 'CZ', 'NH1', 'NH2'])
                Token(15, atom_indices=[11, 12, 13, 14, 15, 16],centre_atom_index=13,
                    atom_names=['N', 'CA', 'C', 'O', 'CB', 'OG'])
                Token(15, atom_indices=[17, 18, 19, 20, 21, 22],centre_atom_index=19,
                    atom_names=['N', 'CA', 'C', 'O', 'CB', 'OG'])
            )
            It satisfies the following format:
            Token($token_index, atom_indices=[global_atom_indices],
                centre_atom_index=global_atom_index, atom_names=[names])
        """
        tokens = self.tokenize()
        token_array = TokenArray(tokens=tokens)
        token_array = self._set_token_annotations(token_array=token_array)
        return token_array
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/data/utils.py
ADDED
|
@@ -0,0 +1,675 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import copy
|
| 16 |
+
import os
|
| 17 |
+
import re
|
| 18 |
+
from collections import defaultdict
|
| 19 |
+
from typing import Mapping, Sequence
|
| 20 |
+
|
| 21 |
+
import biotite.structure as struc
|
| 22 |
+
import numpy as np
|
| 23 |
+
import torch
|
| 24 |
+
from biotite.structure import AtomArray
|
| 25 |
+
from biotite.structure.io import pdbx
|
| 26 |
+
from biotite.structure.io.pdb import PDBFile
|
| 27 |
+
|
| 28 |
+
from protenix.data.constants import DNA_STD_RESIDUES, PRO_STD_RESIDUES, RNA_STD_RESIDUES
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def remove_numbers(s: str) -> str:
    """
    Return *s* with every run of digits deleted.

    Args:
        s (str): input string

    Returns:
        str: the string with all digits removed.
    """
    # Splitting on digit runs and rejoining is equivalent to re.sub(r"\d+", "", s).
    return "".join(re.split(r"\d+", s))
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def int_to_letters(n: int) -> str:
    """
    Convert a positive integer to spreadsheet-style letters.

    Useful for converting a chain index to a label_asym_id.

    Args:
        n (int): int number
    Returns:
        str: letters, e.g. 1 -> A, 2 -> B, 27 -> AA, 28 -> AB
            (returns "" for n <= 0).
    """
    letters = []
    while n > 0:
        n, remainder = divmod(n - 1, 26)
        letters.append(chr(ord("A") + remainder))
    return "".join(reversed(letters))
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def get_starts_by(
    atom_array: AtomArray, by_annot: str, add_exclusive_stop=False
) -> np.ndarray:
    """Get start indices of consecutive runs of an annotation in an AtomArray.

    Args:
        atom_array (AtomArray): Biotite AtomArray
        by_annot (str): annotation to group by, eg: 'chain_id', 'res_id', 'res_name'
        add_exclusive_stop (bool, optional): append the exclusive stop
            (len(atom_array)) as a final entry. Defaults to False.

    Returns:
        np.ndarray: start indices of each group, shape = (n,), eg: [0, 10, 20, 30, 40]
    """
    values = getattr(atom_array, by_annot)
    # A new run begins wherever consecutive annotation values differ; the +1
    # shifts from "last index of the previous run" to "first index of the next".
    run_starts = np.flatnonzero(values[1:] != values[:-1]) + 1

    # Index 0 is always a start; optionally append the exclusive stop.
    pieces = [[0], run_starts]
    if add_exclusive_stop:
        pieces.append([atom_array.array_length()])
    return np.concatenate(pieces)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def get_ligand_polymer_bond_mask(
    atom_array: AtomArray, lig_include_ions=False
) -> np.ndarray:
    """
    Ref AlphaFold3 SI Chapter 3.7.1.
    Get bonds between the bonded ligand and its parent chain.

    Args:
        atom_array (AtomArray): biotite atom array object.
        lig_include_ions (bool): whether to include ions in the ligand.

    Returns:
        np.ndarray: bond records between the bonded ligand and its parent chain.
            e.g. np.array([[atom1, atom2, bond_order]...])
    """
    if not lig_include_ions:
        # bonded ligand excludes ions: a chain containing exactly one atom is
        # treated as an ion here
        unique_chain_id, counts = np.unique(
            atom_array.label_asym_id, return_counts=True
        )
        chain_id_to_count_map = dict(zip(unique_chain_id, counts))
        ions_mask = np.array(
            [
                chain_id_to_count_map[label_asym_id] == 1
                for label_asym_id in atom_array.label_asym_id
            ]
        )

        lig_mask = (atom_array.mol_type == "ligand") & ~ions_mask
    else:
        lig_mask = atom_array.mol_type == "ligand"

    # identify polymer by mol_type (protein, rna, dna, ligand)
    polymer_mask = np.isin(atom_array.mol_type, ["protein", "rna", "dna"])

    # NOTE(review): reaches into biotite's private BondList storage
    # (bonds._bonds, rows of [atom_i, atom_j, bond_type]) — fragile across
    # biotite versions; confirm on upgrade.
    idx_i = atom_array.bonds._bonds[:, 0]
    idx_j = atom_array.bonds._bonds[:, 1]

    # a qualifying bond joins a ligand atom and a polymer atom, in either order
    lig_polymer_bond_indices = np.where(
        (lig_mask[idx_i] & polymer_mask[idx_j])
        | (lig_mask[idx_j] & polymer_mask[idx_i])
    )[0]
    if lig_polymer_bond_indices.size == 0:
        # no ligand-polymer bonds
        lig_polymer_bonds = np.empty((0, 3)).astype(int)
    else:
        lig_polymer_bonds = atom_array.bonds._bonds[
            lig_polymer_bond_indices
        ]  # np.array([[atom1, atom2, bond_order]...])
    return lig_polymer_bonds
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def get_clean_data(atom_array: AtomArray) -> AtomArray:
    """
    Remove unresolved atoms from the AtomArray.

    Args:
        atom_array (AtomArray): The input AtomArray; must carry the
            'is_resolved' boolean annotation.

    Returns:
        AtomArray: A new AtomArray with unresolved atoms removed.
    """
    # Boolean-mask indexing already yields a new filtered array, so the
    # previous up-front .copy() (whose result was immediately discarded)
    # was redundant work and has been removed.
    return atom_array[atom_array.is_resolved]
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def save_atoms_to_cif(
    output_cif_file: str,
    atom_array: AtomArray,
    entity_poly_type: dict[str, str],
    pdb_id: str,
) -> None:
    """
    Serialize an AtomArray to a CIF file via CIFWriter.

    Args:
        output_cif_file (str): destination path of the CIF file.
        atom_array (AtomArray): the structure to serialize.
        entity_poly_type (dict[str, str]): label_entity_id -> entity poly type.
        pdb_id (str): entry id written into the CIF header.
    """
    writer = CIFWriter(atom_array, entity_poly_type)
    writer.save_to_cif(
        output_path=output_cif_file, entry_id=pdb_id, include_bonds=False
    )
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def save_structure_cif(
    atom_array: AtomArray,
    pred_coordinate: torch.Tensor,
    output_fpath: str,
    entity_poly_type: dict[str, str],
    pdb_id: str,
):
    """
    Write predicted coordinates onto a copy of the structure and save as CIF.

    Two files may be produced: the full structure at ``output_fpath`` and,
    when the input carries an ``is_resolved`` annotation, a second file with
    unresolved atoms stripped (``_wounresol.cif`` suffix).

    Args:
        atom_array (AtomArray): original structure (coordinates are replaced
            on a deep copy; the input itself is not mutated).
        pred_coordinate (torch.Tensor): predicted coordinates for the structure.
        output_fpath (str): output path of the CIF file.
        entity_poly_type (dict[str, str]): label_entity_id -> entity poly type.
        pdb_id (str): entry id written into the CIF header.
    """
    structure = copy.deepcopy(atom_array)
    structure.coord = pred_coordinate.cpu().numpy()
    save_atoms_to_cif(output_fpath, structure, entity_poly_type, pdb_id)

    # Optionally emit a second file with unresolved atoms removed.
    if hasattr(atom_array, "is_resolved"):
        resolved_only = get_clean_data(structure)
        save_atoms_to_cif(
            output_fpath.replace(".cif", "_wounresol.cif"),
            resolved_only,
            entity_poly_type,
            pdb_id,
        )
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
class CIFWriter:
    """
    Write an AtomArray to a CIF file, optionally with entity_poly /
    entity_poly_seq categories describing polymer entities.
    """

    def __init__(self, atom_array: AtomArray, entity_poly_type: dict[str, str] = None):
        """
        Args:
            atom_array (AtomArray): Biotite AtomArray object.
            entity_poly_type (dict[str, str], optional): A dict of label_entity_id to entity_poly_type. Defaults to None.
                If None, "the entity_poly" and "entity_poly_seq" will not be written to the cif.
        """
        self.atom_array = atom_array
        self.entity_poly_type = entity_poly_type

    def _get_entity_poly_and_entity_poly_seq_block(self):
        # Build the "entity_poly" and "entity_poly_seq" CIF categories from
        # self.entity_poly_type and the per-atom annotations of the array.
        entity_poly = defaultdict(list)
        for entity_id, entity_type in self.entity_poly_type.items():
            # All chains (label_asym_id) belonging to this entity.
            label_asym_ids = np.unique(
                self.atom_array.label_asym_id[
                    self.atom_array.label_entity_id == entity_id
                ]
            )
            label_asym_ids_str = ",".join(label_asym_ids)

            if label_asym_ids_str == "":
                # The entity not in current atom_array
                continue

            entity_poly["entity_id"].append(entity_id)
            entity_poly["pdbx_strand_id"].append(label_asym_ids_str)
            entity_poly["type"].append(entity_type)

        # entity_poly_seq lists the per-residue sequence of one representative
        # chain per entity (the first label_asym_id of each entity).
        entity_poly_seq = defaultdict(list)
        for entity_id, label_asym_ids_str in zip(
            entity_poly["entity_id"], entity_poly["pdbx_strand_id"]
        ):
            first_label_asym_id = label_asym_ids_str.split(",")[0]
            first_asym_chain = self.atom_array[
                self.atom_array.label_asym_id == first_label_asym_id
            ]
            chain_starts = struc.get_chain_starts(
                first_asym_chain, add_exclusive_stop=True
            )
            asym_chain = first_asym_chain[
                chain_starts[0] : chain_starts[1]
            ]  # ensure the asym chain is a single chain

            # One entry per residue: take the annotation of each residue's
            # first atom.
            res_starts = struc.get_residue_starts(asym_chain, add_exclusive_stop=False)
            asym_chain_entity_id = asym_chain[res_starts].label_entity_id.tolist()
            # CIF "hetero" flag is written as "y"/"n" strings.
            asym_chain_hetero = [
                "n" if not i else "y" for i in asym_chain[res_starts].hetero
            ]
            asym_chain_res_name = asym_chain[res_starts].res_name.tolist()
            asym_chain_res_id = asym_chain[res_starts].res_id.tolist()

            entity_poly_seq["entity_id"].extend(asym_chain_entity_id)
            entity_poly_seq["hetero"].extend(asym_chain_hetero)
            entity_poly_seq["mon_id"].extend(asym_chain_res_name)
            entity_poly_seq["num"].extend(asym_chain_res_id)

        block_dict = {
            "entity_poly": pdbx.CIFCategory(entity_poly),
            "entity_poly_seq": pdbx.CIFCategory(entity_poly_seq),
        }
        return block_dict

    def save_to_cif(
        self, output_path: str, entry_id: str = None, include_bonds: bool = False
    ):
        """
        Save AtomArray to cif.

        Args:
            output_path (str): Output path of cif file.
            entry_id (str, optional): The value of "_entry.id" in cif. Defaults to None.
                If None, the entry_id will be the basename of output_path (without ".cif" extension).
            include_bonds (bool, optional): Whether to include bonds in the cif. Defaults to False.
                If set to True and `array` has associated ``bonds`` , the
                intra-residue bonds will be written into the ``chem_comp_bond``
                category.
                Inter-residue bonds will be written into the ``struct_conn``
                independent of this parameter.

        """
        if entry_id is None:
            entry_id = os.path.basename(output_path).replace(".cif", "")

        # "_entry.id" category, plus entity categories when entity types are known.
        block_dict = {"entry": pdbx.CIFCategory({"id": entry_id})}
        if self.entity_poly_type:
            block_dict.update(self._get_entity_poly_and_entity_poly_seq_block())

        block = pdbx.CIFBlock(block_dict)
        # Block name = file basename with a fixed provenance suffix.
        cif = pdbx.CIFFile(
            {
                os.path.basename(output_path).replace(".cif", "")
                + "_predicted_by_protenix": block
            }
        )
        pdbx.set_structure(cif, self.atom_array, include_bonds=include_bonds)
        # set_structure wrote the atom_site category; re-fetch the block and
        # append label_entity_id, which biotite does not emit by itself.
        block = cif.block
        atom_site = block.get("atom_site")
        atom_site["label_entity_id"] = self.atom_array.label_entity_id
        cif.write(output_path)
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
def make_dummy_feature(
    features_dict: dict[str, torch.Tensor],
    dummy_feats: Sequence = ("msa",),
) -> dict[str, torch.Tensor]:
    """
    Fill ``features_dict`` in place with placeholder ("dummy") features.

    For every requested name that is neither "msa" nor "template", an all-zero
    tensor of the canonical shape is inserted. Requesting "msa" installs a
    single-sequence MSA derived from ``restype`` plus zeroed auxiliary MSA
    features; requesting "template" installs gap-only templates with zeroed
    masks and positions.

    Args:
        features_dict: existing feature dict; must contain "token_index",
            "atom_to_token_idx" and (for the msa path) one-hot "restype".
            Mutated in place.
        dummy_feats: names of the features to fill. Defaults to ("msa",).
            (A tuple: the previous list default was a mutable default
            argument, a latent shared-state bug.)

    Returns:
        dict[str, torch.Tensor]: the same dict object, updated.
    """
    num_token = features_dict["token_index"].shape[0]
    num_atom = features_dict["atom_to_token_idx"].shape[0]
    num_msa = 1
    num_templ = 4
    num_pockets = 30
    feat_shape, _ = get_data_shape_dict(
        num_token=num_token,
        num_atom=num_atom,
        num_msa=num_msa,
        num_templ=num_templ,
        num_pocket=num_pockets,
    )
    for feat_name in dummy_feats:
        if feat_name not in ["msa", "template"]:
            cur_feat_shape = feat_shape[feat_name]
            features_dict[feat_name] = torch.zeros(cur_feat_shape)
    if "msa" in dummy_feats:
        # Single-row MSA: the index of the one-hot restype per token.
        features_dict["msa"] = torch.nonzero(features_dict["restype"])[:, 1].unsqueeze(
            0
        )
        assert features_dict["msa"].shape == feat_shape["msa"]
        features_dict["has_deletion"] = torch.zeros(feat_shape["has_deletion"])
        features_dict["deletion_value"] = torch.zeros(feat_shape["deletion_value"])
        # With one sequence, the profile is just the one-hot restype itself.
        features_dict["profile"] = features_dict["restype"]
        assert features_dict["profile"].shape == feat_shape["profile"]
        features_dict["deletion_mean"] = torch.zeros(feat_shape["deletion_mean"])
        for key in [
            "prot_pair_num_alignments",
            "prot_unpair_num_alignments",
            "rna_pair_num_alignments",
            "rna_unpair_num_alignments",
        ]:
            features_dict[key] = torch.tensor(0, dtype=torch.int32)

    if "template" in dummy_feats:
        # Restype 31 is the gap token, i.e. "no template residue".
        features_dict["template_restype"] = (
            torch.ones(feat_shape["template_restype"]) * 31
        )  # gap
        features_dict["template_all_atom_mask"] = torch.zeros(
            feat_shape["template_all_atom_mask"]
        )
        features_dict["template_all_atom_positions"] = torch.zeros(
            feat_shape["template_all_atom_positions"]
        )
    return features_dict
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def data_type_transform(
    feat_or_label_dict: dict[str, torch.Tensor],
) -> dict[str, torch.Tensor]:
    """
    Cast index-like features to ``torch.long`` in place.

    Every tensor whose key appears in ``IntDataList`` (indices, ids and type
    codes) is converted to ``torch.long``; all other entries are left
    untouched.

    Args:
        feat_or_label_dict: feature or label dict; mutated in place.

    Returns:
        dict[str, torch.Tensor]: the same dict object. (The previous return
        annotation claimed a 3-tuple including an AtomArray, which the
        function never returned.)
    """
    for key, value in feat_or_label_dict.items():
        if key in IntDataList:
            feat_or_label_dict[key] = value.to(torch.long)

    return feat_or_label_dict


# List of "index" or "type" data.
# Their data type should be int (cast to torch.long above).
IntDataList = [
    "residue_index",
    "token_index",
    "asym_id",
    "entity_id",
    "sym_id",
    "ref_space_uid",
    "template_restype",
    "atom_to_token_idx",
    "atom_to_tokatom_idx",
    "frame_atom_index",
    "msa",
    "entity_mol_id",
    "mol_id",
    "mol_atom_index",
]
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
# shape of the data
|
| 406 |
+
def get_data_shape_dict(num_token, num_atom, num_msa, num_templ, num_pocket):
    """
    Generate dictionaries with the canonical shape of every feature and label.

    Args:
        num_token (int): Number of tokens.
        num_atom (int): Number of atoms.
        num_msa (int): Number of MSA sequences.
        num_templ (int): Number of templates.
        num_pocket (int): Number of pockets to the same interested ligand.

    Returns:
        tuple[dict, dict]: (feature_name -> shape, label_name -> shape).
    """
    per_token = (num_token,)
    per_atom = (num_atom,)

    feat = {}
    # Token-level features (AlphaFold3 SI Table 5).
    for name in ("residue_index", "token_index", "asym_id", "entity_id", "sym_id"):
        feat[name] = per_token
    feat["restype"] = (num_token, 32)
    # Chain-permutation features.
    for name in ("entity_mol_id", "mol_id", "mol_atom_index"):
        feat[name] = per_atom
    # Reference-conformer features.
    feat["ref_pos"] = (num_atom, 3)
    feat["ref_mask"] = per_atom
    feat["ref_element"] = (num_atom, 128)  # note: 128 elements in the paper
    feat["ref_charge"] = per_atom
    feat["ref_atom_name_chars"] = (num_atom, 4, 64)
    feat["ref_space_uid"] = per_atom
    # MSA features.
    feat["msa"] = (num_msa, num_token)
    feat["has_deletion"] = (num_msa, num_token)
    feat["deletion_value"] = (num_msa, num_token)
    feat["profile"] = (num_token, 32)
    feat["deletion_mean"] = per_token
    # Template features.
    feat["template_restype"] = (num_templ, num_token)
    feat["template_all_atom_mask"] = (num_templ, num_token, 37)
    feat["template_all_atom_positions"] = (num_templ, num_token, 37, 3)
    feat["template_pseudo_beta_mask"] = (num_templ, num_token)
    feat["template_backbone_frame_mask"] = (num_templ, num_token)
    feat["template_distogram"] = (num_templ, num_token, num_token, 39)
    feat["template_unit_vector"] = (num_templ, num_token, num_token, 3)
    # Bond features.
    feat["token_bonds"] = (num_token, num_token)

    # Extra input features not in the SI table (shapes are after cropping).
    feat["atom_to_token_idx"] = per_atom
    feat["atom_to_tokatom_idx"] = per_atom
    feat["pae_rep_atom_mask"] = per_atom  # same entry as in the label dict
    feat["is_distillation"] = (1,)

    # Label shapes.
    label = {
        "coordinate": (num_atom, 3),
        "coordinate_mask": per_atom,
        "distogram_rep_atom_mask": per_atom,
        "pae_rep_atom_mask": per_atom,
        "plddt_m_rep_atom_mask": per_atom,
        "modified_res_mask": per_atom,
        "bond_mask": (num_atom, num_atom),
        "is_protein": per_atom,  # atom level, not token level
        "is_rna": per_atom,
        "is_dna": per_atom,
        "is_ligand": per_atom,
        "has_frame": per_token,
        "frame_atom_index": (num_token, 3),  # atom index after crop
        "resolution": (1,),
        # Metrics
        "interested_ligand_mask": (num_pocket, num_atom),
        "pocket_mask": (num_pocket, num_atom),
    }

    return feat, label
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
def get_lig_lig_bonds(
    atom_array: "AtomArray", lig_include_ions: bool = False
) -> np.ndarray:
    """
    Get all inter-ligand bonds in order to create "token_bonds".

    A bond counts as inter-ligand when both endpoint atoms are ligand atoms
    and they belong to different (chain, residue) pairs, i.e. intra-residue
    bonds are excluded.

    Args:
        atom_array (AtomArray): biotite AtomArray object with "mol_type" attribute.
        lig_include_ions (bool, optional): if False, atoms of single-atom
            chains (treated as ions) are excluded from the ligand set.
            Defaults to False.

    Returns:
        np.ndarray: inter-ligand bonds, e.g. np.array([[atom1, atom2, bond_order]...])
    """
    if not lig_include_ions:
        # Treat single-atom chains as ions and drop them from the ligand set.
        unique_chain_id, counts = np.unique(
            atom_array.label_asym_id, return_counts=True
        )
        chain_id_to_count_map = dict(zip(unique_chain_id, counts))
        ions_mask = np.array(
            [
                chain_id_to_count_map[label_asym_id] == 1
                for label_asym_id in atom_array.label_asym_id
            ]
        )
        lig_mask = (atom_array.mol_type == "ligand") & ~ions_mask
    else:
        lig_mask = atom_array.mol_type == "ligand"

    # (chain id, residue id) per atom; a bond whose endpoints share both is
    # intra-residue and therefore not an inter-ligand bond.
    chain_res_id = np.vstack((atom_array.label_asym_id, atom_array.res_id)).T
    idx_i = atom_array.bonds._bonds[:, 0]
    idx_j = atom_array.bonds._bonds[:, 1]

    lig_lig_bond_indices = np.where(
        (lig_mask[idx_i] & lig_mask[idx_j])
        & np.any(chain_res_id[idx_i] != chain_res_id[idx_j], axis=1)
    )[0]

    if lig_lig_bond_indices.size == 0:
        # no ligand-ligand bonds (previous comment/name wrongly said
        # "ligand-polymer"; also build the empty array directly with the
        # target dtype instead of allocating floats and casting)
        lig_lig_bonds = np.empty((0, 3), dtype=int)
    else:
        lig_lig_bonds = atom_array.bonds._bonds[
            lig_lig_bond_indices
        ]  # np.array([[atom1, atom2, bond_order]...])
    return lig_lig_bonds
|
| 548 |
+
|
| 549 |
+
|
| 550 |
+
def pdb_to_cif(input_fname: str, output_fname: str, entry_id: str = None):
    """
    Convert PDB to CIF.

    Reads model 1 of the PDB file, re-derives chain boundaries (splitting on
    ATOM/HETATM transitions and on residue-id jumps inside HETATM runs),
    assigns fresh chain ids / entity ids / sequential residue ids, infers
    polymer entity types from residue names, and writes the result via
    CIFWriter.

    Args:
        input_fname (str): input PDB file name
        output_fname (str): output CIF file name
        entry_id (str, optional): entry id. Defaults to None.
    """
    pdbfile = PDBFile.read(input_fname)
    atom_array = pdbfile.get_structure(model=1, include_bonds=True, altloc="first")

    seq_to_entity_id = {}
    cnt = 0
    chain_starts = struc.get_chain_starts(atom_array, add_exclusive_stop=True)

    # split chains by hetero: if a chain switches between ATOM and HETATM
    # records, start a new chain at the first switch.
    # NOTE(review): only the FIRST hetero transition within a chain opens a
    # new chain; a chain with several transitions keeps the rest merged —
    # confirm this is intended.
    new_chain_starts = []
    for c_start, c_stop in zip(chain_starts[:-1], chain_starts[1:]):
        new_chain_starts.append(c_start)
        chain_start_hetero = atom_array.hetero[c_start]
        hetero_diff = np.where(atom_array.hetero[c_start:c_stop] != chain_start_hetero)
        if hetero_diff[0].shape[0] > 0:
            new_chain_start = c_start + hetero_diff[0][0]
            new_chain_starts.append(new_chain_start)

    new_chain_starts += [chain_starts[-1]]

    # # split HETATM chains by res id
    # Within HETATM runs, every residue-id increment starts a new chain, so
    # each HETATM residue (e.g. each ligand copy) becomes its own chain.
    new_chain_starts2 = []
    for c_start, c_stop in zip(new_chain_starts[:-1], new_chain_starts[1:]):
        new_chain_starts2.append(c_start)
        res_id_diff = np.diff(atom_array.res_id[c_start:c_stop])
        uncont_res_starts = np.where(res_id_diff >= 1)

        if uncont_res_starts[0].shape[0] > 0:
            for res_start_atom_idx in uncont_res_starts[0]:
                new_chain_start = c_start + res_start_atom_idx + 1
                # atom_array.hetero is True if "HETATM"
                if (
                    atom_array.hetero[new_chain_start]
                    and atom_array.hetero[new_chain_start - 1]
                ):
                    new_chain_starts2.append(new_chain_start)

    chain_starts = new_chain_starts2 + [chain_starts[-1]]

    # Per-atom scratch arrays filled chain by chain below.
    label_entity_id = np.zeros(len(atom_array), dtype=np.int32)
    atom_index = np.arange(len(atom_array), dtype=np.int32)
    res_id = copy.deepcopy(atom_array.res_id)

    chain_id = copy.deepcopy(atom_array.chain_id)
    chain_count = 0
    for c_start, c_stop in zip(chain_starts[:-1], chain_starts[1:]):
        # Fresh alphabetic chain id (A, B, ..., AA, ...) per derived chain.
        chain_count += 1
        new_chain_id = int_to_letters(chain_count)
        chain_id[c_start:c_stop] = new_chain_id

        chain_array = atom_array[c_start:c_stop]
        residue_starts = struc.get_residue_starts(chain_array, add_exclusive_stop=True)
        resname_seq = [name for name in chain_array[residue_starts[:-1]].res_name]
        resname_str = "_".join(resname_seq)
        # A pure-DNA chain whose sequence was already seen is assumed to be
        # the reverse-complement strand: reverse its atoms so it maps onto
        # the existing entity.
        # NOTE(review): res_id below is renumbered BEFORE this reversal is
        # applied via atom_index — verify residue numbering of reversed
        # chains is as intended.
        if (
            all([name in DNA_STD_RESIDUES for name in resname_seq])
            and resname_str in seq_to_entity_id
        ):
            resname_seq = resname_seq[::-1]
            resname_str = "_".join(resname_seq)
            atom_index[c_start:c_stop] = atom_index[c_start:c_stop][::-1]

        # One entity id per unique residue-name sequence.
        if resname_str not in seq_to_entity_id:
            cnt += 1
            seq_to_entity_id[resname_str] = cnt
        label_entity_id[c_start:c_stop] = seq_to_entity_id[resname_str]

        # Renumber residues sequentially from 1 within the derived chain.
        res_cnt = 1
        for res_start, res_stop in zip(residue_starts[:-1], residue_starts[1:]):
            res_id[c_start:c_stop][res_start:res_stop] = res_cnt
            res_cnt += 1

    # Apply any chain reversals performed above.
    atom_array = atom_array[atom_index]

    # add label entity id
    atom_array.set_annotation("label_entity_id", label_entity_id)
    # Classify each entity as protein / DNA / RNA polymer from its residue
    # names; mixed or short (<2 standard residues) entities get no type.
    entity_poly_type = {}
    for seq, entity_id in seq_to_entity_id.items():
        resname_seq = seq.split("_")

        count = defaultdict(int)
        for name in resname_seq:
            if name in PRO_STD_RESIDUES:
                count["prot"] += 1
            elif name in DNA_STD_RESIDUES:
                count["dna"] += 1
            elif name in RNA_STD_RESIDUES:
                count["rna"] += 1
            else:
                count["other"] += 1

        if count["prot"] >= 2 and count["dna"] == 0 and count["rna"] == 0:
            entity_poly_type[entity_id] = "polypeptide(L)"
        elif count["dna"] >= 2 and count["rna"] == 0 and count["prot"] == 0:
            entity_poly_type[entity_id] = "polydeoxyribonucleotide"
        elif count["rna"] >= 2 and count["dna"] == 0 and count["prot"] == 0:
            entity_poly_type[entity_id] = "polyribonucleotide"
        else:
            # other entity type: ignoring
            continue

    # add label atom id
    atom_array.set_annotation("label_atom_id", atom_array.atom_name)

    # add label asym id
    atom_array.chain_id = chain_id  # reset chain_id
    atom_array.set_annotation("label_asym_id", atom_array.chain_id)

    # add label seq id
    atom_array.res_id = res_id  # reset res_id
    atom_array.set_annotation("label_seq_id", atom_array.res_id)

    w = CIFWriter(atom_array=atom_array, entity_poly_type=entity_poly_type)
    w.save_to_cif(
        output_fname,
        entry_id=entry_id or os.path.basename(output_fname),
        include_bonds=True,
    )
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/metrics/__init__.py
ADDED
|
File without changes
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/metrics/clash.py
ADDED
|
@@ -0,0 +1,272 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import logging
|
| 16 |
+
from typing import Optional
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
import torch.nn as nn
|
| 20 |
+
|
| 21 |
+
from protenix.data.constants import rdkit_vdws
|
| 22 |
+
|
| 23 |
+
# Van der Waals radius per element, indexed by element position in the
# one-hot encoding (values come from protenix.data.constants.rdkit_vdws).
RDKIT_VDWS = torch.tensor(rdkit_vdws)
# Integer chain-type code -> name; 0 marks an unrecognized chain type.
ID2TYPE = {0: "UNK", 1: "lig", 2: "prot", 3: "dna", 4: "rna"}
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def get_vdw_radii(elements_one_hot):
    """Return the van der Waals radius of each atom, looked up from its one-hot element encoding."""
    element_index = elements_one_hot.argmax(dim=1)
    radius_table = RDKIT_VDWS.to(element_index.device)
    return radius_table[element_index]
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class Clash(nn.Module):
|
| 34 |
+
    def __init__(
        self,
        af3_clash_threshold=1.1,
        vdw_clash_threshold=0.75,
        compute_af3_clash=True,
        compute_vdw_clash=True,
    ):
        """
        Configure which inter-chain clash criteria to evaluate.

        Args:
            af3_clash_threshold (float): absolute inter-atom distance below
                which a pair counts as an AF3-style clash. Defaults to 1.1.
            vdw_clash_threshold (float): fraction of the summed van der Waals
                radii below which a pair counts as a vdW clash. Defaults to 0.75.
            compute_af3_clash (bool): evaluate the fixed-distance criterion.
            compute_vdw_clash (bool): evaluate the vdW-ratio criterion;
                requires ``mol_id`` and ``elements_one_hot`` at call time.
        """
        super().__init__()
        self.af3_clash_threshold = af3_clash_threshold
        self.vdw_clash_threshold = vdw_clash_threshold
        self.compute_af3_clash = compute_af3_clash
        self.compute_vdw_clash = compute_vdw_clash
|
| 46 |
+
|
| 47 |
+
    def forward(
        self,
        pred_coordinate,
        asym_id,
        atom_to_token_idx,
        is_ligand,
        is_protein,
        is_dna,
        is_rna,
        mol_id: Optional[torch.Tensor] = None,
        elements_one_hot: Optional[torch.Tensor] = None,
    ):
        """
        Detect inter-chain clashes in predicted coordinates.

        First derives per-chain bookkeeping (masks, chain types, molecule
        ids) via ``get_chain_info``, then evaluates the configured clash
        criteria for every chain pair.

        Args:
            pred_coordinate: predicted atom coordinates; first dimension is
                the sample dimension (see ``_check_clash_per_chain_pairs``).
            asym_id: per-token chain id (expected to be 0..N_chains-1).
            atom_to_token_idx: maps each atom to its token index.
            is_ligand / is_protein / is_dna / is_rna: per-atom type masks.
            mol_id: per-atom molecule id; required for the vdW criterion.
            elements_one_hot: per-atom one-hot element encoding; required for
                the vdW criterion.

        Returns:
            The result of ``_check_clash_per_chain_pairs`` (per-chain-pair
            clash flags and details).
        """
        chain_info = self.get_chain_info(
            asym_id=asym_id,
            atom_to_token_idx=atom_to_token_idx,
            is_ligand=is_ligand,
            is_protein=is_protein,
            is_dna=is_dna,
            is_rna=is_rna,
            mol_id=mol_id,
            elements_one_hot=elements_one_hot,
        )
        return self._check_clash_per_chain_pairs(
            pred_coordinate=pred_coordinate, **chain_info
        )
|
| 72 |
+
|
| 73 |
+
    def get_chain_info(
        self,
        asym_id,
        atom_to_token_idx,
        is_ligand,
        is_protein,
        is_dna,
        is_rna,
        mol_id: Optional[torch.Tensor] = None,
        elements_one_hot: Optional[torch.Tensor] = None,
    ):
        """
        Build per-chain bookkeeping used by the clash checks.

        Returns a dict with the number of chains, per-chain token masks, the
        per-atom type code, the chain-type name list and (for the vdW
        criterion) the chain -> molecule-id mapping.

        Raises:
            AssertionError: if asym_id values are not contiguous from 0, if a
                chain mixes atom types, or (vdW path) if a chain spans more
                than one mol_id or the required inputs are missing.
        """
        # Get chain info
        asym_id = asym_id.long()
        asym_id_to_asym_mask = {
            aid.item(): asym_id == aid for aid in torch.unique(asym_id)
        }
        N_chains = len(asym_id_to_asym_mask)
        # Make sure it is from 0 to N_chain-1
        assert N_chains == asym_id.max() + 1

        # Check and compute chain_types
        chain_types = []
        mol_id_to_asym_ids, asym_id_to_mol_id = {}, {}
        # Encode the four mutually exclusive per-atom masks into one code:
        # 0=UNK, 1=lig, 2=prot, 3=dna, 4=rna (see ID2TYPE).
        atom_type = (1 * is_ligand + 2 * is_protein + 3 * is_dna + 4 * is_rna).long()
        if self.compute_vdw_clash:
            assert mol_id is not None
            assert elements_one_hot is not None

        for aid in range(N_chains):
            # Token-level chain mask expanded to atom level.
            atom_chain_mask = asym_id_to_asym_mask[aid][atom_to_token_idx]
            atom_type_i = atom_type[atom_chain_mask]
            # A chain must be homogeneous in atom type.
            assert len(atom_type_i.unique()) == 1
            if atom_type_i[0].item() == 0:
                logging.warning(
                    "Unknown asym_id type: not in ligand / protein / dna / rna"
                )
            chain_types.append(ID2TYPE[atom_type_i[0].item()])
            if self.compute_vdw_clash:
                # Check if all atoms in a chain are from the same molecule
                mol_id_i = mol_id[atom_chain_mask].unique().item()
                mol_id_to_asym_ids.setdefault(mol_id_i, []).append(aid)
                asym_id_to_mol_id[aid] = mol_id_i

        chain_info = {
            "N_chains": N_chains,
            "atom_to_token_idx": atom_to_token_idx,
            "asym_id_to_asym_mask": asym_id_to_asym_mask,
            "atom_type": atom_type,
            "mol_id": mol_id,
            "elements_one_hot": elements_one_hot,
            "chain_types": chain_types,
        }

        if self.compute_vdw_clash:
            chain_info.update({"asym_id_to_mol_id": asym_id_to_mol_id})

        return chain_info
|
| 130 |
+
|
| 131 |
+
def get_chain_pair_violations(
|
| 132 |
+
self,
|
| 133 |
+
pred_coordinate,
|
| 134 |
+
violation_type,
|
| 135 |
+
chain_1_mask,
|
| 136 |
+
chain_2_mask,
|
| 137 |
+
elements_one_hot: Optional[torch.Tensor] = None,
|
| 138 |
+
):
|
| 139 |
+
chain_1_coords = pred_coordinate[chain_1_mask, :]
|
| 140 |
+
chain_2_coords = pred_coordinate[chain_2_mask, :]
|
| 141 |
+
pred_dist = torch.cdist(chain_1_coords, chain_2_coords)
|
| 142 |
+
if violation_type == "af3":
|
| 143 |
+
clash_per_atom_pair = (
|
| 144 |
+
pred_dist < self.af3_clash_threshold
|
| 145 |
+
) # [ N_atom_chain_1, N_atom_chain_2]
|
| 146 |
+
clashed_col, clashed_row = torch.where(clash_per_atom_pair)
|
| 147 |
+
clash_atom_pairs = torch.stack((clashed_col, clashed_row), dim=-1)
|
| 148 |
+
else:
|
| 149 |
+
assert elements_one_hot is not None
|
| 150 |
+
vdw_radii_i, vdw_radii_j = get_vdw_radii(
|
| 151 |
+
elements_one_hot[chain_1_mask, :]
|
| 152 |
+
), get_vdw_radii(elements_one_hot[chain_2_mask, :])
|
| 153 |
+
vdw_sum_pair = (
|
| 154 |
+
vdw_radii_i[:, None] + vdw_radii_j[None, :]
|
| 155 |
+
) # [N_atom_chain_1, N_atom_chain_2]
|
| 156 |
+
relative_vdw_distance = pred_dist / vdw_sum_pair
|
| 157 |
+
clash_per_atom_pair = (
|
| 158 |
+
relative_vdw_distance < self.vdw_clash_threshold
|
| 159 |
+
) # [N_atom_chain_1, N_atom_chain_2]
|
| 160 |
+
clashed_col, clashed_row = torch.where(clash_per_atom_pair)
|
| 161 |
+
clash_rel_dist = relative_vdw_distance[clashed_col, clashed_row]
|
| 162 |
+
clashed_global_col = torch.where(chain_1_mask)[0][clashed_col]
|
| 163 |
+
clashed_global_row = torch.where(chain_2_mask)[0][clashed_row]
|
| 164 |
+
clash_atom_pairs = torch.stack(
|
| 165 |
+
(clashed_global_col, clashed_global_row, clash_rel_dist), dim=-1
|
| 166 |
+
)
|
| 167 |
+
return clash_atom_pairs
|
| 168 |
+
|
| 169 |
+
def _check_clash_per_chain_pairs(
    self,
    pred_coordinate,
    atom_to_token_idx,
    N_chains,
    atom_type,
    chain_types,
    elements_one_hot,
    asym_id_to_asym_mask,
    mol_id: Optional[torch.Tensor] = None,
    asym_id_to_mol_id: Optional[torch.Tensor] = None,
):
    """Run pairwise clash checks over every chain pair of each sample.

    Args:
        pred_coordinate: predicted coordinates, [N_sample, N_atom, 3].
        atom_to_token_idx: per-atom token index, used to expand per-token
            chain masks to per-atom masks.
        N_chains: number of chains (asym units).
        atom_type: unused here; kept for interface compatibility.
        chain_types: per-chain type labels. "UNK" chains are skipped
            entirely; "lig" chains are excluded from the AF3 check, which
            only considers polymer chains.
        elements_one_hot: per-atom element one-hot, forwarded to the vdw
            clash check.
        asym_id_to_asym_mask: per-chain token masks.
        mol_id: optional per-atom molecule ids (not used directly here).
        asym_id_to_mol_id: per-chain molecule ids; indexed whenever
            self.compute_vdw_clash is set, to detect (and skip) ligands
            potentially bonded to polymers.

    Returns:
        dict: "summary" with symmetric boolean clash flags per chain pair
        plus chain types and skipped pairs, and "details" with per-pair
        clash statistics (AF3) / clash atom-pair tensors (vdw).
    """
    device = pred_coordinate.device
    N_sample = pred_coordinate.shape[0]

    # initialize results
    if self.compute_af3_clash:
        has_af3_clash_flag = torch.zeros(
            N_sample, N_chains, N_chains, device=device, dtype=torch.bool
        )
        # BUGFIX: this tensor stores a clash COUNT (slot 0) and a clash
        # RATIO (slot 1), so it must be floating point; the previous
        # dtype=torch.bool collapsed both statistics to 0/1.
        af3_clash_details = torch.zeros(
            N_sample, N_chains, N_chains, 2, device=device, dtype=torch.float32
        )
    if self.compute_vdw_clash:
        has_vdw_clash_flag = torch.zeros(
            N_sample, N_chains, N_chains, device=device, dtype=torch.bool
        )
        vdw_clash_details = {}

    skipped_pairs = []
    for sample_id in range(N_sample):
        for i in range(N_chains):
            if chain_types[i] == "UNK":
                continue
            atom_chain_mask_i = asym_id_to_asym_mask[i][atom_to_token_idx]
            N_chain_i = torch.sum(atom_chain_mask_i).item()
            for j in range(i + 1, N_chains):
                if chain_types[j] == "UNK":
                    continue
                chain_pair_type = set([chain_types[i], chain_types[j]])
                # Skip potential bonded ligand to polymers: a ligand sharing
                # a mol_id with a non-ligand chain is likely covalently bound,
                # so vdw "clashes" there would be false positives.
                skip_bonded_ligand = False
                if (
                    self.compute_vdw_clash
                    and "lig" in chain_pair_type
                    and len(chain_pair_type) > 1
                    and asym_id_to_mol_id[i] == asym_id_to_mol_id[j]
                ):
                    common_mol_id = asym_id_to_mol_id[i]
                    logging.warning(
                        f"mol_id {common_mol_id} may contain bonded ligand to polymers"
                    )
                    skip_bonded_ligand = True
                    skipped_pairs.append((i, j))
                atom_chain_mask_j = asym_id_to_asym_mask[j][atom_to_token_idx]
                N_chain_j = torch.sum(atom_chain_mask_j).item()
                if self.compute_vdw_clash and not skip_bonded_ligand:
                    vdw_clash_pairs = self.get_chain_pair_violations(
                        pred_coordinate=pred_coordinate[sample_id, :, :],
                        violation_type="vdw",
                        chain_1_mask=atom_chain_mask_i,
                        chain_2_mask=atom_chain_mask_j,
                        elements_one_hot=elements_one_hot,
                    )
                    if vdw_clash_pairs.shape[0] > 0:
                        vdw_clash_details[(sample_id, i, j)] = vdw_clash_pairs
                        has_vdw_clash_flag[sample_id, i, j] = True
                        has_vdw_clash_flag[sample_id, j, i] = True
                if (
                    chain_types[i] == "lig" or chain_types[j] == "lig"
                ):  # AF3 clash only consider polymer chains
                    continue
                if self.compute_af3_clash:
                    af3_clash_pairs = self.get_chain_pair_violations(
                        pred_coordinate=pred_coordinate[sample_id, :, :],
                        violation_type="af3",
                        chain_1_mask=atom_chain_mask_i,
                        chain_2_mask=atom_chain_mask_j,
                    )
                    total_clash = af3_clash_pairs.shape[0]
                    relative_clash = total_clash / min(N_chain_i, N_chain_j)
                    af3_clash_details[sample_id, i, j, 0] = total_clash
                    af3_clash_details[sample_id, i, j, 1] = relative_clash
                    # AF3 thresholds for flagging a clashing chain pair.
                    has_af3_clash_flag[sample_id, i, j] = (
                        total_clash > 100 or relative_clash > 0.5
                    )
                    # Mirror the symmetric (j, i) entries.
                    af3_clash_details[sample_id, j, i, :] = af3_clash_details[
                        sample_id, i, j, :
                    ]
                    has_af3_clash_flag[sample_id, j, i] = has_af3_clash_flag[
                        sample_id, i, j
                    ]
    return {
        "summary": {
            "af3_clash": has_af3_clash_flag if self.compute_af3_clash else None,
            "vdw_clash": has_vdw_clash_flag if self.compute_vdw_clash else None,
            "chain_types": chain_types,
            "skipped_pairs": skipped_pairs,
        },
        "details": {
            "af3_clash": af3_clash_details if self.compute_af3_clash else None,
            "vdw_clash": vdw_clash_details if self.compute_vdw_clash else None,
        },
    }
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/metrics/lddt_metrics.py
ADDED
|
@@ -0,0 +1,277 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from typing import Optional
|
| 16 |
+
|
| 17 |
+
import torch
|
| 18 |
+
import torch.nn as nn
|
| 19 |
+
|
| 20 |
+
from protenix.model import sample_confidence
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def get_complex_level_rankers(scores, keys):
    """Build rank-1 selector callables from complex-level confidence scores.

    For each key, sorts scores[key] along dim 0 (ascending for "gpde",
    where lower is better; descending otherwise) and returns a callable
    that picks the top-ranked sample along the last dimension.

    Args:
        scores: dict mapping score names to tensors with the sample axis
            at dim 0.
        keys: subset of {"plddt", "gpde", "ranking_score"} to build
            selectors for.

    Returns:
        dict: "<key>.rank1" -> callable selecting the best sample.
    """
    assert all(k in ["plddt", "gpde", "ranking_score"] for k in keys)
    selectors = {}
    for key in keys:
        # gpde is an error-like score: lower is better, so sort ascending.
        order = scores[key].argsort(dim=0, descending=(key != "gpde"))
        top_idx = order[0].item()
        # Bind top_idx via a default argument so each closure keeps its own index.
        selectors[f"{key}.rank1"] = lambda x, _i=top_idx: x[..., _i]
    return selectors
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def add_diff_metrics(scores, ranker_keys):
    """Extend `scores` in place with gaps between aggregated score variants.

    Adds best-vs-worst/random/median differences, plus best-vs-rank1 and
    rank1-vs-median gaps for every ranker key.

    Args:
        scores: dict holding at least "best", "worst", "random", "median"
            and "<key>.rank1" for every key in `ranker_keys`.
        ranker_keys: ranker names whose rank-1 entries are compared.

    Returns:
        dict: the same `scores` dict, updated in place.
    """
    best = scores["best"]
    gaps = {
        "diff/best_worst": best - scores["worst"],
        "diff/best_random": best - scores["random"],
        "diff/best_median": best - scores["median"],
    }
    for key in ranker_keys:
        rank1 = scores[f"{key}.rank1"]
        gaps[f"diff/best_{key}"] = best - rank1
        gaps[f"diff/{key}_median"] = rank1 - scores["median"]
    scores.update(gaps)
    return scores
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class LDDTMetrics(nn.Module):
    """LDDT: evaluated on chains and interfaces."""

    def __init__(self, configs):
        super(LDDTMetrics, self).__init__()
        self.eps = configs.metrics.lddt.eps
        self.configs = configs
        # Chunk size bounds peak memory in the sparse LDDT computation.
        self.chunk_size = self.configs.infer_setting.lddt_metrics_chunk_size
        self.lddt_base = LDDT(eps=self.eps)

        # Confidence keys used to build complex-level rank-1 selectors.
        self.complex_ranker_keys = configs.metrics.get(
            "complex_ranker_keys", ["plddt", "gpde", "ranking_score"]
        )

    def compute_lddt(self, pred_dict: dict, label_dict: dict):
        """Compute complex-level lddt.

        Args:
            pred_dict (dict): a dictionary containing
                coordinate: [N_sample, N_atom, 3]
            label_dict (dict): a dictionary containing
                coordinate: [N_sample, N_atom, 3]
                lddt_mask: [N_atom, N_atom]

        Returns:
            dict: {"complex": per-sample lddt, [N_sample]}
        """
        out = {}

        # Complex-level
        lddt = self.lddt_base.forward(
            pred_coordinate=pred_dict["coordinate"],
            true_coordinate=label_dict["coordinate"],
            lddt_mask=label_dict["lddt_mask"],
            chunk_size=self.chunk_size,
        )  # [N_sample]
        out["complex"] = lddt

        return out

    def aggregate(
        self,
        vals,
        dim: int = -1,
        aggregators: Optional[dict] = None,
    ):
        """Aggregate per-sample values along `dim`.

        Args:
            vals (torch.Tensor): values with the sample axis at `dim`.
            dim (int): sample dimension. Defaults to -1.
            aggregators (Optional[dict]): extra name -> callable aggregators
                merged on top of (and able to override) the built-in ones.
                Defaults to None. (BUGFIX: was a mutable `{}` default
                argument, a shared-state hazard.)

        Returns:
            dict: aggregator name -> aggregated tensor.
        """
        if aggregators is None:
            aggregators = {}
        N_sample = vals.size(dim)
        # "median" is the middle element of the descending sort.
        median_index = N_sample // 2
        basic_sample_aggregators = {
            "best": lambda x: x.max(dim=dim)[0],
            "worst": lambda x: x.min(dim=dim)[0],
            "random": lambda x: x.select(dim=dim, index=0),
            "mean": lambda x: x.mean(dim=dim),
            "median": lambda x: x.sort(dim=dim, descending=True)[0].select(
                dim=dim, index=median_index
            ),
        }
        sample_aggregators = {**basic_sample_aggregators, **aggregators}

        return {
            agg_name: agg_func(vals)
            for agg_name, agg_func in sample_aggregators.items()
        }

    def aggregate_lddt(self, lddt_dict, per_sample_summary_confidence):
        """Aggregate complex-level lddt across samples and ranker picks.

        Args:
            lddt_dict: output of `compute_lddt` ({"complex": [N_sample]}).
            per_sample_summary_confidence: per-sample confidence summaries,
                merged via `sample_confidence.merge_per_sample_confidence_scores`.

        Returns:
            tuple: (dict of "lddt/complex/<name>" metrics, empty dict).
        """
        # Merge summary_confidence results
        confidence_scores = sample_confidence.merge_per_sample_confidence_scores(
            per_sample_summary_confidence
        )

        # Complex-level LDDT
        complex_level_ranker = get_complex_level_rankers(
            confidence_scores, self.complex_ranker_keys
        )

        complex_lddt = self.aggregate(
            lddt_dict["complex"], aggregators=complex_level_ranker
        )
        complex_lddt = add_diff_metrics(complex_lddt, self.complex_ranker_keys)
        # Log metrics
        complex_lddt = {
            f"lddt/complex/{name}": value for name, value in complex_lddt.items()
        }
        return complex_lddt, {}
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
class LDDT(nn.Module):
    """LDDT base metric (sparse atom-pair implementation)."""

    def __init__(self, eps: float = 1e-10):
        """
        Args:
            eps (float): numerical-stability constant stored on the
                instance. Defaults to 1e-10.
        """
        super(LDDT, self).__init__()
        self.eps = eps

    def _chunk_base_forward(self, pred_distance, true_distance) -> torch.Tensor:
        """Score one chunk of sparse pair distances against the LDDT
        thresholds (0.5, 1, 2, 4).

        Args:
            pred_distance: [..., N_pair_sparse] predicted pair distances.
            true_distance: [N_pair_sparse] ground-truth pair distances
                (broadcast against pred_distance).

        Returns:
            torch.Tensor: [...] mean per-pair LDDT.
        """
        distance_error_l1 = torch.abs(
            pred_distance - true_distance
        )  # [N_sample, N_pair_sparse]
        thresholds = [0.5, 1, 2, 4]
        sparse_pair_lddt = (
            torch.stack([distance_error_l1 < t for t in thresholds], dim=-1)
            .to(dtype=distance_error_l1.dtype)
            .mean(dim=-1)
        )  # [N_sample, N_pair_sparse]
        del distance_error_l1
        if sparse_pair_lddt.numel() == 0:
            # Corresponds to an all-zero dense lddt_mask (no valid pairs).
            # BUGFIX: the old code zeroed the already-empty tensor and then
            # averaged over an empty dimension, which produced NaN; define
            # the LDDT as 0 in this case instead.
            return torch.zeros(
                sparse_pair_lddt.shape[:-1],
                dtype=sparse_pair_lddt.dtype,
                device=sparse_pair_lddt.device,
            )
        lddt = torch.mean(sparse_pair_lddt, dim=-1)
        return lddt

    def _chunk_forward(
        self, pred_distance, true_distance, chunk_size: Optional[int] = None
    ) -> torch.Tensor:
        """Compute LDDT, optionally chunking over the sample dimension
        (dim -2 of pred_distance) to bound peak memory."""
        if chunk_size is None:
            return self._chunk_base_forward(pred_distance, true_distance)
        else:
            lddt = []
            N_sample = pred_distance.shape[-2]
            # Ceil-divide so a partial trailing chunk is still processed.
            no_chunks = N_sample // chunk_size + (N_sample % chunk_size != 0)
            for i in range(no_chunks):
                lddt_i = self._chunk_base_forward(
                    pred_distance[
                        ...,
                        i * chunk_size : (i + 1) * chunk_size,
                        :,
                    ],
                    true_distance,
                )
                lddt.append(lddt_i)
            lddt = torch.cat(lddt, dim=-1)  # [N_sample]
            return lddt

    def _calc_sparse_dist(self, pred_coordinate, true_coordinate, l_index, m_index):
        """Gather the (l, m) atom pairs and compute their distances for both
        predicted and true coordinates."""
        pred_coords_l = pred_coordinate.index_select(
            -2, l_index
        )  # [N_sample, N_atom_sparse_l, 3]
        pred_coords_m = pred_coordinate.index_select(
            -2, m_index
        )  # [N_sample, N_atom_sparse_m, 3]
        true_coords_l = true_coordinate.index_select(
            -2, l_index
        )  # [N_atom_sparse_l, 3]
        true_coords_m = true_coordinate.index_select(
            -2, m_index
        )  # [N_atom_sparse_m, 3]

        pred_distance_sparse_lm = torch.norm(
            pred_coords_l - pred_coords_m, p=2, dim=-1
        )  # [N_sample, N_pair_sparse]
        true_distance_sparse_lm = torch.norm(
            true_coords_l - true_coords_m, p=2, dim=-1
        )  # [N_sample, N_pair_sparse]
        return pred_distance_sparse_lm, true_distance_sparse_lm

    def forward(
        self,
        pred_coordinate: torch.Tensor,
        true_coordinate: torch.Tensor,
        lddt_mask: torch.Tensor,
        chunk_size: Optional[int] = None,
    ) -> torch.Tensor:
        """LDDT: evaluated on complex, chains and interfaces.
        Sparse implementation, which largely reduces cuda memory when the
        atom count reaches 10^4+.

        Args:
            pred_coordinate (torch.Tensor): the pred coordinates
                [N_sample, N_atom, 3]
            true_coordinate (torch.Tensor): the ground truth atom coordinates
                [N_atom, 3]
            lddt_mask (torch.Tensor): [N_atom, N_atom] atom-pair mask based
                on bespoke radius of true distance (see compute_lddt_mask).
            chunk_size (Optional[int]): if set, chunk over samples.

        Returns:
            torch.Tensor: per-sample LDDT, [N_sample].
                (Annotation fixed: the previous dict[str, Tensor] hint did
                not match the actual tensor return.)
        """
        lddt_indices = torch.nonzero(lddt_mask, as_tuple=True)
        l_index = lddt_indices[0]
        m_index = lddt_indices[1]
        pred_distance_sparse_lm, true_distance_sparse_lm = self._calc_sparse_dist(
            pred_coordinate, true_coordinate, l_index, m_index
        )
        group_lddt = self._chunk_forward(
            pred_distance_sparse_lm, true_distance_sparse_lm, chunk_size=chunk_size
        )  # [N_sample]
        return group_lddt

    @staticmethod
    def compute_lddt_mask(
        true_coordinate: torch.Tensor,
        true_coordinate_mask: torch.Tensor,
        is_nucleotide: torch.Tensor = None,
        is_nucleotide_threshold: float = 30.0,
        threshold: float = 15.0,
    ):
        """Build the dense atom-pair inclusion mask for LDDT.

        Args:
            true_coordinate: [..., N_atom, 3] ground-truth coordinates.
            true_coordinate_mask: [..., N_atom] validity mask.
            is_nucleotide: optional [..., N_atom] flag selecting atoms that
                use the larger nucleotide radius.
            is_nucleotide_threshold: inclusion radius for nucleotide rows.
            threshold: inclusion radius for all other rows.

        Returns:
            torch.Tensor: [..., N_atom, N_atom] float mask with zeroed
            diagonal and invalid pairs removed.
        """
        # Distance mask
        distance_mask = (
            true_coordinate_mask[..., None] * true_coordinate_mask[..., None, :]
        )
        # Distances for all atom pairs
        # Note: we convert to bf16 for saving cuda memory, if performance drops, do not convert it
        distance = (torch.cdist(true_coordinate, true_coordinate) * distance_mask).to(
            true_coordinate.dtype
        )  # [..., N_atom, N_atom]

        # Local mask
        c_lm = distance < threshold  # [..., N_atom, N_atom]
        if is_nucleotide is not None:
            # Use a different radius for nucleotide
            is_nucleotide_mask = is_nucleotide.bool()[..., None]
            c_lm = (distance < is_nucleotide_threshold) * is_nucleotide_mask + c_lm * (
                ~is_nucleotide_mask
            )

        # Zero-out diagonals of c_lm and cast to float
        c_lm = c_lm * (
            1 - torch.eye(n=c_lm.size(-1), device=c_lm.device, dtype=distance.dtype)
        )
        # Zero-out atom pairs without true coordinates
        c_lm = c_lm * distance_mask  # [..., N_atom, N_atom]
        return c_lm
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/metrics/rmsd.py
ADDED
|
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from typing import Optional
|
| 16 |
+
|
| 17 |
+
import torch
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def rmsd(
    pred_pose: torch.Tensor,
    true_pose: torch.Tensor,
    mask: torch.Tensor = None,
    eps: float = 0.0,
    reduce: bool = True,
):
    """Root-mean-square deviation between two identically shaped poses.

    Arguments:
        pred_pose/true_pose: [...,N,3], two poses with the same shape
        mask: [..., N], selects which atoms/pseudo_betas/etc contribute;
            defaults to all positions
        eps: tolerance added before the square root to avoid floating
            number issues
        reduce: if True, return the mean rmsd over batch dims; otherwise
            return the per-batch rmsd tensor
    Return:
        rmsd: scalar mean if reduce is true, else a tensor of per-batch
            rmsd values
    """
    assert pred_pose.shape == true_pose.shape  # [..., N, 3]

    if mask is None:
        mask = torch.ones(true_pose.shape[:-1], device=true_pose.device)

    # Masked mean of squared per-atom deviations, then sqrt. Shape: [...]
    sq_dev = torch.square(pred_pose - true_pose).sum(dim=-1)
    mean_sq = (sq_dev * mask).sum(dim=-1) / mask.sum(dim=-1)
    result = mean_sq.add(eps).sqrt()
    return result.mean() if reduce else result
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def align_pred_to_true(
    pred_pose: torch.Tensor,
    true_pose: torch.Tensor,
    atom_mask: Optional[torch.Tensor] = None,
    weight: Optional[torch.Tensor] = None,
    allowing_reflection: bool = False,
):
    """Find the optimal rigid transform aligning pred_pose onto true_pose.

    Arguments:
        pred_pose: [...,N,3] the pose to perform transformation on
        true_pose: [...,N,3] the target pose to align pred_pose to
        atom_mask: [..., N] a mask for atoms; masked-out atoms are zeroed
        weight: [..., N] a weight vector applied during alignment
        allowing_reflection: whether to allow reflection (improper
            rotation) when finding the optimal alignment
    return:
        aligned_pose: [...,N,3] the transformed pose
        rot: optimal rotation
        translate: optimal translation
    """
    if atom_mask is None:
        atom_mask = torch.ones(*pred_pose.shape[:-1]).to(pred_pose.device)
    else:
        # Zero out masked atoms so they contribute nothing below.
        pred_pose = pred_pose * atom_mask.unsqueeze(-1)
        true_pose = true_pose * atom_mask.unsqueeze(-1)

    weight = atom_mask if weight is None else weight * atom_mask

    # Weighted centroids of both poses.
    denom = torch.sum(weight, dim=-1, keepdim=True).unsqueeze(-1)
    w = weight.unsqueeze(-1)
    pred_centroid = torch.sum(pred_pose * w, dim=-2, keepdim=True) / denom
    true_centroid = torch.sum(true_pose * w, dim=-2, keepdim=True) / denom
    pred_centered = pred_pose - pred_centroid
    true_centered = true_pose - true_centroid

    # Weighted covariance; its SVD yields the optimal rotation (Kabsch).
    cov = torch.matmul(
        (pred_centered * w).transpose(-2, -1),
        true_centered * atom_mask.unsqueeze(-1),
    )
    u, s, v = torch.svd(cov)
    u = u.transpose(-1, -2)

    if allowing_reflection:
        rot = torch.matmul(v, u)
    else:
        # Flip the last singular direction when det < 0 so the result is a
        # proper rotation rather than a reflection.
        det = torch.linalg.det(torch.matmul(v, u))
        diagonal = torch.stack(
            [torch.ones_like(det), torch.ones_like(det), det], dim=-1
        )
        rot = torch.matmul(
            v, torch.matmul(torch.diag_embed(diagonal).to(u.device), u)
        )

    translate = true_centroid - torch.matmul(
        pred_centroid, rot.transpose(-1, -2)
    )

    aligned_pose = (
        torch.matmul(pred_centered, rot.transpose(-1, -2)) + true_centroid
    )

    return aligned_pose, rot, translate
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def partially_aligned_rmsd(
    pred_pose: torch.Tensor,
    true_pose: torch.Tensor,
    align_mask: torch.Tensor,
    atom_mask: torch.Tensor,
    weight: Optional[torch.Tensor] = None,
    eps: float = 0.0,
    reduce: bool = True,
    allowing_reflection: bool = False,
):
    """RMSD when aligning parts of the complex coordinate; permutation
    symmetry is NOT taken into consideration.

    Arguments:
        pred_pose: native predicted pose, [..., N,3]
        true_pose: ground truth pose, [..., N, 3]
        align_mask: which coordinates to align on, [..., N]
        atom_mask: which coordinates to score, [..., N]
        weight: per-atom alignment weights, [..., N]
        eps: tolerance inside the sqrt to avoid floating number issues
        reduce: mean over batch dims if True
        allowing_reflection: allow improper rotations in the alignment
    return:
        aligned_part_rmsd: rmsd over the align_mask subset
        unaligned_part_rmsd: rmsd over the complementary subset
        transformed_pred_pose: pred_pose after the fitted transform
        rot: optimal rotation
        trans: optimal translation
    """
    # Fit the rigid transform on the align_mask subset only.
    _, rot, translate = align_pred_to_true(
        pred_pose,
        true_pose,
        atom_mask=atom_mask * align_mask,
        weight=weight,
        allowing_reflection=allowing_reflection,
    )
    # Apply the transform to every atom, then score the two subsets.
    transformed_pose = torch.matmul(pred_pose, rot.transpose(-1, -2)) + translate
    per_atom_sq_err = (
        torch.square(transformed_pose - true_pose).sum(dim=-1) * atom_mask
    )
    in_mask = atom_mask * align_mask.float()
    out_mask = atom_mask * (1 - align_mask.float())
    aligned_mse = (per_atom_sq_err * in_mask).sum(dim=-1) / in_mask.sum(dim=-1)
    unaligned_mse = (per_atom_sq_err * out_mask).sum(dim=-1) / out_mask.sum(dim=-1)
    aligned_part_rmsd = aligned_mse.add(eps).sqrt()
    unaligned_part_rmsd = unaligned_mse.add(eps).sqrt()
    if reduce:
        aligned_part_rmsd = aligned_part_rmsd.mean()
        unaligned_part_rmsd = unaligned_part_rmsd.mean()
    return aligned_part_rmsd, unaligned_part_rmsd, transformed_pose, rot, translate
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
def self_aligned_rmsd(
    pred_pose: torch.Tensor,
    true_pose: torch.Tensor,
    atom_mask: torch.Tensor,
    eps: float = 0.0,
    reduce: bool = True,
    allowing_reflection: bool = False,
):
    """Align one molecule onto the ground truth and report the rmsd over
    the same atoms.

    Thin wrapper over `partially_aligned_rmsd` with align_mask == atom_mask.

    Arguments:
        pred_pose: native predicted pose, [..., N,3]
        true_pose: ground truth pose, [..., N, 3]
        atom_mask: which coordinates to align and score, [..., N]
        eps: tolerance inside the sqrt to avoid floating number issues
        reduce: mean over batch dims if True
        allowing_reflection: allow improper rotations in the alignment
    return:
        aligned_rmsd: rmsd over the masked atoms after alignment
        transformed_pred_pose: the aligned pose
        rot: optimal rotation matrix
        trans: optimal translation
    """
    result = partially_aligned_rmsd(
        pred_pose=pred_pose,
        true_pose=true_pose,
        align_mask=atom_mask,
        atom_mask=atom_mask,
        eps=eps,
        reduce=reduce,
        allowing_reflection=allowing_reflection,
    )
    aligned_rmsd, _, transformed_pred_pose, rot, trans = result
    return aligned_rmsd, transformed_pred_pose, rot, trans
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def weighted_rigid_align(
    x: torch.Tensor,
    x_target: torch.Tensor,
    atom_weight: torch.Tensor,
    stop_gradient: bool = True,
) -> tuple[torch.Tensor]:
    """Implements Algorithm 28 in AF3. Wrap `align_pred_to_true`.

    Args:
        x (torch.Tensor): input coordinates, it will be moved to match x_target.
            [..., N_atom, 3]
        x_target (torch.Tensor): target coordinates for the input to match.
            [..., N_atom, 3]
        atom_weight (torch.Tensor): weights for each atom.
            [..., N_atom] or [N_atom]
        stop_gradient (bool): whether to detach the output. If true, the
            alignment runs inside a no_grad() context.

    Returns:
        x_aligned (torch.Tensor): rotated, translated x which should be
            closer to x_target. [..., N_atom, 3]
    """
    # Weight must either match x's batch layout or be a flat per-atom vector.
    if len(atom_weight.shape) == len(x.shape) - 1:
        assert atom_weight.shape[:-1] == x.shape[:-2]
    else:
        assert len(atom_weight.shape) == 1 and atom_weight.shape[-1] == x.shape[-2]

    if stop_gradient:
        with torch.no_grad():
            aligned, _rot, _trans = align_pred_to_true(
                pred_pose=x,
                true_pose=x_target,
                atom_mask=None,
                weight=atom_weight,
                allowing_reflection=False,
            )
        return aligned.detach()

    aligned, _rot, _trans = align_pred_to_true(
        pred_pose=x,
        true_pose=x_target,
        atom_mask=None,
        weight=atom_weight,
        allowing_reflection=False,
    )
    return aligned
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/__init__.py
ADDED
|
File without changes
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/generator.py
ADDED
|
@@ -0,0 +1,332 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from typing import Any, Callable, Optional
|
| 16 |
+
|
| 17 |
+
import torch
|
| 18 |
+
|
| 19 |
+
from protenix.model.utils import centre_random_augmentation
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class TrainingNoiseSampler:
    """
    Draws per-sample noise levels for diffusion training.

    Levels follow a log-normal distribution scaled by ``sigma_data``,
    as in EDM (Karras et al., 2022).
    """

    def __init__(
        self,
        p_mean: float = -1.2,
        p_std: float = 1.5,
        sigma_data: float = 16.0,  # NOTE: in EDM, this is 1.0
    ) -> None:
        """Configure the log-normal noise-level distribution.

        Args:
            p_mean (float, optional): mean of the underlying Gaussian. Defaults to -1.2.
            p_std (float, optional): std of the underlying Gaussian. Defaults to 1.5.
            sigma_data (float, optional): scale. Defaults to 16.0, but this is 1.0 in EDM.
        """
        self.p_mean = p_mean
        self.p_std = p_std
        self.sigma_data = sigma_data
        print(f"train scheduler {self.sigma_data}")

    def __call__(
        self, size: torch.Size, device: torch.device = torch.device("cpu")
    ) -> torch.Tensor:
        """Sample noise levels with the given shape.

        Args:
            size (torch.Size): the target shape.
            device (torch.device, optional): target device. Defaults to torch.device("cpu").

        Returns:
            torch.Tensor: sampled noise levels, shape ``size``.
        """
        gaussian = torch.randn(size=size, device=device)
        return torch.exp(gaussian * self.p_std + self.p_mean) * self.sigma_data
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class InferenceNoiseScheduler:
    """
    Deterministic noise-level (time-step) schedule used at inference.
    """

    def __init__(
        self,
        s_max: float = 160.0,
        s_min: float = 4e-4,
        rho: float = 7,
        sigma_data: float = 16.0,  # NOTE: in EDM, this is 1.0
    ) -> None:
        """Scheduler parameters.

        Args:
            s_max (float, optional): maximal noise level. Defaults to 160.0.
            s_min (float, optional): minimal noise level. Defaults to 4e-4.
            rho (float, optional): the exponent numerical part. Defaults to 7.
            sigma_data (float, optional): scale. Defaults to 16.0, but this is 1.0 in EDM.
        """
        self.s_max = s_max
        self.s_min = s_min
        self.rho = rho
        self.sigma_data = sigma_data
        print(f"inference scheduler {self.sigma_data}")

    def __call__(
        self,
        N_step: int = 200,
        device: torch.device = torch.device("cpu"),
        dtype: torch.dtype = torch.float32,
    ) -> torch.Tensor:
        """Build the noise-level schedule. No sampling is performed.

        Args:
            N_step (int, optional): number of time steps. Defaults to 200.
            device (torch.device, optional): target device. Defaults to torch.device("cpu").
            dtype (torch.dtype, optional): target dtype. Defaults to torch.float32.

        Returns:
            torch.Tensor: noise levels (time steps), shape [N_step + 1];
            the final entry is forced to exactly 0.
        """
        step_size = 1 / N_step
        # Interpolate between s_max and s_min in rho-warped space (EDM schedule).
        hi = self.s_max ** (1 / self.rho)
        lo = self.s_min ** (1 / self.rho)
        step_indices = torch.arange(N_step + 1, device=device, dtype=dtype)
        schedule = (
            self.sigma_data * (hi + step_indices * step_size * (lo - hi)) ** self.rho
        )
        # Replace the last time step by 0 (t_N = 0).
        schedule[..., -1] = 0
        return schedule
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def sample_diffusion(
    denoise_net: Callable,
    input_feature_dict: dict[str, Any],
    s_inputs: torch.Tensor,
    s_trunk: torch.Tensor,
    z_trunk: torch.Tensor,
    noise_schedule: torch.Tensor,
    N_sample: int = 1,
    gamma0: float = 0.8,
    gamma_min: float = 1.0,
    noise_scale_lambda: float = 1.003,
    step_scale_eta: float = 1.5,
    diffusion_chunk_size: Optional[int] = None,
    inplace_safe: bool = False,
    attn_chunk_size: Optional[int] = None,
) -> torch.Tensor:
    """Implements Algorithm 18 in AF3.

    It performs denoising steps from time 0 to time T.
    The time steps (=noise levels) are given by noise_schedule.

    Args:
        denoise_net (Callable): the network that performs the denoising step.
        input_feature_dict (dict[str, Any]): input meta feature dict
        s_inputs (torch.Tensor): single embedding from InputFeatureEmbedder
            [..., N_tokens, c_s_inputs]
        s_trunk (torch.Tensor): single feature embedding from PairFormer (Alg17)
            [..., N_tokens, c_s]
        z_trunk (torch.Tensor): pair feature embedding from PairFormer (Alg17)
            [..., N_tokens, N_tokens, c_z]
        noise_schedule (torch.Tensor): noise-level schedule (which is also the time steps) since sigma=t.
            [N_iterations]
        N_sample (int): number of generated samples
        gamma0 (float): params in Alg.18.
        gamma_min (float): params in Alg.18.
        noise_scale_lambda (float): params in Alg.18.
        step_scale_eta (float): params in Alg.18.
        diffusion_chunk_size (Optional[int]): Chunk size for diffusion operation. Defaults to None.
        inplace_safe (bool): Whether to use inplace operations safely. Defaults to False.
        attn_chunk_size (Optional[int]): Chunk size for attention operation. Defaults to None.

    Returns:
        torch.Tensor: the denoised coordinates of x in inference stage
            [..., N_sample, N_atom, 3]
    """
    N_atom = input_feature_dict["atom_to_token_idx"].size(-1)
    batch_shape = s_inputs.shape[:-2]
    device = s_inputs.device
    dtype = s_inputs.dtype

    def _chunk_sample_diffusion(chunk_n_sample, inplace_safe):
        # Run the full denoising trajectory for `chunk_n_sample` samples.
        # init noise at the highest noise level
        # [..., N_sample, N_atom, 3]
        x_l = noise_schedule[0] * torch.randn(
            size=(*batch_shape, chunk_n_sample, N_atom, 3), device=device, dtype=dtype
        )  # NOTE: set seed in distributed training

        # Walk consecutive pairs of the schedule: (t_{i-1}, t_i).
        for _, (c_tau_last, c_tau) in enumerate(
            zip(noise_schedule[:-1], noise_schedule[1:])
        ):
            # Re-center and randomly rotate/translate before each step.
            # [..., N_sample, N_atom, 3]
            x_l = (
                centre_random_augmentation(x_input_coords=x_l, N_sample=1)
                .squeeze(dim=-3)
                .to(dtype)
            )

            # Denoise with a predictor-corrector sampler
            # 1. Add noise to move x_{c_tau_last} to x_{t_hat}
            #    (gamma "churn" is only applied while c_tau > gamma_min)
            gamma = float(gamma0) if c_tau > gamma_min else 0
            t_hat = c_tau_last * (gamma + 1)

            delta_noise_level = torch.sqrt(t_hat**2 - c_tau_last**2)
            x_noisy = x_l + noise_scale_lambda * delta_noise_level * torch.randn(
                size=x_l.shape, device=device, dtype=dtype
            )

            # 2. Denoise from x_{t_hat} to x_{c_tau}
            # Euler step only
            t_hat = (
                t_hat.reshape((1,) * (len(batch_shape) + 1))
                .expand(*batch_shape, chunk_n_sample)
                .to(dtype)
            )

            x_denoised = denoise_net(
                x_noisy=x_noisy,
                t_hat_noise_level=t_hat,
                input_feature_dict=input_feature_dict,
                s_inputs=s_inputs,
                s_trunk=s_trunk,
                z_trunk=z_trunk,
                chunk_size=attn_chunk_size,
                inplace_safe=inplace_safe,
            )

            # Score direction estimated from the denoiser output.
            delta = (x_noisy - x_denoised) / t_hat[
                ..., None, None
            ]  # Line 9 of AF3 uses 'x_l_hat' instead, which we believe is a typo.
            dt = c_tau - t_hat
            x_l = x_noisy + step_scale_eta * dt[..., None, None] * delta

        return x_l

    if diffusion_chunk_size is None:
        x_l = _chunk_sample_diffusion(N_sample, inplace_safe=inplace_safe)
    else:
        # Split the N_sample trajectories into chunks to bound peak memory.
        x_l = []
        no_chunks = N_sample // diffusion_chunk_size + (
            N_sample % diffusion_chunk_size != 0
        )
        for i in range(no_chunks):
            # The last chunk may be smaller than diffusion_chunk_size.
            chunk_n_sample = (
                diffusion_chunk_size
                if i < no_chunks - 1
                else N_sample - i * diffusion_chunk_size
            )
            chunk_x_l = _chunk_sample_diffusion(
                chunk_n_sample, inplace_safe=inplace_safe
            )
            x_l.append(chunk_x_l)
        x_l = torch.cat(x_l, -3)  # [..., N_sample, N_atom, 3]
    return x_l
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
def sample_diffusion_training(
    noise_sampler: TrainingNoiseSampler,
    denoise_net: Callable,
    label_dict: dict[str, Any],
    input_feature_dict: dict[str, Any],
    s_inputs: torch.Tensor,
    s_trunk: torch.Tensor,
    z_trunk: torch.Tensor,
    N_sample: int = 1,
    diffusion_chunk_size: Optional[int] = None,
) -> tuple[torch.Tensor, ...]:
    """Implements diffusion training as described in AF3 Appendix at page 23.

    Each training sample gets an independently augmented copy of the ground
    truth and an independently sampled noise level.

    Args:
        noise_sampler (TrainingNoiseSampler): sampler for per-sample noise levels.
        denoise_net (Callable): the network that performs the denoising step.
        label_dict (dict, optional): a dictionary containing the followings.
            "coordinate": the ground-truth coordinates
                [..., N_atom, 3]
            "coordinate_mask": whether true coordinates exist.
                [..., N_atom]
        input_feature_dict (dict[str, Any]): input meta feature dict
        s_inputs (torch.Tensor): single embedding from InputFeatureEmbedder
            [..., N_tokens, c_s_inputs]
        s_trunk (torch.Tensor): single feature embedding from PairFormer (Alg17)
            [..., N_tokens, c_s]
        z_trunk (torch.Tensor): pair feature embedding from PairFormer (Alg17)
            [..., N_tokens, N_tokens, c_z]
        N_sample (int): number of training samples
        diffusion_chunk_size (Optional[int]): run denoise_net over samples in
            chunks of this size to bound peak memory. Defaults to None (no chunking).

    Returns:
        tuple[torch.Tensor, ...]:
            x_gt_augment: augmented ground-truth coordinates [..., N_sample, N_atom, 3]
            x_denoised: denoised coordinates [..., N_sample, N_atom, 3]
            sigma: sampled noise levels [..., N_sample]
    """
    batch_size_shape = label_dict["coordinate"].shape[:-2]
    device = label_dict["coordinate"].device
    dtype = label_dict["coordinate"].dtype
    # Create N_sample versions of the input structure by randomly rotating and translating
    x_gt_augment = centre_random_augmentation(
        x_input_coords=label_dict["coordinate"],
        N_sample=N_sample,
        mask=label_dict["coordinate_mask"],
    ).to(
        dtype
    )  # [..., N_sample, N_atom, 3]

    # Add independent noise to each structure
    # sigma: independent noise-level [..., N_sample]
    sigma = noise_sampler(size=(*batch_size_shape, N_sample), device=device).to(dtype)
    # noise: [..., N_sample, N_atom, 3]
    noise = torch.randn_like(x_gt_augment, dtype=dtype) * sigma[..., None, None]

    # Get denoising outputs [..., N_sample, N_atom, 3]
    if diffusion_chunk_size is None:
        x_denoised = denoise_net(
            x_noisy=x_gt_augment + noise,
            t_hat_noise_level=sigma,
            input_feature_dict=input_feature_dict,
            s_inputs=s_inputs,
            s_trunk=s_trunk,
            z_trunk=z_trunk,
        )
    else:
        # Chunk over the sample dimension to limit peak memory.
        x_denoised = []
        no_chunks = N_sample // diffusion_chunk_size + (
            N_sample % diffusion_chunk_size != 0
        )
        for i in range(no_chunks):
            x_noisy_i = (x_gt_augment + noise)[
                ..., i * diffusion_chunk_size : (i + 1) * diffusion_chunk_size, :, :
            ]
            t_hat_noise_level_i = sigma[
                ..., i * diffusion_chunk_size : (i + 1) * diffusion_chunk_size
            ]
            x_denoised_i = denoise_net(
                x_noisy=x_noisy_i,
                t_hat_noise_level=t_hat_noise_level_i,
                input_feature_dict=input_feature_dict,
                s_inputs=s_inputs,
                s_trunk=s_trunk,
                z_trunk=z_trunk,
            )
            x_denoised.append(x_denoised_i)
        x_denoised = torch.cat(x_denoised, dim=-3)

    return x_gt_augment, x_denoised, sigma
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/layer_norm/__init__.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Copyright 2021- HPC-AI Technology Inc.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http:#www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
from .layer_norm import FusedLayerNorm
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/layer_norm/kernel/compat.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// modified from https://github.com/NVIDIA/apex/blob/master/csrc/compat.h
|
| 2 |
+
// Copyright 2021- HPC-AI Technology Inc.
|
| 3 |
+
//
|
| 4 |
+
// Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
// you may not use this file except in compliance with the License.
|
| 6 |
+
// You may obtain a copy of the License at
|
| 7 |
+
//
|
| 8 |
+
// http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
//
|
| 10 |
+
// Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
// distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
// See the License for the specific language governing permissions and
|
| 14 |
+
// limitations under the License.
|
| 15 |
+
|
| 16 |
+
#ifndef TORCH_CHECK
|
| 17 |
+
#define TORCH_CHECK AT_CHECK
|
| 18 |
+
#endif
|
| 19 |
+
|
| 20 |
+
#ifdef VERSION_GE_1_3
|
| 21 |
+
#define DATA_PTR data_ptr
|
| 22 |
+
#else
|
| 23 |
+
#define DATA_PTR data
|
| 24 |
+
#endif
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/layer_norm/kernel/layer_norm_cuda.cpp
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright 2021- HPC-AI Technology Inc.
|
| 2 |
+
//
|
| 3 |
+
// Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
// you may not use this file except in compliance with the License.
|
| 5 |
+
// You may obtain a copy of the License at
|
| 6 |
+
//
|
| 7 |
+
// http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
//
|
| 9 |
+
// Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
// distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
// See the License for the specific language governing permissions and
|
| 13 |
+
|
| 14 |
+
#include <torch/extension.h>
|
| 15 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 16 |
+
|
| 17 |
+
#include <cassert>
|
| 18 |
+
#include <vector>
|
| 19 |
+
|
| 20 |
+
#include "compat.h"
|
| 21 |
+
|
| 22 |
+
// Factor the input's shape into (n1, n2): n1 is the product of the leading
// (batch) dimensions, n2 the product of the trailing dimensions, which must
// match normalized_shape element-wise (checked via assert).
void compute_n1_n2(at::Tensor input, at::IntArrayRef normalized_shape, int& n1, int& n2) {
    const int norm_ndim = (int)normalized_shape.size();
    const int batch_ndim = input.ndimension() - norm_ndim;
    const auto input_sizes = input.sizes();

    n1 = 1;
    for (int d = 0; d < batch_ndim; ++d) {
        n1 *= input_sizes[d];
    }

    n2 = 1;
    for (int d = 0; d < norm_ndim; ++d) {
        assert(input_sizes[d + batch_ndim] == normalized_shape[d]);
        n2 *= normalized_shape[d];
    }
}
|
| 34 |
+
|
| 35 |
+
// Validate the affine parameters: when defined, gamma and beta must have
// exactly normalized_shape.
void check_args(at::IntArrayRef normalized_shape, at::Tensor gamma, at::Tensor beta) {
    TORCH_CHECK(!gamma.defined() || gamma.sizes().equals(normalized_shape));
    TORCH_CHECK(!beta.defined() || beta.sizes().equals(normalized_shape));
}
|
| 39 |
+
|
| 40 |
+
// Validate that input's trailing dimensions equal normalized_shape, then
// compute the (n1, n2) row/column factorization used by the kernels.
// Throws std::runtime_error with a descriptive message on mismatch.
void check_args(at::Tensor input, at::IntArrayRef normalized_shape, int& n1, int& n2) {
    int64_t normalized_ndim = normalized_shape.size();

    // normalized_shape must describe at least one dimension.
    if (normalized_ndim < 1) {
        std::stringstream ss;
        ss << "Expected normalized_shape to be at least 1-dimensional, i.e., "
           << "containing at least one element, but got normalized_shape=" << normalized_shape;
        throw std::runtime_error(ss.str());
    }

    auto input_shape = input.sizes();
    auto input_ndim = input.dim();

    // The trailing dims of input must match normalized_shape exactly.
    if (input_ndim < normalized_ndim ||
        !input_shape.slice(input_ndim - normalized_ndim).equals(normalized_shape)) {
        std::stringstream ss;
        ss << "Given normalized_shape=" << normalized_shape << ", expected input with shape [*";
        for (auto size : normalized_shape) {
            ss << ", " << size;
        }
        ss << "], but got input of size" << input_shape;
        throw std::runtime_error(ss.str());
    }

    compute_n1_n2(input, normalized_shape, n1, n2);
}
|
| 66 |
+
|
| 67 |
+
// Combined validation: shape checks on input (filling n1/n2) plus the
// affine-parameter checks on gamma/beta.
void check_args(at::Tensor input, at::IntArrayRef normalized_shape, at::Tensor gamma,
                at::Tensor beta, int& n1, int& n2) {
    check_args(input, normalized_shape, n1, n2);
    check_args(normalized_shape, gamma, beta);
}
|
| 72 |
+
|
| 73 |
+
void cuda_layer_norm(at::Tensor* output, at::Tensor* mean, at::Tensor* invvar, at::Tensor* input,
|
| 74 |
+
int n1, int n2, at::IntArrayRef normalized_shape, at::Tensor* gamma,
|
| 75 |
+
at::Tensor* beta, double epsilon);
|
| 76 |
+
|
| 77 |
+
#define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor")
|
| 78 |
+
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
|
| 79 |
+
#define CHECK_INPUT(x) \
|
| 80 |
+
CHECK_CUDA(x); \
|
| 81 |
+
CHECK_CONTIGUOUS(x)
|
| 82 |
+
|
| 83 |
+
// Forward LayerNorm with affine (gamma/beta) parameters.
// Returns {output, mean, invvar}: mean and invvar are float32 tensors of
// shape {n1} (one entry per normalized row), saved for the backward pass.
std::vector<at::Tensor> layer_norm_affine(at::Tensor input, at::IntArrayRef normalized_shape,
                                          at::Tensor gamma, at::Tensor beta, double epsilon) {
    CHECK_INPUT(input);
    CHECK_INPUT(gamma);
    CHECK_INPUT(beta);
    int n1, n2;
    check_args(input, normalized_shape, gamma, beta, n1, n2);

    // Launch on the device that holds the input tensor.
    const at::cuda::OptionalCUDAGuard device_guard(device_of(input));

    // Output takes gamma's dtype; row statistics are stored in float32.
    at::Tensor output = at::empty_like(input, gamma.options().dtype(gamma.scalar_type()));
    at::Tensor mean = at::empty({n1}, input.options().dtype(at::ScalarType::Float));
    at::Tensor invvar = at::empty_like(mean);

    cuda_layer_norm(&output, &mean, &invvar, &input, n1, n2, normalized_shape, &gamma, &beta,
                    epsilon);

    return {output, mean, invvar};
}
|
| 102 |
+
|
| 103 |
+
void cuda_layer_norm_gradient(at::Tensor* dout, at::Tensor* mean, at::Tensor* invvar,
|
| 104 |
+
at::Tensor* input, int n1, int n2, at::IntArrayRef normalized_shape,
|
| 105 |
+
at::Tensor* gamma, at::Tensor* beta, double epsilon,
|
| 106 |
+
at::Tensor* grad_input, at::Tensor* grad_gamma,
|
| 107 |
+
at::Tensor* grad_beta);
|
| 108 |
+
|
| 109 |
+
// Backward LayerNorm with affine parameters.
// Takes the upstream gradient `dout` plus the saved forward statistics
// (mean, invvar) and returns {grad_input, grad_gamma, grad_beta}.
std::vector<at::Tensor> layer_norm_gradient_affine(at::Tensor dout, at::Tensor mean,
                                                   at::Tensor invvar, at::Tensor input,
                                                   at::IntArrayRef normalized_shape,
                                                   at::Tensor gamma, at::Tensor beta,
                                                   double epsilon) {
    CHECK_INPUT(dout);
    CHECK_INPUT(mean);
    CHECK_INPUT(invvar);
    CHECK_INPUT(input);
    CHECK_INPUT(gamma);
    CHECK_INPUT(beta);
    int n1, n2;
    check_args(input, normalized_shape, gamma, beta, n1, n2);

    // Launch on the device that holds the input tensor.
    const at::cuda::OptionalCUDAGuard device_guard(device_of(input));

    at::Tensor grad_input = at::empty_like(input);
    at::Tensor grad_gamma = at::empty_like(gamma);
    at::Tensor grad_beta = at::empty_like(beta);

    cuda_layer_norm_gradient(&dout, &mean, &invvar, &input, n1, n2, normalized_shape, &gamma, &beta,
                             epsilon, &grad_input, &grad_gamma, &grad_beta);

    return {grad_input, grad_gamma, grad_beta};
}
|
| 134 |
+
|
| 135 |
+
// Python bindings for the fused LayerNorm CUDA ops.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward_affine", &layer_norm_affine, "LayerNorm forward (CUDA)");
    m.def("backward_affine", &layer_norm_gradient_affine, "LayerNorm backward (CUDA)");
}
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/layer_norm/kernel/layer_norm_cuda_kernel.cu
ADDED
|
@@ -0,0 +1,409 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
// Copyright 2020 The OneFlow Authors.
|
| 3 |
+
// Copyright 2021- HPC-AI Technology Inc.
|
| 4 |
+
//
|
| 5 |
+
// Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
// you may not use this file except in compliance with the License.
|
| 7 |
+
// You may obtain a copy of the License at
|
| 8 |
+
//
|
| 9 |
+
// http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
//
|
| 11 |
+
// Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
// distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
// See the License for the specific language governing permissions and
|
| 15 |
+
// limitations under the License.
|
| 16 |
+
#include <cooperative_groups.h>
|
| 17 |
+
#include <cuda.h>
|
| 18 |
+
#include <cuda_runtime.h>
|
| 19 |
+
#include <torch/extension.h>
|
| 20 |
+
#include <iostream>
|
| 21 |
+
|
| 22 |
+
#include <THC/THCDeviceUtils.cuh>
|
| 23 |
+
|
| 24 |
+
#include "ATen/ATen.h"
|
| 25 |
+
#include "ATen/AccumulateType.h"
|
| 26 |
+
#include "ATen/cuda/CUDAContext.h"
|
| 27 |
+
#include "compat.h"
|
| 28 |
+
#include "type_shim.h"
|
| 29 |
+
|
| 30 |
+
#define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor")
|
| 31 |
+
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
|
| 32 |
+
#define CHECK_INPUT(x) \
|
| 33 |
+
CHECK_CUDA(x); \
|
| 34 |
+
CHECK_CONTIGUOUS(x)
|
| 35 |
+
|
| 36 |
+
#define WarpNum 8
|
| 37 |
+
#define WarpSize 32
|
| 38 |
+
#define BlockSzie WarpNum*WarpSize
|
| 39 |
+
|
| 40 |
+
// One step of Welford's online algorithm: fold `val` into the running
// accumulators — mean, m2 (sum of squared deviations), and element count.
inline __device__ void WelfordOnline(float val, float* mean, float* m2, float* count) {
    *count += 1;
    float delta1 = val - *mean;
    *mean += delta1 / (*count);
    // delta2 uses the *updated* mean; delta1*delta2 is the standard
    // numerically stable m2 increment.
    float delta2 = val - *mean;
    *m2 += delta1 * delta2;
}
|
| 47 |
+
|
| 48 |
+
// Merge a partial Welford accumulator (b_mean, b_m2, b_count) into the
// running one (parallel/pairwise combination of Welford statistics).
inline __device__ void WelfordOnline(float b_mean, float b_m2, float b_count, float* mean,
                                     float* m2, float* count) {
    if (b_count == 0) {
        return;  // nothing to merge; also avoids dividing by new_count below
    }
    float new_count = *count + b_count;
    float nb_n = b_count / new_count;  // weight of the incoming partial
    float delta = b_mean - *mean;
    *mean += delta * nb_n;
    *m2 += b_m2 + delta * delta * (*count) * nb_n;
    *count = new_count;
}
|
| 60 |
+
|
| 61 |
+
// Reduce per-thread Welford accumulators across a full warp, then broadcast
// the combined (mean, m2, count) from lane 0 so every lane holds the result.
__inline__ __device__ void WelfordWarpAllReduce(float thread_mean, float thread_m2,
                                                float thread_count, float* mean, float* m2,
                                                float* count) {
    *mean = thread_mean;
    *m2 = thread_m2;
    *count = thread_count;
    // Tree reduction over lanes via down-shuffles with offsets 1,2,4,8,16;
    // after the loop lane 0 holds the warp-wide statistics.
    for (int mask = 1; mask < 32; mask *= 2) {
        float b_mean = __shfl_down_sync(0xffffffff, *mean, mask);
        float b_m2 = __shfl_down_sync(0xffffffff, *m2, mask);
        float b_count = __shfl_down_sync(0xffffffff, *count, mask);
        WelfordOnline(b_mean, b_m2, b_count, mean, m2, count);
    }

    // Broadcast lane 0's result to all 32 lanes.
    *mean = __shfl_sync(0xffffffff, *mean, 0, 32);
    *m2 = __shfl_sync(0xffffffff, *m2, 0, 32);
    *count = __shfl_sync(0xffffffff, *count, 0, 32);
}
|
| 78 |
+
|
| 79 |
+
extern __shared__ float shared_data[];
|
| 80 |
+
// Fused LayerNorm forward kernel. Each warp normalizes one row of `cols`
// elements; a block carries WarpNum warps, and each warp stages its row as
// floats in the dynamic shared-memory buffer `shared_data`.
template <typename T>
__global__ void LayerNormForward(T* input, T* output, T* gamma, T* beta, float* mean,
                                 float* invvar, int rows, int cols, double epsilon) {
    int warp_id = threadIdx.x / WarpSize;
    int lane_id = threadIdx.x % WarpSize;
    int row_offset = blockIdx.x * WarpNum + warp_id;  // row handled by this warp

    // This warp's slice of the shared buffer (cols floats per warp).
    float* shared_data_warp = shared_data + warp_id*cols;

    if (row_offset < rows) {
        // 64-bit offsets guard against overflow for large rows*cols.
        T* row_input = input + (long long)(row_offset) * (long long)(cols); // Starting point for input data
        T* row_output = output + (long long)(row_offset) * (long long)(cols); // Starting point for output data

        float thread_mean = 0.f;
        float thread_m2 = 0.f;
        float thread_count = 0.f;

        float warp_mean;
        float warp_m2;
        float warp_count;
        // load data to shared memory, accumulating per-lane Welford stats on the fly
        #pragma unroll
        for(int idx = lane_id; idx < cols; idx += WarpSize) {
            shared_data_warp[idx] = static_cast<float>(row_input[idx]);
            WelfordOnline(shared_data_warp[idx], &thread_mean, &thread_m2, &thread_count);
        }

        // Combine the 32 per-lane accumulators into whole-row statistics.
        WelfordWarpAllReduce(thread_mean, thread_m2, thread_count, &warp_mean, &warp_m2,
                             &warp_count);

        float row_mean = warp_mean;
        float row_variance = max(warp_m2 / warp_count, 0.f);  // clamp tiny negative values
        float row_inv_var = rsqrt(row_variance + epsilon);
        // One lane records the saved statistics for the backward pass.
        if (lane_id == 0) {
            mean[row_offset] = row_mean;
            invvar[row_offset] = row_inv_var;
        }

        // Normalize and apply the affine (gamma/beta) transform.
        #pragma unroll
        for(int idx = lane_id; idx < cols; idx += WarpSize) {
            row_output[idx] = static_cast<T>((shared_data_warp[idx] - row_mean) * row_inv_var) * gamma[idx] + beta[idx];
        }
    }
}
|
| 124 |
+
|
| 125 |
+
// Launch the fused LayerNorm forward kernel, dispatching on the output dtype.
// Each warp normalizes one row; WarpNum rows are processed per block, with
// one row of floats per warp staged in dynamic shared memory.
void cuda_layer_norm(at::Tensor* output, at::Tensor* mean, at::Tensor* invvar, at::Tensor* input,
                     int rows, int cols, at::IntArrayRef normalized_shape, at::Tensor* gamma,
                     at::Tensor* beta, double epsilon) {
    const int num_blocks = (rows + WarpNum - 1) / WarpNum;  // each warp processes one row
    const dim3 block_dim(BlockSzie);
    // Dynamic shared memory: one float row per warp.
    const int smem_bytes = WarpNum * sizeof(float) * cols;

    const auto out_dtype = output->dtype();
    if (out_dtype == torch::kFloat32) {
        LayerNormForward<float><<<num_blocks, block_dim, smem_bytes>>>(
            (float*)input->data_ptr(), (float*)output->data_ptr(), (float*)gamma->data_ptr(),
            (float*)beta->data_ptr(), (float*)mean->data_ptr(), (float*)invvar->data_ptr(), rows,
            cols, epsilon);
    } else if (out_dtype == torch::kFloat16) {
        LayerNormForward<at::Half><<<num_blocks, block_dim, smem_bytes>>>(
            (at::Half*)input->data_ptr(), (at::Half*)output->data_ptr(),
            (at::Half*)gamma->data_ptr(), (at::Half*)beta->data_ptr(), (float*)mean->data_ptr(),
            (float*)invvar->data_ptr(), rows, cols, epsilon);
    } else if (out_dtype == torch::kBFloat16) {
        LayerNormForward<at::BFloat16><<<num_blocks, block_dim, smem_bytes>>>(
            (at::BFloat16*)input->data_ptr(), (at::BFloat16*)output->data_ptr(),
            (at::BFloat16*)gamma->data_ptr(), (at::BFloat16*)beta->data_ptr(),
            (float*)mean->data_ptr(), (float*)invvar->data_ptr(), rows, cols, epsilon);
    }
    // NOTE(review): unsupported dtypes silently launch nothing, leaving the
    // output uninitialized — consider a TORCH_CHECK here.
}
|
| 149 |
+
|
| 150 |
+
// Helper for obtaining a typed pointer to the block's dynamically sized
// (extern __shared__) memory. Only specialized types may be used; an
// unspecialized T fails at link time, which catches accidental instantiation.
template <typename T>
struct SharedMemory;

template <>
struct SharedMemory<float> {
    // Returns the base address of this block's dynamic shared memory as float*.
    __device__ float* getPointer() {
        extern __shared__ float s_float[];
        return s_float;
    }
};
|
| 160 |
+
|
| 161 |
+
// Butterfly (XOR-shuffle) sum across the 32 lanes of a warp.
// After the final step every lane holds the warp-wide sum of `val`.
template <typename T>
__inline__ __device__ T WarpReduce(T val) {
    for (int lane_mask = 16; lane_mask; lane_mask >>= 1) {
        val += __shfl_xor_sync(0xffffffff, val, lane_mask);
    }
    return val;
}
|
| 166 |
+
|
| 167 |
+
// Tiling parameters for the gamma/beta gradient reduction kernels.
constexpr int tile_size = 32;                     // 32x32 tile handled per block
constexpr int num_per_block = 4;                  // row-strips accumulated per thread
constexpr int block_dim_x = 32;                   // one warp along the column axis
constexpr int block_dim_y = 32 / num_per_block;   // 8 thread rows cover a 32-row tile
|
| 171 |
+
|
| 172 |
+
// Backward step 1 for the affine parameters. Grid: (ceil(cols/32), gridDim.y)
// blocks, each covering a 32-column tile and a strided strip of rows. Every
// thread accumulates num_per_block partial sums of
//   dgamma_j += dy * (x - mean) * inv_var   and   dbeta_j += dy,
// the 32x32 tile is transposed through shared memory (33-wide rows pad away
// bank conflicts), and one warp per output column finishes with a shuffle
// reduction. Results are per-blockIdx.y partial rows in
// tmp_gamma_diff/tmp_beta_diff (logical shape [gridDim.y, cols]), combined by
// LayerNormParamGradStep2.
template <typename T, typename U, typename V>
__global__ void LayerNormParamGradStep1(int rows, int cols, const V* __restrict__ dy,
                                        const T* __restrict__ x, const U* __restrict__ mean,
                                        const U* __restrict__ inv_var,
                                        U* __restrict__ tmp_gamma_diff, U* __restrict__ tmp_beta_diff) {
    // Transpose staging tiles; the extra column avoids shared-memory bank conflicts.
    __shared__ U dgamma[32][33];
    __shared__ U dbeta[32][33];
    U dgamma_sum[num_per_block];
    U dbeta_sum[num_per_block];
#pragma unroll
    for (int index = 0; index < num_per_block; ++index) {
        dgamma_sum[index] = 0;
        dbeta_sum[index] = 0;
    }
    const int col_id = blockIdx.x * blockDim.x + threadIdx.x;
    if (col_id < cols) {
        // Grid-strided over 32-row tiles; each thread owns num_per_block rows per tile.
        for (int i = blockIdx.y * tile_size + threadIdx.y; i < rows; i += tile_size * gridDim.y) {
#pragma unroll
            for (int index = 0; index < num_per_block; ++index) {
                int row_id = i + index * blockDim.y;
                if (row_id < rows) {
                    int offset = row_id * cols + col_id;
                    const U dy_val = static_cast<U>(dy[offset]);
                    const U x_val = static_cast<U>(x[offset]);
                    const U mean_val = mean[row_id];
                    const U inv_var_val = inv_var[row_id];
                    dgamma_sum[index] += dy_val * (x_val - mean_val) * inv_var_val;
                    dbeta_sum[index] += dy_val;
                }
            }
        }
    }
    // Stage partial sums so the tile can be read transposed below.
#pragma unroll
    for (int index = 0; index < num_per_block; ++index) {
        dgamma[index * blockDim.y + threadIdx.y][threadIdx.x] = dgamma_sum[index];
        dbeta[index * blockDim.y + threadIdx.y][threadIdx.x] = dbeta_sum[index];
    }
    __syncthreads();
#pragma unroll
    for (int index = 0; index < num_per_block; ++index) {
        // Note: intentionally shadows the outer col_id with the transposed column.
        const int col_id = blockIdx.x * blockDim.x + threadIdx.y + index * blockDim.y;
        if (col_id < cols) {
            U gamma_sum = dgamma[threadIdx.x][threadIdx.y + index * blockDim.y];
            U beta_sum = dbeta[threadIdx.x][threadIdx.y + index * blockDim.y];
            U global_dgamma = WarpReduce<U>(gamma_sum);
            U global_dbeta = WarpReduce<U>(beta_sum);
            if (threadIdx.x == 0) {
                // One partial result per (blockIdx.y, column).
                const int offset = blockIdx.y * cols + col_id;
                tmp_gamma_diff[offset] = global_dgamma;
                tmp_beta_diff[offset] = global_dbeta;
            }
        }
    }
}
|
| 226 |
+
|
| 227 |
+
// Backward step 2 for the affine parameters: one thread column per feature i2,
// blockDim.y threads cooperatively sum the part_size partial rows produced by
// LayerNormParamGradStep1, tree-reducing through the dynamic shared buffer
// `buf` (layout: [gamma sums | beta sums], nbsize3 elements each).
//
// Fix vs. original: every thread of the block now reaches every
// __syncthreads(). Previously the whole body — barriers included — was guarded
// by `i2 < n2`, so when n2 was not a multiple of blockDim.x the out-of-range
// threads skipped barriers that in-range threads were waiting on, which is
// undefined behavior in CUDA.
template <typename U, typename V>
__global__ void LayerNormParamGradStep2(const U* part_grad_gamma, const U* part_grad_beta,
                                        const int part_size, const int n1, const int n2,
                                        V* grad_gamma, V* grad_beta) {
    SharedMemory<U> shared;
    U* buf = shared.getPointer();
    const int i2 = blockIdx.x * blockDim.x + threadIdx.x;
    const bool in_range = i2 < n2;

    // Sequential per-thread accumulation over the partial-sum rows.
    U sum_gamma = U(0);
    U sum_beta = U(0);
    if (in_range) {
        const U* part_grad_gamma_ptr = part_grad_gamma + i2;
        const U* part_grad_beta_ptr = part_grad_beta + i2;
        for (int row_idx = threadIdx.y; row_idx < part_size; row_idx += blockDim.y) {
            sum_gamma += part_grad_gamma_ptr[row_idx * n2];
            sum_beta += part_grad_beta_ptr[row_idx * n2];
        }
    }

    // Inter-row tree reduction through shared memory. Out-of-range threads
    // participate with zero sums so all threads hit the barriers; their
    // columns of `buf` are never read by in-range threads (indexing is by
    // threadIdx.x, which identifies the column).
    const int nbsize3 = blockDim.x * blockDim.y / 2;
    for (int offset = blockDim.y / 2; offset >= 1; offset /= 2) {
        // Top half writes its running sums to shared memory.
        if (threadIdx.y >= offset && threadIdx.y < 2 * offset) {
            const int write_idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x;
            buf[write_idx] = sum_gamma;
            buf[write_idx + nbsize3] = sum_beta;
        }
        __syncthreads();
        // Bottom half folds them in.
        if (threadIdx.y < offset) {
            const int read_idx = threadIdx.y * blockDim.x + threadIdx.x;
            sum_gamma += buf[read_idx];
            sum_beta += buf[read_idx + nbsize3];
        }
        __syncthreads();
    }

    // Row 0 holds the fully reduced sums; write them out (cast U -> V).
    if (in_range && threadIdx.y == 0) {
        grad_gamma[i2] = sum_gamma;
        grad_beta[i2] = sum_beta;
    }
}
|
| 271 |
+
|
| 272 |
+
// Backward pass for the input: one warp per row. Each warp stages its row of
// dout and input (plus one block-shared copy of gamma) in dynamic shared
// memory, warp-reduces sum(gamma*dy) and sum(gamma*dy*(x-mean)), and applies
// the LayerNorm input-gradient formula
//   dx = gamma*dy*rstd - mean_j(gamma*dy)*rstd
//        - (x-mean) * rstd^3 * mean_j(gamma*dy*(x-mean)).
// `epsilon` is unused here because `invvar` is precomputed by the forward pass.
// NOTE(review): `shared_data` is not declared in this excerpt — presumably the
// extern __shared__ base pointer declared earlier in this file; its layout
// ([per-warp dout rows | per-warp input rows | gamma]) must match the nshared
// value computed in HostLayerNormGradient (sizeof(float)*n2*(2*warps+1)).
// NOTE(review): __syncthreads() sits inside a loop whose trip count can differ
// across warps when rows is not a multiple of gridDim.x*WarpPerBlock — confirm
// callers size the grid so all warps of a block iterate the same number of times.
template <typename T, typename U, typename V>
__global__ void LayerNormInputGrad(const V* __restrict__ dout, const T* __restrict__ input,
                                   const int rows, const int cols, const U* __restrict__ mean,
                                   const U* __restrict__ invvar, U epsilon, const V* gamma,
                                   T* grad_input) {
    int WarpPerBlock = blockDim.x / WarpSize;
    int thread_idx = threadIdx.x;
    int warp_idx = thread_idx / WarpSize;
    int lane_idx = thread_idx % WarpSize;

    // Per-warp slices of the dynamic shared buffer; gamma is shared block-wide.
    float* shared_dout = shared_data + warp_idx*cols;
    float* shared_input = shared_data + WarpPerBlock*cols + warp_idx*cols;
    float* shared_gamma = shared_data + 2*WarpPerBlock*cols;
    int row_stride = gridDim.x*WarpPerBlock;
    for(int row = blockIdx.x*WarpPerBlock+warp_idx; row < rows; row += row_stride) {
        U mean_r = mean[row];
        U invvar_r = invvar[row];
        // load dout, input and gamma
        long long data_offset = (long long)(row) * cols;  // 64-bit to avoid overflow on large tensors
        const V* dout_r = dout + data_offset;
        const T* input_r = input + data_offset;
        T* grad_input_r = grad_input + data_offset;
#pragma unroll
        for(int col = lane_idx; col < cols; col += WarpSize) {
            shared_dout[col] = float(dout_r[col]);
            shared_input[col] = float(input_r[col]);
        }
        // Only warp 0 stages gamma; all warps read it after the barrier.
        if(warp_idx == 0) {
#pragma unroll
            for(int col = lane_idx; col < cols; col += WarpSize) {
                shared_gamma[col] = float(gamma[col]);
            }
        }
        __syncthreads();

        float gamma_dout = 0.0;
        float gamma_dout_input_mean = 0.0;
        // reduction, gamma*dout and gamma*dout*(input-mean)
#pragma unroll
        for(int col = lane_idx; col < cols; col += WarpSize) {
            float temp = shared_gamma[col] * shared_dout[col];
            gamma_dout += temp;
            gamma_dout_input_mean += temp * (shared_input[col] - mean_r);
        }
        float global_gamma_dout = WarpReduce<float>(gamma_dout);
        float global_gamma_dout_input_mean = WarpReduce<float>(gamma_dout_input_mean);

        // part2 / part3 are the two mean-subtraction terms of the gradient.
        float part3_temp_value = global_gamma_dout_input_mean * invvar_r * invvar_r * invvar_r / cols;
        float part2 = global_gamma_dout * invvar_r / cols;
#pragma unroll
        for(int col = lane_idx; col < cols; col += WarpSize) {
            float part1 = shared_gamma[col] * shared_dout[col] * invvar_r;
            float part3 = (shared_input[col] - mean_r) * part3_temp_value;
            grad_input_r[col] = part1 - part2 - part3;
        }
    }
}
|
| 329 |
+
|
| 330 |
+
// Choose gridDim.y (== part_size) for LayerNormParamGradStep1: enough row
// strips to fill one wave of resident blocks on the device, capped by the
// number of 32-row tiles, and at least 1.
// (The "Gird" typo in the name is kept because callers in this file use it.)
template <typename T, typename U, typename V>
int GetGirdDimY(const int64_t num_instances, const int64_t norm_size) {
    const int grid_dim_x = (norm_size + tile_size - 1) / tile_size;
    const int max_grid_dim_y = (num_instances + tile_size - 1) / tile_size;
    const int block_size = block_dim_x * block_dim_y;
    int max_active_blocks = 0;
    // Occupancy query; on failure max_active_blocks stays 0 and the
    // std::max below still returns a valid grid of 1.
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(
        &max_active_blocks, LayerNormParamGradStep1<T, U, V>, block_size, 0);
    int waves = 1;  // target a single wave of blocks across the device
    int dev;
    cudaGetDevice(&dev);
    int sm_count;
    cudaDeviceGetAttribute(&sm_count, cudaDevAttrMultiProcessorCount, dev);
    int num_blocks = max_active_blocks * sm_count * waves;
    int grid_dim_y = std::min(max_grid_dim_y, static_cast<int>(num_blocks / grid_dim_x));
    return std::max(grid_dim_y, 1);
}
|
| 347 |
+
|
| 348 |
+
// Host-side launcher for the LayerNorm backward pass:
//   1) a two-step reduction computing grad_gamma/grad_beta (only when the
//      affine parameters exist), then
//   2) a warp-per-row kernel computing grad_input.
// Fixes vs. original: all kernels are launched on the current ATen stream
// (Step1 and the input-grad kernel previously used the legacy default stream
// even though `stream` was already fetched); the function-local `#define
// BlockDim` — which leaked into the rest of the translation unit — is now a
// constexpr; the unused `maxGridY` local is removed.
template <typename T, typename U, typename V>
void HostLayerNormGradient(const V* dout, const U* mean, const U* invvar, at::Tensor* input, int n1,
                           int n2, const V* gamma, const V* beta, double epsilon, T* grad_input,
                           V* grad_gamma, V* grad_beta) {
    auto stream = at::cuda::getCurrentCUDAStream().stream();

    if (gamma != NULL && beta != NULL) {
        // compute grad_gamma(j) and grad_beta(j)
        const int part_size = GetGirdDimY<T, U, V>(n1, n2);
        const int grid_dim_x = (n2 + tile_size - 1) / tile_size;
        const int grid_dim_y = part_size;

        // fp32 scratch of shape [part_size, n2] for the partial sums.
        at::Tensor part_grad_gamma = at::empty({part_size, n2}, input->options().dtype(at::ScalarType::Float));
        at::Tensor part_grad_beta = at::empty_like(part_grad_gamma);
        LayerNormParamGradStep1<T, U, V><<<dim3(grid_dim_x, grid_dim_y), dim3(32, 32 / num_per_block), 0, stream>>>(
            n1, n2, dout, input->DATA_PTR<T>(), mean, invvar, part_grad_gamma.DATA_PTR<U>(), part_grad_beta.DATA_PTR<U>()
        );

        const dim3 threads3(32, 8, 1);
        const dim3 blocks3((n2 + 32 - 1) / 32, 1, 1);
        const int nshared3 = threads3.x * threads3.y * sizeof(U);
        LayerNormParamGradStep2<<<blocks3, threads3, nshared3, stream>>>(
            part_grad_gamma.DATA_PTR<U>(), part_grad_beta.DATA_PTR<U>(), part_size, n1, n2,
            grad_gamma, grad_beta);
    }

    // grad_input kernel: 128 threads = 4 warps per block, one warp per row.
    constexpr int BlockDim = 128;
    int WarpNumPerBlock = BlockDim / WarpSize;
    const dim3 threads1(BlockDim);
    // Shared layout: per-warp dout row + per-warp input row + one gamma row.
    int nshared = sizeof(float) * n2 * (WarpNumPerBlock * 2 + 1);

    int max_active_blocks = 0;
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(
        &max_active_blocks, LayerNormInputGrad<T, U, V>, BlockDim, nshared);
    int dev;
    cudaGetDevice(&dev);
    int sm_count;
    cudaDeviceGetAttribute(&sm_count, cudaDevAttrMultiProcessorCount, dev);

    // Cap the grid at one full wave of resident blocks.
    const dim3 blocks1(std::min((uint64_t)((n1 + WarpNumPerBlock - 1) / WarpNumPerBlock),
                                (uint64_t)(max_active_blocks * sm_count)));
    LayerNormInputGrad<<<blocks1, threads1, nshared, stream>>>(
        dout, input->DATA_PTR<T>(), n1, n2, mean, invvar, U(epsilon), gamma, grad_input);
}
|
| 391 |
+
|
| 392 |
+
// Backward entry point called from the C++ binding: dispatches on the
// (input dtype, gamma dtype) pair via the type_shim macro and forwards raw
// pointers to HostLayerNormGradient. mean/invvar are always fp32 tensors.
void cuda_layer_norm_gradient(at::Tensor* dout, at::Tensor* mean, at::Tensor* invvar,
                              at::Tensor* input, int n1, int n2, at::IntArrayRef normalized_shape,
                              at::Tensor* gamma, at::Tensor* beta, double epsilon,
                              at::Tensor* grad_input, at::Tensor* grad_gamma,
                              at::Tensor* grad_beta) {
    using namespace at;
    DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES(
        input->scalar_type(), gamma->scalar_type(), "cuda_layer_norm_gradient_kernel",
        HostLayerNormGradient(dout->DATA_PTR<scalar_t_out>(), mean->DATA_PTR<float>(),
                              invvar->DATA_PTR<float>(), input, n1, n2,
                              // TMJ pass NULL argument for gamma, beta, grad_gamma and grad_beta
                              // if gamma Tensor is NULL on input.
                              gamma != NULL ? gamma->DATA_PTR<scalar_t_out>() : NULL,
                              gamma != NULL ? beta->DATA_PTR<scalar_t_out>() : NULL, epsilon,
                              grad_input->DATA_PTR<scalar_t_in>(),
                              gamma != NULL ? grad_gamma->DATA_PTR<scalar_t_out>() : NULL,
                              gamma != NULL ? grad_beta->DATA_PTR<scalar_t_out>() : NULL);)
}
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/layer_norm/kernel/type_shim.h
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// modified from https://github.com/NVIDIA/apex
|
| 2 |
+
// Copyright 2021- HPC-AI Technology Inc.
|
| 3 |
+
//
|
| 4 |
+
// Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
// you may not use this file except in compliance with the License.
|
| 6 |
+
// You may obtain a copy of the License at
|
| 7 |
+
//
|
| 8 |
+
// http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
//
|
| 10 |
+
// Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
// distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
// See the License for the specific language governing permissions and
|
| 14 |
+
// limitations under the License.
|
| 15 |
+
|
| 16 |
+
#include <ATen/ATen.h>
|
| 17 |
+
|
| 18 |
+
#include "compat.h"
|
| 19 |
+
|
| 20 |
+
// Run the body with `scalar_t` bound to the half-precision dtype named by
// TYPE (fp16 or bf16); any other dtype raises a c10 error tagged with NAME.
#define DISPATCH_HALF_AND_BFLOAT(TYPE, NAME, ...)                        \
    switch (TYPE) {                                                      \
        case at::ScalarType::Half: {                                     \
            using scalar_t = at::Half;                                   \
            __VA_ARGS__;                                                 \
            break;                                                       \
        }                                                                \
        case at::ScalarType::BFloat16: {                                 \
            using scalar_t = at::BFloat16;                               \
            __VA_ARGS__;                                                 \
            break;                                                       \
        }                                                                \
        default:                                                         \
            AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
    }

// Bind `scalar_t_in` from TYPEIN and `scalar_t_out` from TYPEOUT and run the
// body. For fp32 input, all three output dtypes (fp32/fp16/bf16) are allowed;
// for fp16/bf16 input the output dtype is forced to match the input — note
// that in those branches TYPEOUT is not inspected at all.
#define DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES(TYPEIN, TYPEOUT, NAME, ...) \
    switch (TYPEIN) {                                                    \
        case at::ScalarType::Float: {                                    \
            using scalar_t_in = float;                                   \
            switch (TYPEOUT) {                                           \
                case at::ScalarType::Float: {                            \
                    using scalar_t_out = float;                          \
                    __VA_ARGS__;                                         \
                    break;                                               \
                }                                                        \
                case at::ScalarType::Half: {                             \
                    using scalar_t_out = at::Half;                       \
                    __VA_ARGS__;                                         \
                    break;                                               \
                }                                                        \
                case at::ScalarType::BFloat16: {                         \
                    using scalar_t_out = at::BFloat16;                   \
                    __VA_ARGS__;                                         \
                    break;                                               \
                }                                                        \
                default:                                                 \
                    AT_ERROR(#NAME, " not implemented for '", toString(TYPEOUT), "'"); \
            }                                                            \
            break;                                                       \
        }                                                                \
        case at::ScalarType::Half: {                                     \
            using scalar_t_in = at::Half;                                \
            using scalar_t_out = at::Half;                               \
            __VA_ARGS__;                                                 \
            break;                                                       \
        }                                                                \
        case at::ScalarType::BFloat16: {                                 \
            using scalar_t_in = at::BFloat16;                            \
            using scalar_t_out = at::BFloat16;                           \
            __VA_ARGS__;                                                 \
            break;                                                       \
        }                                                                \
        default:                                                         \
            AT_ERROR(#NAME, " not implemented for '", toString(TYPEIN), "'"); \
    }

// Forward/backward compatibility hack around
// https://github.com/pytorch/pytorch/commit/3aeb78079bcd68282fe9117088e138b77318e288
// pending more future-proof guidance from upstream.

// The LEVEL-suffixed variants below bind `scalar_t_<LEVEL>` so nested
// dispatches over several tensors can coexist in one scope.

#define DISPATCH_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...)                  \
    switch (TYPE) {                                                      \
        case at::ScalarType::Float: {                                    \
            using scalar_t_##LEVEL = float;                              \
            __VA_ARGS__;                                                 \
            break;                                                       \
        }                                                                \
        case at::ScalarType::Half: {                                     \
            using scalar_t_##LEVEL = at::Half;                           \
            __VA_ARGS__;                                                 \
            break;                                                       \
        }                                                                \
        default:                                                         \
            AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
    }

#define DISPATCH_FLOAT_HALF_AND_BYTE(TYPE, LEVEL, NAME, ...)             \
    switch (TYPE) {                                                      \
        case at::ScalarType::Float: {                                    \
            using scalar_t_##LEVEL = float;                              \
            __VA_ARGS__;                                                 \
            break;                                                       \
        }                                                                \
        case at::ScalarType::Half: {                                     \
            using scalar_t_##LEVEL = at::Half;                           \
            __VA_ARGS__;                                                 \
            break;                                                       \
        }                                                                \
        case at::ScalarType::Byte: {                                     \
            using scalar_t_##LEVEL = uint8_t;                            \
            __VA_ARGS__;                                                 \
            break;                                                       \
        }                                                                \
        default:                                                         \
            AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
    }

#define DISPATCH_DOUBLE_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...)           \
    switch (TYPE) {                                                      \
        case at::ScalarType::Double: {                                   \
            using scalar_t_##LEVEL = double;                             \
            __VA_ARGS__;                                                 \
            break;                                                       \
        }                                                                \
        case at::ScalarType::Float: {                                    \
            using scalar_t_##LEVEL = float;                              \
            __VA_ARGS__;                                                 \
            break;                                                       \
        }                                                                \
        case at::ScalarType::Half: {                                     \
            using scalar_t_##LEVEL = at::Half;                           \
            __VA_ARGS__;                                                 \
            break;                                                       \
        }                                                                \
        default:                                                         \
            AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
    }

#define DISPATCH_DOUBLE_AND_FLOAT(TYPE, LEVEL, NAME, ...)                \
    switch (TYPE) {                                                      \
        case at::ScalarType::Double: {                                   \
            using scalar_t_##LEVEL = double;                             \
            __VA_ARGS__;                                                 \
            break;                                                       \
        }                                                                \
        case at::ScalarType::Float: {                                    \
            using scalar_t_##LEVEL = float;                              \
            __VA_ARGS__;                                                 \
            break;                                                       \
        }                                                                \
        default:                                                         \
            AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
    }
|
| 163 |
+
|
| 164 |
+
// Sum-reduce `val` across the whole thread block, leaving the result in the
// first `lanes` threads (lanes <= 32). `x` is a shared-memory scratch buffer
// of at least blockDim.x * blockDim.y elements. The return value is only
// meaningful in threads with tid < 32 (and, if share_result, in x[0..lanes)).
template <typename T>
__device__ __forceinline__ T
reduce_block_into_lanes(T *x, T val, int lanes = 1,
                        bool share_result = false) // lanes is intended to be <= 32.
{
    int tid = threadIdx.x + threadIdx.y * blockDim.x;
    int blockSize = blockDim.x * blockDim.y; // blockSize is intended to be a multiple of 32.

    // Stage every thread's value; a single-warp block skips shared memory entirely.
    if (blockSize >= 64) {
        x[tid] = val;
        __syncthreads();
    }

    // Pairwise shared-memory tree reduction down to 64 live elements.
#pragma unroll
    for (int i = (blockSize >> 1); i >= 64; i >>= 1) {
        if (tid < i) x[tid] = x[tid] + x[tid + i];
        __syncthreads();
    }

    T final;

    if (tid < 32) {
        // Fold the last 64 elements into one warp, then shuffle-reduce down
        // to the first `lanes` lanes.
        if (blockSize >= 64)
            final = x[tid] + x[tid + 32];
        else
            final = val;
        // __SYNCWARP();

#pragma unroll
        for (int i = 16; i >= lanes; i >>= 1)
            final = final + __shfl_down_sync(0xffffffff, final, i);
    }

    if (share_result) {
        if (tid < lanes) x[tid] = final; // EpilogueOp
        // Make sure the smem result is visible to all warps.
        __syncthreads();
    }

    return final;
}
|
| 205 |
+
|
| 206 |
+
// Like reduce_block_into_lanes, but the combining operation is
// max(|a|, |b|): produces the maximum absolute value contributed by the
// block, left in the first `lanes` threads (lanes <= 32). `x` is a
// shared-memory scratch buffer of at least blockDim.x * blockDim.y elements.
template <typename T>
__device__ __forceinline__ T
reduce_block_into_lanes_max_op(T *x, T val, int lanes = 1,
                               bool share_result = false) // lanes is intended to be <= 32.
{
    int tid = threadIdx.x + threadIdx.y * blockDim.x;
    int blockSize = blockDim.x * blockDim.y; // blockSize is intended to be a multiple of 32.

    // Stage raw values; absolute values are taken as elements are combined.
    if (blockSize >= 64) {
        x[tid] = val;
        __syncthreads();
    }

#pragma unroll
    for (int i = (blockSize >> 1); i >= 64; i >>= 1) {
        if (tid < i) x[tid] = fmaxf(fabsf(x[tid]), fabsf(x[tid + i]));
        __syncthreads();
    }

    T final;

    if (tid < 32) {
        if (blockSize >= 64)
            final = fmaxf(fabsf(x[tid]), fabsf(x[tid + 32]));
        else
            final = val;
        // __SYNCWARP();

#pragma unroll
        for (int i = 16; i >= lanes; i >>= 1)
            final = fmaxf(fabsf(final), fabsf(__shfl_down_sync(0xffffffff, final, i)));
    }

    if (share_result) {
        if (tid < lanes) x[tid] = final; // EpilogueOp
        // Make sure the smem result is visible to all warps.
        __syncthreads();
    }

    return final;
}
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/layer_norm/layer_norm.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Copyright 2021- HPC-AI Technology Inc.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http:#www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
import importlib
|
| 17 |
+
import numbers
|
| 18 |
+
import os
|
| 19 |
+
import sys
|
| 20 |
+
import time
|
| 21 |
+
|
| 22 |
+
import torch
|
| 23 |
+
from torch.nn.parameter import Parameter
|
| 24 |
+
|
| 25 |
+
sys.path.append(os.path.dirname(__file__))
|
| 26 |
+
|
| 27 |
+
# Load the fused LayerNorm CUDA extension. If a prebuilt
# "fastfold_layer_norm_cuda" module is importable (this file's directory was
# appended to sys.path above), use it; otherwise JIT-compile the bundled
# kernel sources on first import via torch.utils.cpp_extension.
try:
    fastfold_layer_norm_cuda = importlib.import_module("fastfold_layer_norm_cuda")
except ImportError:
    from protenix.model.layer_norm.torch_ext_compile import compile

    current_dir = os.path.dirname(__file__)
    # Build artifacts are written into this package directory so subsequent
    # imports take the fast path above.
    fastfold_layer_norm_cuda = compile(
        name="fastfold_layer_norm_cuda",
        sources=[
            os.path.join(f"{current_dir}/kernel", file)
            for file in ["layer_norm_cuda.cpp", "layer_norm_cuda_kernel.cu"]
        ],
        extra_include_paths=[f"{current_dir}/kernel"],
        build_directory=current_dir,
    )
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class FusedLayerNormAffineFunction(torch.autograd.Function):
    """Autograd wrapper around the fused CUDA LayerNorm kernels.

    Forward computes ``LayerNorm(input) * weight + bias`` over
    ``normalized_shape`` and stashes the contiguous operands plus the per-row
    mean / inverse variance for the backward pass.

    Refactor note: the original duplicated the entire forward and backward
    bodies across the bfloat16 / non-bfloat16 branches; the common work is now
    shared, and the bf16 branch only adds the autocast guard and the
    weight/bias dtype casts the kernel requires.
    """

    @staticmethod
    def forward(ctx, input, weight, bias, normalized_shape, eps):
        ctx.normalized_shape = normalized_shape
        ctx.eps = eps
        input_ = input.contiguous()
        weight_ = weight.contiguous()
        bias_ = bias.contiguous()
        d = input.dtype
        if d is torch.bfloat16:
            # Keep autocast from re-casting operands, and match weight/bias
            # dtype to the bf16 input as the kernel expects.
            with torch.cuda.amp.autocast(enabled=False):
                weight_ = weight_.to(dtype=d)
                bias_ = bias_.to(dtype=d)
                output, mean, invvar = fastfold_layer_norm_cuda.forward_affine(
                    input_, ctx.normalized_shape, weight_, bias_, ctx.eps
                )
        else:
            output, mean, invvar = fastfold_layer_norm_cuda.forward_affine(
                input_, ctx.normalized_shape, weight_, bias_, ctx.eps
            )
        ctx.save_for_backward(input_, weight_, bias_, mean, invvar)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input_, weight_, bias_, mean, invvar = ctx.saved_tensors
        d = grad_output.dtype
        if d is torch.bfloat16:
            with torch.cuda.amp.autocast(enabled=False):
                grad_input, grad_weight, grad_bias = (
                    fastfold_layer_norm_cuda.backward_affine(
                        grad_output.contiguous(),
                        mean,
                        invvar,
                        input_,
                        ctx.normalized_shape,
                        weight_.to(dtype=d),
                        bias_.to(dtype=d),
                        ctx.eps,
                    )
                )
        else:
            grad_input, grad_weight, grad_bias = (
                fastfold_layer_norm_cuda.backward_affine(
                    grad_output.contiguous(),
                    mean,
                    invvar,
                    input_,
                    ctx.normalized_shape,
                    weight_,
                    bias_,
                    ctx.eps,
                )
            )
        # None gradients for the non-tensor args (normalized_shape, eps).
        return grad_input, grad_weight, grad_bias, None, None
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
class FusedLayerNorm(torch.nn.Module):
    """LayerNorm module backed by the fused CUDA kernel.

    Drop-in replacement for ``torch.nn.LayerNorm`` with elementwise affine
    parameters: ``weight`` initialized to ones and ``bias`` to zeros.

    Args:
        normalized_shape: int or shape tuple of the trailing dims to normalize.
        eps: small value added to the variance for numerical stability.
    """

    def __init__(self, normalized_shape, eps=1e-5):
        super(FusedLayerNorm, self).__init__()

        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = (normalized_shape,)
        self.normalized_shape = torch.Size(normalized_shape)
        self.eps = eps
        self.weight = Parameter(torch.ones(*normalized_shape))
        # Allocate the bias as zeros directly; the original allocated ones and
        # relied on reset_parameters() below to zero it.
        self.bias = Parameter(torch.zeros(*normalized_shape))
        self.reset_parameters()

    def reset_parameters(self):
        """Reset affine parameters to the identity transform (w=1, b=0)."""
        torch.nn.init.ones_(self.weight)
        torch.nn.init.zeros_(self.bias)

    def forward(self, input):
        return self.kernel_forward(input)

    def kernel_forward(self, input):
        """Dispatch to the fused CUDA autograd function."""
        return FusedLayerNormAffineFunction.apply(
            input, self.weight, self.bias, self.normalized_shape, self.eps
        )
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/layer_norm/torch_ext_compile.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import os
|
| 16 |
+
|
| 17 |
+
from torch.utils.cpp_extension import load
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def compile(name, sources, extra_include_paths, build_directory):
    """JIT-build and load a CUDA torch extension.

    Thin wrapper around ``torch.utils.cpp_extension.load`` with the flags the
    fused kernels need (fp16 operators enabled, extended lambdas, sm_70/80/86/90
    codegen). Note the name shadows the ``compile`` builtin; it is kept because
    callers import it by this name.

    Args:
        name: extension module name.
        sources: list of .cpp/.cu source paths.
        extra_include_paths: additional include directories.
        build_directory: where build artifacts (and the .so) are written.

    Returns:
        The loaded extension module.
    """
    # Respect a user/environment-provided arch list; only fall back to the
    # historical default when unset. (The original unconditionally overwrote
    # the variable, and its "7.0;8.0" value disagreed with the sm_86/sm_90
    # -gencode flags passed explicitly below.)
    os.environ.setdefault("TORCH_CUDA_ARCH_LIST", "7.0;8.0")
    return load(
        name=name,
        sources=sources,
        extra_include_paths=extra_include_paths,
        extra_cflags=[
            "-O3",
            "-DVERSION_GE_1_1",
            "-DVERSION_GE_1_3",
            "-DVERSION_GE_1_5",
        ],
        extra_cuda_cflags=[
            "-O3",
            "--use_fast_math",
            "-DVERSION_GE_1_1",
            "-DVERSION_GE_1_3",
            "-DVERSION_GE_1_5",
            "-std=c++17",
            "-maxrregcount=50",
            "-U__CUDA_NO_HALF_OPERATORS__",
            "-U__CUDA_NO_HALF_CONVERSIONS__",
            "--expt-relaxed-constexpr",
            "--expt-extended-lambda",
            "-gencode",
            "arch=compute_70,code=sm_70",
            "-gencode",
            "arch=compute_80,code=sm_80",
            "-gencode",
            "arch=compute_86,code=sm_86",
            "-gencode",
            "arch=compute_90,code=sm_90",
        ],
        verbose=True,
        build_directory=build_directory,
    )
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/loss.py
ADDED
|
@@ -0,0 +1,1812 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import logging
|
| 16 |
+
from typing import Any, Optional, Union
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
import torch.nn as nn
|
| 20 |
+
import torch.nn.functional as F
|
| 21 |
+
|
| 22 |
+
from protenix.metrics.rmsd import weighted_rigid_align
|
| 23 |
+
from protenix.model.modules.frames import (
|
| 24 |
+
expressCoordinatesInFrame,
|
| 25 |
+
gather_frame_atom_by_indices,
|
| 26 |
+
)
|
| 27 |
+
from protenix.model.utils import expand_at_dim
|
| 28 |
+
from protenix.openfold_local.utils.checkpointing import get_checkpoint_fn
|
| 29 |
+
from protenix.utils.torch_utils import cdist
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def loss_reduction(loss: torch.Tensor, method: str = "mean") -> torch.Tensor:
    """Reduce a loss tensor over all of its elements.

    Args:
        loss (torch.Tensor): loss of any shape [...].
        method (str, optional): one of "mean", "sum", "add" (alias of "sum"),
            "max", "min", or None to skip reduction. Defaults to "mean".

    Returns:
        torch.Tensor: the input unchanged when method is None, otherwise a
        scalar ([]) produced by the corresponding torch reduction.
    """
    if method is None:
        return loss
    assert method in ["mean", "sum", "add", "max", "min"]
    # "add" is accepted as a synonym for "sum".
    op_name = "sum" if method == "add" else method
    reduce_op = getattr(torch, op_name)
    return reduce_op(loss)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class SmoothLDDTLoss(nn.Module):
    """
    Implements Algorithm 27 [SmoothLDDTLoss] in AF3.

    The LDDT thresholds (0.5/1/2/4 Angstrom) are softened with sigmoids so the
    score is differentiable; the returned loss is ``1 - lddt``.
    """

    def __init__(
        self,
        eps: float = 1e-10,
        reduction: str = "mean",
    ) -> None:
        """SmoothLDDTLoss

        Args:
            eps (float, optional): small constant added to the denominator so a
                sample with no valid pairs does not produce nan. Defaults to 1e-10.
            reduction (str, optional): reduction method for the batch dims. Defaults to mean.
        """
        super(SmoothLDDTLoss, self).__init__()
        self.eps = eps
        self.reduction = reduction

    def _chunk_forward(self, pred_distance, true_distance, c_lm=None):
        """Per-sample smooth lddt for one chunk of diffusion samples.

        Args:
            pred_distance: predicted pairwise distances; dense
                [..., N_sample, N_atom, N_atom] or sparse [..., N_sample, N_pair].
            true_distance: ground-truth distances, broadcastable against pred_distance.
            c_lm: dense pair mask [..., 1, N_atom, N_atom]; None selects the
                sparse path where inputs already contain only the valid pairs.

        Returns:
            smooth lddt per sample, [..., N_sample].
        """
        dist_diff = torch.abs(pred_distance - true_distance)
        # Average of four shifted sigmoids approximates the four LDDT bins.
        # Accumulated in-place into a running sum to limit peak CUDA memory.
        dist_diff_epsilon = 0
        for threshold in [0.5, 1, 2, 4]:
            dist_diff_epsilon += 0.25 * torch.sigmoid(threshold - dist_diff)

        # Compute mean over valid pairs
        if c_lm is not None:
            lddt = torch.sum(c_lm * dist_diff_epsilon, dim=(-1, -2)) / (
                torch.sum(c_lm, dim=(-1, -2)) + self.eps
            )  # [..., N_sample]
        else:
            # Sparse mode: inputs hold only masked pairs, so a plain mean suffices.
            lddt = torch.mean(dist_diff_epsilon, dim=-1)
        return lddt

    def forward(
        self,
        pred_distance: torch.Tensor,
        true_distance: torch.Tensor,
        distance_mask: torch.Tensor,
        lddt_mask: torch.Tensor,
        diffusion_chunk_size: Optional[int] = None,
    ) -> torch.Tensor:
        """SmoothLDDTLoss on precomputed distance matrices.

        Args:
            pred_distance (torch.Tensor): the diffusion denoised atom-atom distance
                [..., N_sample, N_atom, N_atom]
            true_distance (torch.Tensor): the ground truth atom-atom distance
                [..., N_atom, N_atom]
            distance_mask (torch.Tensor): whether true coordinates exist.
                [N_atom, N_atom]
                NOTE(review): unused in this method; pair validity appears to be
                folded into lddt_mask already (see compute_lddt_mask) — confirm.
            lddt_mask (torch.Tensor, optional): whether true distance is within radius
                (30A for nuc and 15A for others)
                [N_atom, N_atom]
            diffusion_chunk_size (Optional[int]): Chunk size over the N_sample dimension.
                Defaults to None (all samples at once).

        Returns:
            torch.Tensor: the smooth lddt loss
                [...] if reduction is None else []
        """
        c_lm = lddt_mask.bool().unsqueeze(dim=-3).detach()  # [..., 1, N_atom, N_atom]
        # Compute distance error
        # [..., N_sample , N_atom, N_atom]
        if diffusion_chunk_size is None:
            lddt = self._chunk_forward(
                pred_distance=pred_distance, true_distance=true_distance, c_lm=c_lm
            )
        else:
            # Chunk over N_sample with activation checkpointing to save memory.
            checkpoint_fn = get_checkpoint_fn()
            lddt = []
            N_sample = pred_distance.shape[-3]
            # ceil(N_sample / diffusion_chunk_size)
            no_chunks = N_sample // diffusion_chunk_size + (
                N_sample % diffusion_chunk_size != 0
            )
            for i in range(no_chunks):
                lddt_i = checkpoint_fn(
                    self._chunk_forward,
                    pred_distance[
                        ...,
                        i * diffusion_chunk_size : (i + 1) * diffusion_chunk_size,
                        :,
                        :,
                    ],
                    true_distance,
                    c_lm,
                )
                lddt.append(lddt_i)
            lddt = torch.cat(lddt, dim=-1)

        lddt = lddt.mean(dim=-1)  # [...]
        return 1 - loss_reduction(lddt, method=self.reduction)

    def sparse_forward(
        self,
        pred_coordinate: torch.Tensor,
        true_coordinate: torch.Tensor,
        lddt_mask: torch.Tensor,
        diffusion_chunk_size: Optional[int] = None,
    ) -> torch.Tensor:
        """SmoothLDDTLoss sparse implementation.

        Only the atom pairs selected by lddt_mask are materialized, avoiding
        the dense [N_atom, N_atom] distance matrices.

        Args:
            pred_coordinate (torch.Tensor): the diffusion denoised atom coordinates
                [..., N_sample, N_atom, 3]
            true_coordinate (torch.Tensor): the ground truth atom coordinates
                [..., N_atom, 3]
            lddt_mask (torch.Tensor, optional): whether true distance is within radius
                (30A for nuc and 15A for others)
                [N_atom, N_atom]
            diffusion_chunk_size (Optional[int]): Chunk size over the N_sample dimension.
                Defaults to None.

        Returns:
            torch.Tensor: the smooth lddt loss
                [...] if reduction is None else []
        """
        lddt_indices = torch.nonzero(lddt_mask, as_tuple=True)
        true_coords_l = true_coordinate.index_select(-2, lddt_indices[0])
        true_coords_m = true_coordinate.index_select(-2, lddt_indices[1])
        true_distance_sparse_lm = torch.norm(true_coords_l - true_coords_m, p=2, dim=-1)
        if diffusion_chunk_size is None:
            pred_coords_l = pred_coordinate.index_select(-2, lddt_indices[0])
            pred_coords_m = pred_coordinate.index_select(-2, lddt_indices[1])
            # \delta x_{lm} and \delta x_{lm}^{GT} in the Algorithm 27
            pred_distance_sparse_lm = torch.norm(
                pred_coords_l - pred_coords_m, p=2, dim=-1
            )
            lddt = self._chunk_forward(
                pred_distance_sparse_lm, true_distance_sparse_lm, c_lm=None
            )
        else:
            checkpoint_fn = get_checkpoint_fn()
            lddt = []
            N_sample = pred_coordinate.shape[-3]
            no_chunks = N_sample // diffusion_chunk_size + (
                N_sample % diffusion_chunk_size != 0
            )
            for i in range(no_chunks):
                # NOTE(review): unlike forward(), this slices the FIRST dim rather
                # than using `...` before the N_sample dim, so this branch assumes
                # pred_coordinate has no leading batch dims — confirm with callers.
                pred_coords_i_l = pred_coordinate[
                    i * diffusion_chunk_size : (i + 1) * diffusion_chunk_size, :, :
                ].index_select(-2, lddt_indices[0])
                pred_coords_i_m = pred_coordinate[
                    i * diffusion_chunk_size : (i + 1) * diffusion_chunk_size, :, :
                ].index_select(-2, lddt_indices[1])

                # \delta x_{lm} and \delta x_{lm}^{GT} in the Algorithm 27
                pred_distance_sparse_i_lm = torch.norm(
                    pred_coords_i_l - pred_coords_i_m, p=2, dim=-1
                )
                # c_lm is omitted (defaults to None): sparse mode.
                lddt_i = checkpoint_fn(
                    self._chunk_forward,
                    pred_distance_sparse_i_lm,
                    true_distance_sparse_lm,
                )
                lddt.append(lddt_i)
            lddt = torch.cat(lddt, dim=-1)

        lddt = lddt.mean(dim=-1)  # [...]
        return 1 - loss_reduction(lddt, method=self.reduction)

    def dense_forward(
        self,
        pred_coordinate: torch.Tensor,
        true_coordinate: torch.Tensor,
        lddt_mask: torch.Tensor,
        diffusion_chunk_size: Optional[int] = None,
    ) -> torch.Tensor:
        """SmoothLDDTLoss dense implementation (from coordinates).

        Builds the full pairwise distance matrices with torch.cdist and then
        applies the same smooth-lddt computation as forward().

        Args:
            pred_coordinate (torch.Tensor): the diffusion denoised atom coordinates
                [..., N_sample, N_atom, 3]
            true_coordinate (torch.Tensor): the ground truth atom coordinates
                [..., N_atom, 3]
            lddt_mask (torch.Tensor, optional): whether true distance is within radius
                (30A for nuc and 15A for others)
                [N_atom, N_atom]
            diffusion_chunk_size (Optional[int]): Chunk size over the N_sample dimension.
                Defaults to None.

        Returns:
            torch.Tensor: the smooth lddt loss
                [...] if reduction is None else []
        """
        c_lm = lddt_mask.bool().unsqueeze(dim=-3).detach()  # [..., 1, N_atom, N_atom]
        # Compute distance error
        # [..., N_sample , N_atom, N_atom]
        true_distance = torch.cdist(true_coordinate, true_coordinate)
        if diffusion_chunk_size is None:
            pred_distance = torch.cdist(pred_coordinate, pred_coordinate)
            lddt = self._chunk_forward(
                pred_distance=pred_distance, true_distance=true_distance, c_lm=c_lm
            )
        else:
            checkpoint_fn = get_checkpoint_fn()
            lddt = []
            N_sample = pred_coordinate.shape[-3]
            no_chunks = N_sample // diffusion_chunk_size + (
                N_sample % diffusion_chunk_size != 0
            )
            for i in range(no_chunks):
                pred_distance_i = torch.cdist(
                    pred_coordinate[
                        i * diffusion_chunk_size : (i + 1) * diffusion_chunk_size,
                        :,
                        :,
                    ],
                    pred_coordinate[
                        i * diffusion_chunk_size : (i + 1) * diffusion_chunk_size,
                        :,
                        :,
                    ],
                )
                lddt_i = checkpoint_fn(
                    self._chunk_forward,
                    pred_distance_i,
                    true_distance,
                    c_lm,
                )
                lddt.append(lddt_i)
            lddt = torch.cat(lddt, dim=-1)

        lddt = lddt.mean(dim=-1)  # [...]
        return 1 - loss_reduction(lddt, method=self.reduction)
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
class BondLoss(nn.Module):
    """
    Implements Formula 5 [BondLoss] in AF3: mean squared error between
    predicted and ground-truth distances over the selected (bonded) atom pairs.
    """

    def __init__(self, eps: float = 1e-6, reduction: str = "mean") -> None:
        """BondLoss

        Args:
            eps (float, optional): small constant added to the denominator so an
                all-zero bond mask does not divide by zero. Defaults to 1e-6.
            reduction (str, optional): reduction method for the batch dims. Defaults to mean.
        """
        super(BondLoss, self).__init__()
        self.eps = eps
        self.reduction = reduction

    def _chunk_forward(
        self,
        pred_distance: torch.Tensor,
        true_distance: torch.Tensor,
        bond_mask: torch.Tensor,
    ) -> torch.Tensor:
        """Masked mean squared distance error for one chunk of samples.

        Args:
            pred_distance: predicted distances [..., N_sample, N_atom, N_atom].
            true_distance: ground-truth distances [..., N_atom, N_atom].
            bond_mask: pair mask [..., 1, N_atom, N_atom].

        Returns:
            per-sample bond loss, [..., N_sample].
        """
        # Distance squared error
        # [..., N_sample, N_atom, N_atom]
        dist_squared_err = (pred_distance - true_distance.unsqueeze(dim=-3)) ** 2
        # BUGFIX: eps belongs OUTSIDE the sum. The previous form
        # torch.sum(bond_mask + self.eps, dim=(-1, -2)) added eps once per matrix
        # element, inflating the denominator by eps * N_atom**2 (e.g. ~100 for
        # 10k atoms) and biasing the loss low whenever few bonds are selected.
        bond_loss = torch.sum(dist_squared_err * bond_mask, dim=(-1, -2)) / (
            torch.sum(bond_mask, dim=(-1, -2)) + self.eps
        )  # [..., N_sample]
        return bond_loss

    def forward(
        self,
        pred_distance: torch.Tensor,
        true_distance: torch.Tensor,
        distance_mask: torch.Tensor,
        bond_mask: torch.Tensor,
        per_sample_scale: Optional[torch.Tensor] = None,
        diffusion_chunk_size: Optional[int] = None,
    ) -> torch.Tensor:
        """BondLoss on precomputed distance matrices.

        Args:
            pred_distance (torch.Tensor): the diffusion denoised atom-atom distance
                [..., N_sample, N_atom, N_atom]
            true_distance (torch.Tensor): the ground truth atom-atom distance
                [..., N_atom, N_atom]
            distance_mask (torch.Tensor): whether true coordinates exist.
                [N_atom, N_atom] or [..., N_atom, N_atom]
            bond_mask (torch.Tensor): bonds considered in this loss
                [N_atom, N_atom] or [..., N_atom, N_atom]
            per_sample_scale (torch.Tensor, optional): scale the loss by the
                per-sample noise-level. [..., N_sample]
            diffusion_chunk_size (Optional[int]): Chunk size over the N_sample dimension.
                Defaults to None.

        Returns:
            torch.Tensor: the bond loss
                [...] if reduction is None else []
        """
        # Only bonds whose endpoints both have resolved coordinates count.
        bond_mask = (bond_mask * distance_mask).unsqueeze(
            dim=-3
        )  # [1, N_atom, N_atom] or [..., 1, N_atom, N_atom]
        # Bond Loss
        if diffusion_chunk_size is None:
            bond_loss = self._chunk_forward(
                pred_distance=pred_distance,
                true_distance=true_distance,
                bond_mask=bond_mask,
            )
        else:
            # Chunk over N_sample with activation checkpointing to save memory.
            checkpoint_fn = get_checkpoint_fn()
            bond_loss = []
            N_sample = pred_distance.shape[-3]
            # ceil(N_sample / diffusion_chunk_size)
            no_chunks = N_sample // diffusion_chunk_size + (
                N_sample % diffusion_chunk_size != 0
            )
            for i in range(no_chunks):
                bond_loss_i = checkpoint_fn(
                    self._chunk_forward,
                    pred_distance[
                        ...,
                        i * diffusion_chunk_size : (i + 1) * diffusion_chunk_size,
                        :,
                        :,
                    ],
                    true_distance,
                    bond_mask,
                )
                bond_loss.append(bond_loss_i)
            bond_loss = torch.cat(bond_loss, dim=-1)

        if per_sample_scale is not None:
            bond_loss = bond_loss * per_sample_scale

        bond_loss = bond_loss.mean(dim=-1)  # [...]
        return loss_reduction(bond_loss, method=self.reduction)

    def sparse_forward(
        self,
        pred_coordinate: torch.Tensor,
        true_coordinate: torch.Tensor,
        distance_mask: torch.Tensor,
        bond_mask: torch.Tensor,
        per_sample_scale: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """BondLoss sparse implementation.

        Only the bonded pairs are materialized, avoiding dense distance matrices.
        NOTE(review): unlike forward(), the result is NOT passed through
        loss_reduction; callers get the per-batch value — confirm intended.

        Args:
            pred_coordinate (torch.Tensor): the diffusion denoised atom coordinates
                [..., N_sample, N_atom, 3]
            true_coordinate (torch.Tensor): the ground truth atom coordinates
                [..., N_atom, 3]
            distance_mask (torch.Tensor): whether true coordinates exist.
                [N_atom, N_atom] or [..., N_atom, N_atom]
            bond_mask (torch.Tensor): bonds considered in this loss
                [N_atom, N_atom] or [..., N_atom, N_atom]
            per_sample_scale (torch.Tensor, optional): scale the loss by the
                per-sample noise-level. [..., N_sample]

        Returns:
            torch.Tensor: the bond loss, [...]
        """
        bond_mask = bond_mask * distance_mask
        bond_indices = torch.nonzero(bond_mask, as_tuple=True)
        pred_coords_i = pred_coordinate.index_select(-2, bond_indices[0])
        pred_coords_j = pred_coordinate.index_select(-2, bond_indices[1])
        true_coords_i = true_coordinate.index_select(-2, bond_indices[0])
        true_coords_j = true_coordinate.index_select(-2, bond_indices[1])

        pred_distance_sparse = torch.norm(pred_coords_i - pred_coords_j, p=2, dim=-1)
        true_distance_sparse = torch.norm(true_coords_i - true_coords_j, p=2, dim=-1)
        dist_squared_err_sparse = (pred_distance_sparse - true_distance_sparse) ** 2
        # Guard: with no selected bonds the tensor is empty, e.g.
        # tensor([], size=(x, 0)); mean() would return nan.
        if dist_squared_err_sparse.numel() == 0:
            return torch.tensor(
                0.0, device=dist_squared_err_sparse.device, requires_grad=True
            )
        bond_loss = torch.mean(dist_squared_err_sparse, dim=-1)  # [..., N_sample]
        if per_sample_scale is not None:
            bond_loss = bond_loss * per_sample_scale

        bond_loss = bond_loss.mean(dim=-1)  # [...]
        return bond_loss
|
| 416 |
+
|
| 417 |
+
|
| 418 |
+
def compute_lddt_mask(
    true_distance: torch.Tensor,
    distance_mask: torch.Tensor,
    is_nucleotide: torch.Tensor,
    is_nucleotide_threshold: float = 30.0,
    is_not_nucleotide_threshold: float = 15.0,
) -> torch.Tensor:
    """Build the LDDT inclusion-radius atom-pair mask c_lm.

    A pair (l, m) is kept when the ground-truth distance is below the radius
    chosen by atom l's type: ``is_nucleotide_threshold`` for nucleotide rows,
    ``is_not_nucleotide_threshold`` otherwise. Diagonal entries and pairs
    without resolved coordinates are zeroed. The result is not symmetric
    because the radius depends only on the row atom.

    Args:
        true_distance (torch.Tensor): ground-truth pairwise distances
            [..., N_atom, N_atom]
        distance_mask (torch.Tensor): whether true coordinates exist.
            [..., N_atom, N_atom] or [N_atom, N_atom]
        is_nucleotide (torch.Tensor): indicator for nucleotide atoms.
            [..., N_atom] or [N_atom]
        is_nucleotide_threshold (float): radius for nucleotide atoms. Defaults to 30.0.
        is_not_nucleotide_threshold (float): radius for other atoms. Defaults to 15.0.

    Returns:
        torch.Tensor: the atom pair mask c_lm, [..., N_atom, N_atom].
    """
    nucleotide_rows = is_nucleotide.bool()[..., None]  # [..., N_atom, 1]
    within_nuc_radius = (true_distance < is_nucleotide_threshold) & nucleotide_rows
    within_other_radius = (true_distance < is_not_nucleotide_threshold) & ~nucleotide_rows
    pair_mask = within_nuc_radius | within_other_radius  # [..., N_atom, N_atom]

    # Drop self-pairs; the multiplication also casts the mask to float.
    off_diagonal = 1 - torch.eye(
        n=pair_mask.size(-1), device=pair_mask.device, dtype=true_distance.dtype
    )
    pair_mask = pair_mask * off_diagonal
    # Drop pairs without resolved ground-truth coordinates. The mask is sparse
    # (~10% at 5000 pairs and sparser for larger systems), so a sparse
    # implementation downstream can save CUDA memory.
    return pair_mask * distance_mask
|
| 459 |
+
|
| 460 |
+
|
| 461 |
+
def softmax_cross_entropy(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    """Cross entropy between soft labels and the softmax of logits.

    Args:
        logits (torch.Tensor): classification logits
            [..., num_class]
        labels (torch.Tensor): classification labels (value = probability)
            [..., num_class]

    Returns:
        torch.Tensor: softmax cross entropy
            [...]
    """
    log_probs = F.log_softmax(logits, dim=-1)
    return -torch.sum(labels * log_probs, dim=-1)
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
class DistogramLoss(nn.Module):
|
| 482 |
+
"""
|
| 483 |
+
Implements DistogramLoss in AF3
|
| 484 |
+
"""
|
| 485 |
+
|
| 486 |
+
    def __init__(
        self,
        min_bin: float = 2.3125,
        max_bin: float = 21.6875,
        no_bins: int = 64,
        eps: float = 1e-6,
        reduction: str = "mean",
    ) -> None:
        """Distogram loss
        This head and loss are identical to AlphaFold 2, where the pairwise token distances use the representative atom for each token:
            Cβ for protein residues (Cα for glycine),
            C4 for purines and C2 for pyrimidines.
            All ligands already have a single atom per token.

        Args:
            min_bin (float, optional): min boundary of bins. Defaults to 2.3125.
            max_bin (float, optional): max boundary of bins. Defaults to 21.6875.
            no_bins (int, optional): number of bins. Defaults to 64.
            eps (float, optional): small number added to denominator. Defaults to 1e-6.
            reduction (str, optional): reduction method for the batch dims. Defaults to "mean".
        """
        super(DistogramLoss, self).__init__()
        self.min_bin = min_bin
        self.max_bin = max_bin
        self.no_bins = no_bins
        self.eps = eps
        self.reduction = reduction
|
| 513 |
+
|
| 514 |
+
def calculate_label(
|
| 515 |
+
self,
|
| 516 |
+
true_coordinate: torch.Tensor,
|
| 517 |
+
coordinate_mask: torch.Tensor,
|
| 518 |
+
rep_atom_mask: torch.Tensor,
|
| 519 |
+
) -> tuple[torch.Tensor, torch.Tensor]:
|
| 520 |
+
"""calculate the label as bins
|
| 521 |
+
|
| 522 |
+
Args:
|
| 523 |
+
true_coordinate (torch.Tensor): true coordinates.
|
| 524 |
+
[..., N_atom, 3]
|
| 525 |
+
coordinate_mask (torch.Tensor): whether true coordinates exist.
|
| 526 |
+
[N_atom] or [..., N_atom]
|
| 527 |
+
rep_atom_mask (torch.Tensor): representative atom mask
|
| 528 |
+
[N_atom]
|
| 529 |
+
|
| 530 |
+
Returns:
|
| 531 |
+
true_bins (torch.Tensor): distance error assigned into bins (one-hot).
|
| 532 |
+
[..., N_token, N_token, no_bins]
|
| 533 |
+
pair_coordinate_mask (torch.Tensor): whether the coordinates of representative atom pairs exist.
|
| 534 |
+
[N_token, N_token] or [..., N_token, N_token]
|
| 535 |
+
"""
|
| 536 |
+
|
| 537 |
+
boundaries = torch.linspace(
|
| 538 |
+
start=self.min_bin,
|
| 539 |
+
end=self.max_bin,
|
| 540 |
+
steps=self.no_bins - 1,
|
| 541 |
+
device=true_coordinate.device,
|
| 542 |
+
)
|
| 543 |
+
|
| 544 |
+
# Compute label: the true bins
|
| 545 |
+
# True distance
|
| 546 |
+
rep_atom_mask = rep_atom_mask.bool()
|
| 547 |
+
true_coordinate = true_coordinate[..., rep_atom_mask, :] # [..., N_token, 3]
|
| 548 |
+
gt_dist = cdist(true_coordinate, true_coordinate) # [..., N_token, N_token]
|
| 549 |
+
# Assign distance to bins
|
| 550 |
+
true_bins = torch.sum(
|
| 551 |
+
gt_dist.unsqueeze(dim=-1) > boundaries, dim=-1
|
| 552 |
+
) # range in [0, no_bins-1], shape = [..., N_token, N_token]
|
| 553 |
+
|
| 554 |
+
# Mask
|
| 555 |
+
token_mask = coordinate_mask[..., rep_atom_mask]
|
| 556 |
+
pair_mask = token_mask[..., None] * token_mask[..., None, :]
|
| 557 |
+
|
| 558 |
+
return F.one_hot(true_bins, self.no_bins), pair_mask
|
| 559 |
+
|
| 560 |
+
def forward(
|
| 561 |
+
self,
|
| 562 |
+
logits: torch.Tensor,
|
| 563 |
+
true_coordinate: torch.Tensor,
|
| 564 |
+
coordinate_mask: torch.Tensor,
|
| 565 |
+
rep_atom_mask: torch.Tensor,
|
| 566 |
+
) -> torch.Tensor:
|
| 567 |
+
"""Distogram loss
|
| 568 |
+
|
| 569 |
+
Args:
|
| 570 |
+
logits (torch.Tensor): logits.
|
| 571 |
+
[..., N_token, N_token, no_bins]
|
| 572 |
+
true_coordinate (torch.Tensor): true coordinates.
|
| 573 |
+
[..., N_atom, 3]
|
| 574 |
+
coordinate_mask (torch.Tensor): whether true coordinates exist.
|
| 575 |
+
[N_atom] or [..., N_atom]
|
| 576 |
+
rep_atom_mask (torch.Tensor): representative atom mask.
|
| 577 |
+
[N_atom]
|
| 578 |
+
|
| 579 |
+
Returns:
|
| 580 |
+
torch.Tensor: the return loss.
|
| 581 |
+
[...] if self.reduction is not None else []
|
| 582 |
+
"""
|
| 583 |
+
|
| 584 |
+
with torch.no_grad():
|
| 585 |
+
true_bins, pair_mask = self.calculate_label(
|
| 586 |
+
true_coordinate=true_coordinate,
|
| 587 |
+
coordinate_mask=coordinate_mask,
|
| 588 |
+
rep_atom_mask=rep_atom_mask,
|
| 589 |
+
)
|
| 590 |
+
|
| 591 |
+
errors = softmax_cross_entropy(
|
| 592 |
+
logits=logits,
|
| 593 |
+
labels=true_bins,
|
| 594 |
+
) # [..., N_token, N_token]
|
| 595 |
+
|
| 596 |
+
denom = self.eps + torch.sum(pair_mask, dim=(-1, -2))
|
| 597 |
+
loss = torch.sum(errors * pair_mask, dim=(-1, -2))
|
| 598 |
+
loss = loss / denom
|
| 599 |
+
|
| 600 |
+
return loss_reduction(loss, method=self.reduction)
|
| 601 |
+
|
| 602 |
+
|
| 603 |
+
class PDELoss(nn.Module):
    """Predicted Distance Error (PDE) loss as in AF3.

    The error is measured between representative token atoms i and j of
    the mini-rollout prediction.
    """

    def __init__(
        self,
        min_bin: float = 0,
        max_bin: float = 32,
        no_bins: int = 64,
        eps: float = 1e-6,
        reduction: str = "mean",
    ) -> None:
        """PDE loss configuration.

        Args:
            min_bin (float, optional): min boundary of bins. Defaults to 0.
            max_bin (float, optional): max boundary of bins. Defaults to 32.
            no_bins (int, optional): number of bins. Defaults to 64.
            eps (float, optional): small number added to denominator. Defaults to 1e-6.
            reduction (str, optional): reduction method for the batch dims. Defaults to "mean".
        """
        super().__init__()
        self.min_bin = min_bin
        self.max_bin = max_bin
        self.no_bins = no_bins
        self.eps = eps
        self.reduction = reduction

    def calculate_label(
        self,
        pred_coordinate: torch.Tensor,
        true_coordinate: torch.Tensor,
        coordinate_mask: torch.Tensor,
        rep_atom_mask: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Bin the |pred - true| distance errors into one-hot labels.

        Args:
            pred_coordinate (torch.Tensor): predicted coordinates.
                [..., N_sample, N_atom, 3]
            true_coordinate (torch.Tensor): true coordinates.
                [..., N_atom, 3]
            coordinate_mask (torch.Tensor): whether true coordinates exist.
                [N_atom] or [..., N_atom]
            rep_atom_mask (torch.Tensor): representative atom mask.
                [N_atom]

        Returns:
            true_bins (torch.Tensor): distance error assigned into bins (one-hot).
                [..., N_sample, N_token, N_token, no_bins]
            pair_coordinate_mask (torch.Tensor): whether both representative atoms
                of a pair have true coordinates.
                [N_token, N_token] or [..., N_token, N_token]
        """
        edges = torch.linspace(
            start=self.min_bin,
            end=self.max_bin,
            steps=self.no_bins + 1,
            device=pred_coordinate.device,
        )

        rep_mask = rep_atom_mask.bool()
        # Ground-truth pairwise distances between representative atoms.
        gt_xyz = true_coordinate[..., rep_mask, :]  # [..., N_token, 3]
        gt_dist = cdist(gt_xyz, gt_xyz)  # [..., N_token, N_token]
        # Predicted pairwise distances, one set per sample.
        pr_xyz = pred_coordinate[..., rep_mask, :]
        pr_dist = cdist(pr_xyz, pr_xyz)  # [..., N_sample, N_token, N_token]

        err = torch.abs(pr_dist - gt_dist.unsqueeze(dim=-3))

        # Counting exceeded edges yields indices in [1, no_bins + 1].
        raw_bins = (err.unsqueeze(dim=-1) > edges).sum(dim=-1)
        raw_bins = torch.clamp(raw_bins, min=1, max=self.no_bins)  # guard bin = 0

        tok_mask = coordinate_mask[..., rep_mask]
        pair_mask = tok_mask[..., :, None] * tok_mask[..., None, :]

        return F.one_hot(raw_bins - 1, self.no_bins).detach(), pair_mask.detach()

    def forward(
        self,
        logits: torch.Tensor,
        pred_coordinate: torch.Tensor,
        true_coordinate: torch.Tensor,
        coordinate_mask: torch.Tensor,
        rep_atom_mask: torch.Tensor,
    ) -> torch.Tensor:
        """Compute the PDE loss.

        Args:
            logits (torch.Tensor): logits.
                [..., N_sample, N_token, N_token, no_bins]
            pred_coordinate (torch.Tensor): predicted coordinates.
                [..., N_sample, N_atom, 3]
            true_coordinate (torch.Tensor): true coordinates.
                [..., N_atom, 3]
            coordinate_mask (torch.Tensor): whether true coordinates exist.
                [N_atom] or [..., N_atom]
            rep_atom_mask (torch.Tensor): representative atom mask for this loss.
                [N_atom]

        Returns:
            torch.Tensor: the return loss.
                [...] if reduction is None else []
        """
        # Labels carry no gradient.
        with torch.no_grad():
            true_bins, pair_mask = self.calculate_label(
                pred_coordinate=pred_coordinate,
                true_coordinate=true_coordinate,
                coordinate_mask=coordinate_mask,
                rep_atom_mask=rep_atom_mask,
            )

        errors = softmax_cross_entropy(
            logits=logits,
            labels=true_bins,
        )  # [..., N_sample, N_token, N_token]

        # Masked mean over token pairs, then mean over samples.
        masked = errors * pair_mask.unsqueeze(dim=-3)
        per_sample = masked.sum(dim=(-1, -2)) / (
            self.eps + pair_mask.sum(dim=(-1, -2))
        ).unsqueeze(dim=-1)  # [..., N_sample]

        return loss_reduction(per_sample.mean(dim=-1), method=self.reduction)
# Algorithm 30 Compute alignment error
def compute_alignment_error_squared(
    pred_coordinate: torch.Tensor,
    true_coordinate: torch.Tensor,
    pred_frames: torch.Tensor,
    true_frames: torch.Tensor,
) -> torch.Tensor:
    """AF3 Algorithm 30 (compute alignment error) without the final square root.

    Args:
        pred_coordinate (torch.Tensor): the predicted coords [frame center]
            [..., N_sample, N_token, 3]
        true_coordinate (torch.Tensor): the ground truth coords [frame center]
            [..., N_token, 3]
        pred_frames (torch.Tensor): the predicted frames
            [..., N_sample, N_frame, 3, 3]
        true_frames (torch.Tensor): the ground truth frames
            [..., N_frame, 3, 3]

    Returns:
        torch.Tensor: the squared alignment error
            [..., N_sample, N_frame, N_token]
    """
    # Express both coordinate sets in their respective local frames.
    pred_local = expressCoordinatesInFrame(
        coordinate=pred_coordinate, frames=pred_frames
    )  # [..., N_sample, N_frame, N_token, 3]
    true_local = expressCoordinatesInFrame(
        coordinate=true_coordinate, frames=true_frames
    )  # [..., N_frame, N_token, 3]
    # Squared Euclidean distance in frame coordinates; broadcast true over N_sample.
    diff = pred_local - true_local.unsqueeze(dim=-4)
    return (diff * diff).sum(dim=-1)  # [..., N_sample, N_frame, N_token]
class PAELoss(nn.Module):
    """Predicted Aligned Error (PAE) loss as in AF3.

    The error is computed between representative token atoms i and j of the
    mini-rollout prediction, expressed in each valid token frame.
    """

    def __init__(
        self,
        min_bin: float = 0,
        max_bin: float = 32,
        no_bins: int = 64,
        eps: float = 1e-6,
        reduction: str = "mean",
    ) -> None:
        """PAE loss configuration.

        Args:
            min_bin (float, optional): min boundary of bins. Defaults to 0.
            max_bin (float, optional): max boundary of bins. Defaults to 32.
            no_bins (int, optional): number of bins. Defaults to 64.
            eps (float, optional): small number added to denominator. Defaults to 1e-6.
            reduction (str, optional): reduction method over the batch dims. Defaults to "mean".
        """
        super().__init__()
        self.min_bin = min_bin
        self.max_bin = max_bin
        self.no_bins = no_bins
        self.eps = eps
        self.reduction = reduction

    def calculate_label(
        self,
        pred_coordinate: torch.Tensor,
        true_coordinate: torch.Tensor,
        coordinate_mask: torch.Tensor,
        rep_atom_mask: torch.Tensor,
        frame_atom_index: torch.Tensor,
        has_frame: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Compute the squared alignment error and its one-hot bin labels.

        Args:
            pred_coordinate (torch.Tensor): predicted coordinates.
                [..., N_sample, N_atom, 3]
            true_coordinate (torch.Tensor): true coordinates.
                [..., N_atom, 3]
            coordinate_mask (torch.Tensor): whether true coordinates exist.
                [N_atom]
            rep_atom_mask (torch.Tensor): masks of the representative atom per token.
                [N_atom]
            frame_atom_index (torch.Tensor): indices of the three frame atoms per token.
                [N_token, 3[three atom]]
            has_frame (torch.Tensor): whether token_i has a valid frame.
                [N_token]

        Returns:
            squared_pae (torch.Tensor): pairwise alignment error squared.
                [..., N_sample, N_frame, N_token] where N_token = rep_atom_mask.sum()
            true_bins (torch.Tensor): one-hot bin labels.
                [..., N_sample, N_frame, N_token, no_bins]
            frame_token_pair_mask (torch.Tensor): whether frame_i and token_j both
                have true coordinates.
                [N_frame, N_token]
        """
        coordinate_mask = coordinate_mask.bool()
        rep_atom_mask = rep_atom_mask.bool()
        has_frame = has_frame.bool()

        # NOTE: batched frame_atom_index is unsupported here; its dims would
        # need expanding before constructing frames.
        assert len(frame_atom_index.shape) == 2

        # Keep only tokens owning a valid frame: N_token -> N_frame.
        valid_frame_index = frame_atom_index[has_frame, :]  # [N_frame, 3]

        pred_frames = gather_frame_atom_by_indices(
            coordinate=pred_coordinate, frame_atom_index=valid_frame_index, dim=-2
        )  # [..., N_sample, N_frame, 3[three atom], 3[coordinates]]
        true_frames = gather_frame_atom_by_indices(
            coordinate=true_coordinate, frame_atom_index=valid_frame_index, dim=-2
        )  # [..., N_frame, 3[three atom], 3[coordinates]]

        # A frame is usable only if all three of its atoms are resolved.
        frame_atoms_resolved = gather_frame_atom_by_indices(
            coordinate=coordinate_mask, frame_atom_index=valid_frame_index, dim=-1
        )  # [N_frame, 3[three atom]]
        frame_ok = frame_atoms_resolved.sum(dim=-1) >= 3  # [N_frame]
        token_ok = coordinate_mask[rep_atom_mask]  # [N_token]
        frame_token_pair_mask = (
            frame_ok[..., None] * token_ok[..., None, :]
        )  # [N_frame, N_token]

        squared_pae = (
            compute_alignment_error_squared(
                pred_coordinate=pred_coordinate[..., rep_atom_mask, :],
                true_coordinate=true_coordinate[..., rep_atom_mask, :],
                pred_frames=pred_frames,
                true_frames=true_frames,
            )
            * frame_token_pair_mask
        )  # [..., N_sample, N_frame, N_token]

        # Square the bin edges so binning happens in squared-distance space.
        edges = (
            torch.linspace(
                start=self.min_bin,
                end=self.max_bin,
                steps=self.no_bins + 1,
                device=pred_coordinate.device,
            )
            ** 2
        )

        raw_bins = (squared_pae.unsqueeze(dim=-1) > edges).sum(
            dim=-1
        )  # range [1, no_bins + 1]
        # Masked-out pairs are assigned to the last bin.
        raw_bins = torch.where(
            frame_token_pair_mask,
            raw_bins,
            torch.ones_like(raw_bins) * self.no_bins,
        )
        raw_bins = torch.clamp(raw_bins, min=1, max=self.no_bins)  # guard bin = 0

        return (
            squared_pae.detach(),
            F.one_hot(raw_bins - 1, self.no_bins).detach(),
            frame_token_pair_mask.detach(),
        )

    def forward(
        self,
        logits: torch.Tensor,
        pred_coordinate: torch.Tensor,
        true_coordinate: torch.Tensor,
        coordinate_mask: torch.Tensor,
        frame_atom_index: torch.Tensor,
        rep_atom_mask: torch.Tensor,
        has_frame: torch.Tensor,
    ) -> torch.Tensor:
        """Compute the PAE loss.

        Args:
            logits (torch.Tensor): logits.
                [..., N_sample, N_token, N_token, no_bins]
            pred_coordinate (torch.Tensor): predicted coordinates.
                [..., N_sample, N_atom, 3]
            true_coordinate (torch.Tensor): true coordinates.
                [..., N_atom, 3]
            coordinate_mask (torch.Tensor): whether true coordinates exist.
                [N_atom]
            rep_atom_mask (torch.Tensor): masks of the representative atom per token.
                [N_atom]
            frame_atom_index (torch.Tensor): indices of the three frame atoms per token.
                [N_token, 3[three atom]]
            has_frame (torch.Tensor): whether token_i has a valid frame.
                [N_token]

        Returns:
            torch.Tensor: the return loss.
                [] if reduced, [..., n] otherwise
        """
        has_frame = has_frame.bool()
        rep_atom_mask = rep_atom_mask.bool()
        assert len(has_frame.shape) == 1
        assert len(frame_atom_index.shape) == 2

        # Labels carry no gradient.
        with torch.no_grad():
            # true_bins: [..., N_sample, N_frame, N_token, no_bins]
            # pair_mask: [N_frame, N_token]
            _, true_bins, pair_mask = self.calculate_label(
                pred_coordinate=pred_coordinate,
                true_coordinate=true_coordinate,
                frame_atom_index=frame_atom_index,
                rep_atom_mask=rep_atom_mask,
                coordinate_mask=coordinate_mask,
                has_frame=has_frame,
            )

        # Only frame rows with a valid frame contribute.
        errors = softmax_cross_entropy(
            logits=logits[
                ..., has_frame, :, :
            ],  # [..., N_sample, N_frame, N_token, no_bins]
            labels=true_bins,
        )  # [..., N_sample, N_frame, N_token]

        # Masked mean over (frame, token) pairs, then mean over samples.
        masked = errors * pair_mask.unsqueeze(dim=-3)  # [..., N_sample, N_frame, N_token]
        per_sample = masked.sum(dim=(-1, -2)) / (
            self.eps + pair_mask.sum(dim=(-1, -2))
        ).unsqueeze(dim=-1)  # [..., N_sample]

        return loss_reduction(per_sample.mean(dim=-1), self.reduction)
class ExperimentallyResolvedLoss(nn.Module):
    """Cross-entropy head predicting whether each atom is experimentally resolved."""

    def __init__(
        self,
        eps: float = 1e-6,
        reduction: str = "mean",
    ) -> None:
        """
        Args:
            eps (float, optional): small number added to denominators to avoid nan. Defaults to 1e-6.
            reduction (str, optional): reduction method over the batch dims. Defaults to "mean".
        """
        super().__init__()
        self.eps = eps
        self.reduction = reduction

    def forward(
        self,
        logits: torch.Tensor,
        coordinate_mask: torch.Tensor,
        atom_mask: torch.Tensor = None,
    ) -> torch.Tensor:
        """
        Args:
            logits (torch.Tensor): logits
                [..., N_sample, N_atom, no_bins:=2]
            coordinate_mask (torch.Tensor): whether true coordinates exist
                [..., N_atom] | [N_atom]
            atom_mask (torch.Tensor, optional): whether to consider the atom in the loss
                [..., N_atom]

        Returns:
            torch.Tensor: the experimentally resolved loss
        """
        # The coordinate mask itself is the two-class target.
        target = F.one_hot(coordinate_mask.long(), 2)  # [..., N_atom, 2] or [N_atom, 2]
        errors = softmax_cross_entropy(
            logits=logits, labels=target.unsqueeze(dim=-3)
        )  # [..., N_sample, N_atom]

        if atom_mask is None:
            per_sample = errors.mean(dim=-1)  # [..., N_sample]
        else:
            # Masked mean over atoms.
            mask = atom_mask[..., None, :]
            per_sample = (errors * mask).sum(dim=-1) / (
                self.eps + mask.sum(dim=-1)
            )  # [..., N_sample]

        return loss_reduction(per_sample.mean(dim=-1), method=self.reduction)
class MSELoss(nn.Module):
    """Weighted aligned MSE loss, AF3 Formulas 2-4."""

    def __init__(
        self,
        weight_mse: float = 1 / 3,
        weight_dna: float = 5.0,
        weight_rna=5.0,
        weight_ligand=10.0,
        eps=1e-6,
        reduction: str = "mean",
    ) -> None:
        """
        Args:
            weight_mse (float, optional): global scale of the MSE term. Defaults to 1/3.
            weight_dna (float, optional): extra per-atom weight for DNA atoms. Defaults to 5.0.
            weight_rna (float, optional): extra per-atom weight for RNA atoms. Defaults to 5.0.
            weight_ligand (float, optional): extra per-atom weight for ligand atoms. Defaults to 10.0.
            eps (float, optional): small number added to denominators. Defaults to 1e-6.
            reduction (str, optional): reduction method over the batch dims. Defaults to "mean".
        """
        super().__init__()
        self.weight_mse = weight_mse
        self.weight_dna = weight_dna
        self.weight_rna = weight_rna
        self.weight_ligand = weight_ligand
        self.eps = eps
        self.reduction = reduction

    def weighted_rigid_align(
        self,
        pred_coordinate: torch.Tensor,
        true_coordinate: torch.Tensor,
        coordinate_mask: torch.Tensor,
        is_dna: torch.Tensor,
        is_rna: torch.Tensor,
        is_ligand: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Rigid-align the ground truth onto each predicted sample.

        NOTE: this method shadows the module-level ``weighted_rigid_align``
        helper by name; the call inside the body resolves to that helper.

        Args:
            pred_coordinate (torch.Tensor): denoised coordinates from the diffusion module.
                [..., N_sample, N_atom, 3]
            true_coordinate (torch.Tensor): ground truth coordinates.
                [..., N_atom, 3]
            coordinate_mask (torch.Tensor): whether true coordinates exist.
                [N_atom] or [..., N_atom]
            is_dna / is_rna / is_ligand (torch.Tensor): mol type masks.
                [N_atom] or [..., N_atom]

        Returns:
            true_coordinate_aligned (torch.Tensor): aligned coordinates per sample.
                [..., N_sample, N_atom, 3]
            weight (torch.Tensor): per-atom weights.
                [N_atom] or [..., N_sample, N_atom]
        """
        n_sample = pred_coordinate.size(-3)

        # Per-atom weights: 1 plus molecule-type bonuses, zeroed where unresolved.
        weight = (
            1
            + self.weight_dna * is_dna
            + self.weight_rna * is_rna
            + self.weight_ligand * is_ligand
        ) * coordinate_mask  # [N_atom] or [..., N_atom]

        # Zero out unresolved coordinates on both sides.
        true_coordinate = true_coordinate * coordinate_mask.unsqueeze(dim=-1)
        pred_coordinate = pred_coordinate * coordinate_mask[..., None, :, None]

        # Broadcast the ground truth across the sample dimension.
        true_coordinate = expand_at_dim(
            true_coordinate, dim=-3, n=n_sample
        )  # [..., N_sample, N_atom, 3]
        if len(weight.shape) > 1:
            weight = expand_at_dim(
                weight, dim=-2, n=n_sample
            )  # [..., N_sample, N_atom]

        orig_dtype = pred_coordinate.dtype
        # Some ops in weighted_rigid_align do not support BFloat16 training,
        # so the alignment runs in float32 with autocast disabled.
        with torch.cuda.amp.autocast(enabled=False):
            aligned = weighted_rigid_align(
                x=true_coordinate.to(torch.float32),  # [..., N_sample, N_atom, 3]
                x_target=pred_coordinate.to(torch.float32),
                atom_weight=weight.to(torch.float32),
                stop_gradient=True,
            )  # [..., N_sample, N_atom, 3]
        aligned = aligned.to(orig_dtype)

        return aligned.detach(), weight.detach()

    def forward(
        self,
        pred_coordinate: torch.Tensor,
        true_coordinate: torch.Tensor,
        coordinate_mask: torch.Tensor,
        is_dna: torch.Tensor,
        is_rna: torch.Tensor,
        is_ligand: torch.Tensor,
        per_sample_scale: torch.Tensor = None,
    ) -> torch.Tensor:
        """Compute the weighted aligned MSE loss.

        Args:
            pred_coordinate (torch.Tensor): denoised coordinates from the diffusion module.
                [..., N_sample, N_atom, 3]
            true_coordinate (torch.Tensor): ground truth coordinates.
                [..., N_atom, 3]
            coordinate_mask (torch.Tensor): whether true coordinates exist.
                [N_atom] or [..., N_atom]
            is_dna / is_rna / is_ligand (torch.Tensor): mol type masks.
                [N_atom] or [..., N_atom]
            per_sample_scale (torch.Tensor, optional): per-sample noise-level scale.
                [..., N_sample]

        Returns:
            torch.Tensor: the weighted mse loss.
                [...] if self.reduction is None else []
        """
        # Alignment and weights are treated as constants w.r.t. the gradient.
        with torch.no_grad():
            aligned_gt, weight = self.weighted_rigid_align(
                pred_coordinate=pred_coordinate,
                true_coordinate=true_coordinate,
                coordinate_mask=coordinate_mask,
                is_dna=is_dna,
                is_rna=is_rna,
                is_ligand=is_ligand,
            )

        # Weighted squared error, averaged over resolved atoms.
        per_atom_se = ((pred_coordinate - aligned_gt) ** 2).sum(
            dim=-1
        )  # [..., N_sample, N_atom]
        per_sample_mse = (weight * per_atom_se).sum(dim=-1) / (
            coordinate_mask.sum(dim=-1, keepdim=True) + self.eps
        )  # [..., N_sample]

        if per_sample_scale is not None:
            per_sample_mse = per_sample_mse * per_sample_scale

        loss = self.weight_mse * per_sample_mse.mean(dim=-1)  # [...]

        return loss_reduction(loss, method=self.reduction)
class PLDDTLoss(nn.Module):
|
| 1174 |
+
"""
|
| 1175 |
+
Implements PLDDT Loss in AF3, different from the paper description.
|
| 1176 |
+
Main changes:
|
| 1177 |
+
1. use difference of distance instead of predicted distance when calculating plddt
|
| 1178 |
+
2. normalize each plddt score within 0-1
|
| 1179 |
+
"""
|
| 1180 |
+
|
| 1181 |
+
def __init__(
|
| 1182 |
+
self,
|
| 1183 |
+
min_bin: float = 0,
|
| 1184 |
+
max_bin: float = 1,
|
| 1185 |
+
no_bins: int = 50,
|
| 1186 |
+
is_nucleotide_threshold: float = 30.0,
|
| 1187 |
+
is_not_nucleotide_threshold: float = 15.0,
|
| 1188 |
+
eps: float = 1e-6,
|
| 1189 |
+
normalize: bool = True,
|
| 1190 |
+
reduction: str = "mean",
|
| 1191 |
+
) -> None:
|
| 1192 |
+
"""PLDDT loss
|
| 1193 |
+
This loss are between atoms l and m (has some filters) in the mini-rollout prediction
|
| 1194 |
+
|
| 1195 |
+
Args:
|
| 1196 |
+
min_bin (float, optional): min boundary of bins. Defaults to 0.
|
| 1197 |
+
max_bin (float, optional): max boundary of bins. Defaults to 1.
|
| 1198 |
+
no_bins (int, optional): number of bins. Defaults to 50.
|
| 1199 |
+
is_nucleotide_threshold (float, optional): threshold for nucleotide atoms. Defaults 30.0.
|
| 1200 |
+
is_not_nucleotide_threshold (float, optional): threshold for non-nucleotide atoms. Defaults 15.0
|
| 1201 |
+
eps (float, optional): small number added to denominator. Defaults to 1e-6.
|
| 1202 |
+
reduction (str, optional): reduction method for the batch dims. Defaults to mean.
|
| 1203 |
+
"""
|
| 1204 |
+
super(PLDDTLoss, self).__init__()
|
| 1205 |
+
self.normalize = normalize
|
| 1206 |
+
self.min_bin = min_bin
|
| 1207 |
+
self.max_bin = max_bin
|
| 1208 |
+
self.no_bins = no_bins
|
| 1209 |
+
self.eps = eps
|
| 1210 |
+
self.reduction = reduction
|
| 1211 |
+
self.is_nucleotide_threshold = is_nucleotide_threshold
|
| 1212 |
+
self.is_not_nucleotide_threshold = is_not_nucleotide_threshold
|
| 1213 |
+
|
| 1214 |
+
def calculate_label(
|
| 1215 |
+
self,
|
| 1216 |
+
pred_coordinate: torch.Tensor,
|
| 1217 |
+
true_coordinate: torch.Tensor,
|
| 1218 |
+
is_nucleotide: torch.Tensor,
|
| 1219 |
+
is_polymer: torch.Tensor,
|
| 1220 |
+
rep_atom_mask: torch.Tensor,
|
| 1221 |
+
) -> torch.Tensor:
|
| 1222 |
+
"""calculate the lddt as described in Sec 4.3.1.
|
| 1223 |
+
|
| 1224 |
+
Args:
|
| 1225 |
+
pred_coordinate (torch.Tensor):
|
| 1226 |
+
[..., N_sample, N_atom, 3]
|
| 1227 |
+
true_coordinate (torch.Tensor):
|
| 1228 |
+
[..., N_atom]
|
| 1229 |
+
is_nucleotide (torch.Tensor):
|
| 1230 |
+
[N_atom] or [..., N_atom]
|
| 1231 |
+
is_polymer (torch.Tensor):
|
| 1232 |
+
[N_atom]
|
| 1233 |
+
rep_atom_mask (torch.Tensor):
|
| 1234 |
+
[N_atom]
|
| 1235 |
+
|
| 1236 |
+
Returns:
|
| 1237 |
+
torch.Tensor: per-atom lddt
|
| 1238 |
+
[..., N_sample, N_atom]
|
| 1239 |
+
"""
|
| 1240 |
+
|
| 1241 |
+
N_atom = true_coordinate.size(-2)
|
| 1242 |
+
atom_m_mask = (rep_atom_mask * is_polymer).bool() # [N_atom]
|
| 1243 |
+
# Distance: d_lm
|
| 1244 |
+
pred_d_lm = torch.cdist(
|
| 1245 |
+
pred_coordinate, pred_coordinate[..., atom_m_mask, :]
|
| 1246 |
+
) # [..., N_sample, N_atom, N_atom(m)]
|
| 1247 |
+
true_d_lm = torch.cdist(
|
| 1248 |
+
true_coordinate, true_coordinate[..., atom_m_mask, :]
|
| 1249 |
+
) # [..., N_atom, N_atom(m)]
|
| 1250 |
+
delta_d_lm = torch.abs(
|
| 1251 |
+
pred_d_lm - true_d_lm.unsqueeze(dim=-3)
|
| 1252 |
+
) # [..., N_sample, N_atom, N_atom(m)]
|
| 1253 |
+
|
| 1254 |
+
# Pair-wise lddt
|
| 1255 |
+
thresholds = [0.5, 1, 2, 4]
|
| 1256 |
+
lddt_lm = (
|
| 1257 |
+
torch.stack([delta_d_lm < t for t in thresholds], dim=-1)
|
| 1258 |
+
.to(dtype=delta_d_lm.dtype)
|
| 1259 |
+
.mean(dim=-1)
|
| 1260 |
+
) # [..., N_sample, N_atom, N_atom(m)]
|
| 1261 |
+
|
| 1262 |
+
# Select atoms that are within certain threshold to l in ground truth
|
| 1263 |
+
# Restrict to bespoke inclusion radius
|
| 1264 |
+
is_nucleotide = is_nucleotide[
|
| 1265 |
+
..., atom_m_mask
|
| 1266 |
+
].bool() # [N_atom(m)] or [..., N_atom(m)]
|
| 1267 |
+
locality_mask = (
|
| 1268 |
+
true_d_lm < self.is_nucleotide_threshold
|
| 1269 |
+
) * is_nucleotide.unsqueeze(dim=-2) + (
|
| 1270 |
+
true_d_lm < self.is_not_nucleotide_threshold
|
| 1271 |
+
) * (
|
| 1272 |
+
~is_nucleotide.unsqueeze(dim=-2)
|
| 1273 |
+
) # [..., N_atom, N_atom(m)]
|
| 1274 |
+
|
| 1275 |
+
# Remove self-distance computation
|
| 1276 |
+
diagonal_mask = ((1 - torch.eye(n=N_atom)).bool().to(true_d_lm.device))[
|
| 1277 |
+
..., atom_m_mask
|
| 1278 |
+
] # [N_atom, N_atom(m)]
|
| 1279 |
+
|
| 1280 |
+
pair_mask = (locality_mask * diagonal_mask).unsqueeze(
|
| 1281 |
+
dim=-3
|
| 1282 |
+
) # [..., 1, N_atom, N_atom(m)]
|
| 1283 |
+
|
| 1284 |
+
per_atom_lddt = torch.sum(
|
| 1285 |
+
lddt_lm * pair_mask, dim=-1, keepdim=True
|
| 1286 |
+
) # [..., N_sample, N_atom, 1]
|
| 1287 |
+
if self.normalize:
|
| 1288 |
+
per_atom_lddt = per_atom_lddt / (
|
| 1289 |
+
torch.sum(pair_mask.to(dtype=per_atom_lddt.dtype), dim=-1, keepdim=True)
|
| 1290 |
+
+ self.eps
|
| 1291 |
+
)
|
| 1292 |
+
# Distribute into bins
|
| 1293 |
+
boundaries = torch.linspace(
|
| 1294 |
+
start=self.min_bin,
|
| 1295 |
+
end=self.max_bin,
|
| 1296 |
+
steps=self.no_bins + 1,
|
| 1297 |
+
device=true_coordinate.device,
|
| 1298 |
+
) # [N_bins]
|
| 1299 |
+
|
| 1300 |
+
true_bins = torch.sum(
|
| 1301 |
+
per_atom_lddt > boundaries, dim=-1
|
| 1302 |
+
) # [..., N_sample, N_atom], range in [1, no_bins]
|
| 1303 |
+
true_bins = torch.clamp(
|
| 1304 |
+
true_bins, min=1, max=self.no_bins
|
| 1305 |
+
) # just in case bin=0/no_bins+1 occurs
|
| 1306 |
+
true_bins = F.one_hot(
|
| 1307 |
+
true_bins - 1, self.no_bins
|
| 1308 |
+
) # [..., N_sample, N_atom, N_bins]
|
| 1309 |
+
|
| 1310 |
+
return true_bins
|
| 1311 |
+
|
| 1312 |
+
def forward(
|
| 1313 |
+
self,
|
| 1314 |
+
logits: torch.Tensor,
|
| 1315 |
+
pred_coordinate: torch.Tensor,
|
| 1316 |
+
true_coordinate: torch.Tensor,
|
| 1317 |
+
coordinate_mask: torch.Tensor,
|
| 1318 |
+
is_nucleotide: torch.Tensor,
|
| 1319 |
+
is_polymer: torch.Tensor,
|
| 1320 |
+
rep_atom_mask: torch.Tensor,
|
| 1321 |
+
) -> torch.Tensor:
|
| 1322 |
+
"""PLDDT loss
|
| 1323 |
+
|
| 1324 |
+
Args:
|
| 1325 |
+
logits (torch.Tensor): logits
|
| 1326 |
+
[..., N_sample, N_atom, no_bins:=50]
|
| 1327 |
+
pred_coordinate (torch.Tensor): predicted coordinates
|
| 1328 |
+
[..., N_sample, N_atom, 3]
|
| 1329 |
+
true_coordinate (torch.Tensor): true coordinates
|
| 1330 |
+
[..., N_atom, 3]
|
| 1331 |
+
coordinate_mask (torch.Tensor): whether true coordinates exist
|
| 1332 |
+
[N_atom]
|
| 1333 |
+
is_nucleotide (torch.Tensor): "is_rna" or "is_dna"
|
| 1334 |
+
[N_atom]
|
| 1335 |
+
is_polymer (torch.Tensor): not "is_ligand"
|
| 1336 |
+
[N_atom]
|
| 1337 |
+
rep_atom_mask (torch.Tensor): representative atom of each token
|
| 1338 |
+
[N_atom]
|
| 1339 |
+
|
| 1340 |
+
Returns:
|
| 1341 |
+
torch.Tensor: the return loss
|
| 1342 |
+
[...] if self.reduction is None else []
|
| 1343 |
+
"""
|
| 1344 |
+
assert (
|
| 1345 |
+
is_nucleotide.shape
|
| 1346 |
+
== is_polymer.shape
|
| 1347 |
+
== rep_atom_mask.shape
|
| 1348 |
+
== coordinate_mask.shape
|
| 1349 |
+
== coordinate_mask.view(-1).shape
|
| 1350 |
+
)
|
| 1351 |
+
|
| 1352 |
+
coordinate_mask = coordinate_mask.bool()
|
| 1353 |
+
rep_atom_mask = rep_atom_mask.bool()
|
| 1354 |
+
is_nucleotide = is_nucleotide.bool()
|
| 1355 |
+
is_polymer = is_polymer.bool()
|
| 1356 |
+
|
| 1357 |
+
with torch.no_grad():
|
| 1358 |
+
true_bins = self.calculate_label(
|
| 1359 |
+
pred_coordinate=pred_coordinate[..., coordinate_mask, :],
|
| 1360 |
+
true_coordinate=true_coordinate[..., coordinate_mask, :],
|
| 1361 |
+
is_nucleotide=is_nucleotide[coordinate_mask],
|
| 1362 |
+
is_polymer=is_polymer[coordinate_mask],
|
| 1363 |
+
rep_atom_mask=rep_atom_mask[coordinate_mask],
|
| 1364 |
+
).detach() # [..., N_sample, N_atom_with_coords, N_bins]
|
| 1365 |
+
|
| 1366 |
+
plddt_loss = softmax_cross_entropy(
|
| 1367 |
+
logits=logits[..., coordinate_mask, :],
|
| 1368 |
+
labels=true_bins,
|
| 1369 |
+
) # [..., N_sample, N_atom_with_coords]
|
| 1370 |
+
|
| 1371 |
+
# Average over atoms
|
| 1372 |
+
plddt_loss = plddt_loss.mean(dim=-1) # [..., N_sample]
|
| 1373 |
+
|
| 1374 |
+
# Average over samples
|
| 1375 |
+
plddt_loss = plddt_loss.mean(dim=-1) # [...]
|
| 1376 |
+
|
| 1377 |
+
return loss_reduction(plddt_loss, method=self.reduction)
|
| 1378 |
+
|
| 1379 |
+
|
| 1380 |
+
class ProtenixLoss(nn.Module):
    """Aggregation of the various losses.

    Combines the confidence losses (plddt/pde/resolved/pae), the diffusion
    losses (mse/bond/smooth_lddt) and the distogram loss, each scaled by a
    weight derived from ``configs.loss.weight``.
    """

    def __init__(self, configs) -> None:
        super(ProtenixLoss, self).__init__()
        self.configs = configs

        # Scalar loss-weight hyperparameters read from the config.
        self.alpha_confidence = self.configs.loss.weight.alpha_confidence
        self.alpha_pae = self.configs.loss.weight.alpha_pae
        self.alpha_except_pae = self.configs.loss.weight.alpha_except_pae
        self.alpha_diffusion = self.configs.loss.weight.alpha_diffusion
        self.alpha_distogram = self.configs.loss.weight.alpha_distogram
        self.alpha_bond = self.configs.loss.weight.alpha_bond
        self.weight_smooth_lddt = self.configs.loss.weight.smooth_lddt

        # Inclusion radii used when building lDDT masks: nucleotide atoms use
        # a wider neighborhood than other atoms.
        # NOTE(review): units presumably Ångström — confirm upstream.
        self.lddt_radius = {
            "is_nucleotide_threshold": 30.0,
            "is_not_nucleotide_threshold": 15.0,
        }

        # Final scalar weight applied to each named loss in aggregate_losses().
        self.loss_weight = {
            # confidence
            "plddt_loss": self.alpha_confidence * self.alpha_except_pae,
            "pde_loss": self.alpha_confidence * self.alpha_except_pae,
            "resolved_loss": self.alpha_confidence * self.alpha_except_pae,
            "pae_loss": self.alpha_confidence * self.alpha_pae,
            # diffusion
            "mse_loss": self.alpha_diffusion,
            "bond_loss": self.alpha_diffusion * self.alpha_bond,
            "smooth_lddt_loss": self.alpha_diffusion
            * self.weight_smooth_lddt,  # Different from AF3 appendix eq(6), where smooth_lddt has no weight
            # distogram
            "distogram_loss": self.alpha_distogram,
        }

        # Loss modules, one per named entry in self.loss_weight.
        self.plddt_loss = PLDDTLoss(**configs.loss.plddt, **self.lddt_radius)
        self.pde_loss = PDELoss(**configs.loss.pde)
        self.resolved_loss = ExperimentallyResolvedLoss(**configs.loss.resolved)
        self.pae_loss = PAELoss(**configs.loss.pae)
        self.mse_loss = MSELoss(**configs.loss.diffusion.mse)
        self.bond_loss = BondLoss(**configs.loss.diffusion.bond)
        self.smooth_lddt_loss = SmoothLDDTLoss(**configs.loss.diffusion.smooth_lddt)
        self.distogram_loss = DistogramLoss(**configs.loss.distogram)

    def calculate_label(
        self,
        feat_dict: dict[str, Any],
        label_dict: dict[str, Any],
    ) -> dict[str, Any]:
        """Calculate true distance, atom pair mask and lDDT mask.

        Args:
            feat_dict (dict): Feature dictionary containing additional features.
            label_dict (dict): Label dictionary containing ground truth data.

        Returns:
            label_dict (dict): with the following updates:
                lddt_mask (torch.Tensor): atom-pair mask used by the lDDT losses.
                    [..., N_atom, N_atom]
                distance_mask (torch.Tensor): atom-atom mask indicating whether true distance exists.
                    [..., N_atom, N_atom]
                distance (torch.Tensor): true atom-atom distance; only stored
                    when sparse loss metrics are disabled.
                    [..., N_atom, N_atom]
        """
        # Distance mask: a pair distance is valid only if both atoms have
        # resolved true coordinates.
        distance_mask = (
            label_dict["coordinate_mask"][..., None]
            * label_dict["coordinate_mask"][..., None, :]
        )
        # Distances for all atom pairs
        # Note: we convert to bf16 for saving cuda memory, if performance drops, do not convert it
        distance = (
            cdist(label_dict["coordinate"], label_dict["coordinate"]) * distance_mask
        ).to(
            label_dict["coordinate"].dtype
        )  # [..., N_atom, N_atom]

        # Pairwise inclusion mask for lDDT; nucleotide atoms ("is_rna" or
        # "is_dna") use the wider radius from self.lddt_radius.
        lddt_mask = compute_lddt_mask(
            true_distance=distance,
            distance_mask=distance_mask,
            is_nucleotide=feat_dict["is_rna"].bool() + feat_dict["is_dna"].bool(),
            **self.lddt_radius,
        )

        label_dict["lddt_mask"] = lddt_mask
        label_dict["distance_mask"] = distance_mask
        # When sparse losses are enabled the dense distance matrix is not
        # needed downstream, so it is not stored (saves memory).
        if not self.configs.loss_metrics_sparse_enable:
            label_dict["distance"] = distance
        del distance, distance_mask, lddt_mask
        return label_dict

    def calculate_prediction(
        self,
        pred_dict: dict[str, torch.Tensor],
    ) -> dict[str, torch.Tensor]:
        """Get more predictions used for calculating difference losses.

        Args:
            pred_dict (dict[str, torch.Tensor]): raw prediction dict given by the model

        Returns:
            dict[str, torch.Tensor]: updated predictions; a dense predicted
                distance matrix is added only when sparse losses are disabled.
        """
        if not self.configs.loss_metrics_sparse_enable:
            pred_dict["distance"] = torch.cdist(
                pred_dict["coordinate"], pred_dict["coordinate"]
            ).to(
                pred_dict["coordinate"].dtype
            )  # [..., N_atom, N_atom]
        return pred_dict

    def aggregate_losses(
        self, loss_fns: dict, has_valid_resolution: Optional[torch.Tensor] = None
    ) -> tuple[torch.Tensor, dict]:
        """
        Aggregates multiple loss functions and their respective metrics.

        Args:
            loss_fns (dict): Dictionary of loss functions (zero-arg callables)
                to be aggregated; keys must exist in ``self.loss_weight``.
            has_valid_resolution (Optional[torch.Tensor]): Tensor indicating valid resolutions. Defaults to None.

        Returns:
            tuple[torch.Tensor, dict]:
                - cum_loss (torch.Tensor): Cumulative loss.
                - all_metrics (dict): Dictionary containing all metrics.
        """
        cum_loss = 0.0
        all_metrics = {}
        for loss_name, loss_fn in loss_fns.items():
            weight = self.loss_weight[loss_name]
            # Loss fns may return either a tensor or a (tensor, metrics) pair.
            loss_outputs = loss_fn()
            if isinstance(loss_outputs, tuple):
                loss, metrics = loss_outputs
            else:
                assert isinstance(loss_outputs, torch.Tensor)
                loss, metrics = loss_outputs, {}

            all_metrics.update(
                {f"{loss_name}/{key}": val for key, val in metrics.items()}
            )
            if torch.isnan(loss) or torch.isinf(loss):
                logging.warning(f"{loss_name} loss is NaN. Skipping...")
                # NOTE(review): the loss is only zeroed below for confidence
                # losses when no valid resolution exists; any other NaN/Inf
                # loss still enters cum_loss despite the "Skipping" message —
                # confirm this is intended.
                if (
                    (has_valid_resolution is not None)
                    and (has_valid_resolution.sum() == 0)
                    and (
                        loss_name in ["plddt_loss", "pde_loss", "resolved_loss", "pae_loss"]
                    )
                ):
                    # 0.0 * loss (rather than 0.0) keeps the graph connected.
                    loss = 0.0 * loss
            else:
                all_metrics[loss_name] = loss.detach().clone()
                all_metrics[f"weighted_{loss_name}"] = weight * loss.detach().clone()

            cum_loss = cum_loss + weight * loss
        all_metrics["loss"] = cum_loss.detach().clone()

        return cum_loss, all_metrics

    def calculate_losses(
        self,
        feat_dict: dict[str, Any],
        pred_dict: dict[str, torch.Tensor],
        label_dict: dict[str, Any],
        mode: str = "train",
    ) -> tuple[torch.Tensor, dict[str, torch.Tensor]]:
        """
        Calculate the cumulative loss and aggregated metrics for the given predictions and labels.

        Args:
            feat_dict (dict[str, Any]): Feature dictionary containing additional features.
            pred_dict (dict[str, torch.Tensor]): Prediction dictionary containing model outputs.
            label_dict (dict[str, Any]): Label dictionary containing ground truth data.
            mode (str): Mode of operation ('train', 'eval', 'inference'). Defaults to 'train'.

        Returns:
            tuple[torch.Tensor, dict[str, torch.Tensor]]:
                - cum_loss (torch.Tensor): Cumulative loss.
                - metrics (dict[str, torch.Tensor]): Dictionary containing aggregated metrics.
        """
        assert mode in ["train", "eval", "inference"]
        if mode == "train":
            # Confidence Loss: use mini-rollout coordinates
            confidence_coordinate = "coordinate_mini"
            if not self.configs.train_confidence_only:
                # Scale diffusion loss with noise-level (EDM-style weighting).
                diffusion_per_sample_scale = (
                    pred_dict["noise_level"] ** 2 + self.configs.sigma_data**2
                ) / (self.configs.sigma_data * pred_dict["noise_level"]) ** 2

        else:
            # Confidence Loss: use diffusion coordinates
            confidence_coordinate = "coordinate"
            # No scale is required
            diffusion_per_sample_scale = None

        if self.configs.train_confidence_only and mode == "train":
            # Skip Diffusion Loss and distogram loss
            loss_fns = {}
        else:
            # Diffusion Loss: SmoothLDDTLoss / BondLoss / MSELoss
            # Each entry is a zero-arg lambda so aggregate_losses controls
            # evaluation order and can attach per-loss metrics.
            loss_fns = {}
            if self.configs.loss.diffusion_lddt_loss_dense:
                loss_fns.update(
                    {
                        "smooth_lddt_loss": lambda: self.smooth_lddt_loss.dense_forward(
                            pred_coordinate=pred_dict["coordinate"],
                            true_coordinate=label_dict["coordinate"],
                            lddt_mask=label_dict["lddt_mask"],
                            diffusion_chunk_size=self.configs.loss.diffusion_lddt_chunk_size,
                        )  # it's faster if not OOM
                    }
                )
            elif self.configs.loss.diffusion_sparse_loss_enable:
                loss_fns.update(
                    {
                        "smooth_lddt_loss": lambda: self.smooth_lddt_loss.sparse_forward(
                            pred_coordinate=pred_dict["coordinate"],
                            true_coordinate=label_dict["coordinate"],
                            lddt_mask=label_dict["lddt_mask"],
                            diffusion_chunk_size=self.configs.loss.diffusion_lddt_chunk_size,
                        )
                    }
                )
            else:
                # Dense fallback operating on precomputed distance matrices.
                loss_fns.update(
                    {
                        "smooth_lddt_loss": lambda: self.smooth_lddt_loss(
                            pred_distance=pred_dict["distance"],
                            true_distance=label_dict["distance"],
                            distance_mask=label_dict["distance_mask"],
                            lddt_mask=label_dict["lddt_mask"],
                            diffusion_chunk_size=self.configs.loss.diffusion_lddt_chunk_size,
                        )
                    }
                )
            loss_fns.update(
                {
                    "bond_loss": lambda: (
                        self.bond_loss.sparse_forward(
                            pred_coordinate=pred_dict["coordinate"],
                            true_coordinate=label_dict["coordinate"],
                            distance_mask=label_dict["distance_mask"],
                            bond_mask=feat_dict["bond_mask"],
                            per_sample_scale=diffusion_per_sample_scale,
                        )
                        if self.configs.loss.diffusion_sparse_loss_enable
                        else self.bond_loss(
                            pred_distance=pred_dict["distance"],
                            true_distance=label_dict["distance"],
                            distance_mask=label_dict["distance_mask"],
                            bond_mask=feat_dict["bond_mask"],
                            per_sample_scale=diffusion_per_sample_scale,
                            diffusion_chunk_size=self.configs.loss.diffusion_bond_chunk_size,
                        )
                    ),
                    "mse_loss": lambda: self.mse_loss(
                        pred_coordinate=pred_dict["coordinate"],
                        true_coordinate=label_dict["coordinate"],
                        coordinate_mask=label_dict["coordinate_mask"],
                        is_rna=feat_dict["is_rna"],
                        is_dna=feat_dict["is_dna"],
                        is_ligand=feat_dict["is_ligand"],
                        per_sample_scale=diffusion_per_sample_scale,
                    ),
                }
            )
            # Distogram Loss
            if "distogram" in pred_dict:
                loss_fns.update(
                    {
                        "distogram_loss": lambda: self.distogram_loss(
                            logits=pred_dict["distogram"],
                            true_coordinate=label_dict["coordinate"],
                            coordinate_mask=label_dict["coordinate_mask"],
                            rep_atom_mask=feat_dict["distogram_rep_atom_mask"],
                        )
                    }
                )

        # Confidence Loss:
        # Only when resolution is in [min_resolution, max_resolution] the confidence loss is considered
        # NOTE: here we assume batch_size == 1
        resolution = feat_dict["resolution"].item()
        has_valid_resolution = (resolution >= self.configs.loss.resolution.min) & (
            resolution <= self.configs.loss.resolution.max
        )

        # Re-encode the flag as a tensor so aggregate_losses can .sum() it
        # on the right device/dtype.
        if has_valid_resolution:
            has_valid_resolution = torch.tensor(
                [1.0],
                dtype=label_dict["coordinate"].dtype,
                device=label_dict["coordinate"].device,
            )
        else:
            has_valid_resolution = torch.tensor(
                [0.0],
                dtype=label_dict["coordinate"].dtype,
                device=label_dict["coordinate"].device,
            )

        # Confidence heads are all-or-nothing: only add the four confidence
        # losses when every head produced a prediction.
        if all(x in pred_dict for x in ["plddt", "pde", "pae", "resolved"]):
            loss_fns.update(
                {
                    "plddt_loss": lambda: self.plddt_loss(
                        logits=pred_dict["plddt"],
                        # .detach(): confidence losses must not backprop into
                        # the predicted coordinates.
                        pred_coordinate=pred_dict[confidence_coordinate].detach(),
                        true_coordinate=label_dict["coordinate"],
                        coordinate_mask=label_dict["coordinate_mask"],
                        rep_atom_mask=feat_dict["plddt_m_rep_atom_mask"],
                        is_nucleotide=feat_dict["is_rna"] + feat_dict["is_dna"],
                        is_polymer=1 - feat_dict["is_ligand"],
                    ),
                    "pde_loss": lambda: self.pde_loss(
                        logits=pred_dict["pde"],
                        pred_coordinate=pred_dict[confidence_coordinate].detach(),
                        true_coordinate=label_dict["coordinate"],
                        coordinate_mask=label_dict["coordinate_mask"],
                        rep_atom_mask=feat_dict["distogram_rep_atom_mask"],
                    ),
                    "resolved_loss": lambda: self.resolved_loss(
                        logits=pred_dict["resolved"],
                        coordinate_mask=label_dict["coordinate_mask"],
                    ),
                    "pae_loss": lambda: self.pae_loss(
                        logits=pred_dict["pae"],
                        pred_coordinate=pred_dict[confidence_coordinate].detach(),
                        true_coordinate=label_dict["coordinate"],
                        coordinate_mask=label_dict["coordinate_mask"],
                        frame_atom_index=feat_dict["frame_atom_index"],
                        rep_atom_mask=feat_dict["pae_rep_atom_mask"],
                        has_frame=feat_dict["has_frame"],
                    ),
                }
            )

        cum_loss, metrics = self.aggregate_losses(loss_fns, has_valid_resolution)
        return cum_loss, metrics

    def forward(
        self,
        feat_dict: dict[str, Any],
        pred_dict: dict[str, torch.Tensor],
        label_dict: dict[str, Any],
        mode: str = "train",
    ) -> tuple[torch.Tensor, dict[str, torch.Tensor]]:
        """
        Forward pass for calculating the cumulative loss and aggregated metrics.

        Args:
            feat_dict (dict[str, Any]): Feature dictionary containing additional features.
            pred_dict (dict[str, torch.Tensor]): Prediction dictionary containing model outputs.
            label_dict (dict[str, Any]): Label dictionary containing ground truth data.
            mode (str): Mode of operation ('train', 'eval', 'inference'). Defaults to 'train'.

        Returns:
            tuple[torch.Tensor, dict[str, torch.Tensor]]:
                - cum_loss (torch.Tensor): Cumulative loss.
                - losses (dict[str, torch.Tensor]): Dictionary containing aggregated metrics.
        """
        diffusion_chunk_size = self.configs.loss.diffusion_chunk_size_outer
        assert mode in ["train", "eval", "inference"]
        # Pre-computations
        with torch.no_grad():
            label_dict = self.calculate_label(feat_dict, label_dict)

        pred_dict = self.calculate_prediction(pred_dict)

        if diffusion_chunk_size <= 0:
            # Calculate losses over all diffusion samples at once.
            cum_loss, losses = self.calculate_losses(
                feat_dict=feat_dict,
                pred_dict=pred_dict,
                label_dict=label_dict,
                mode=mode,
            )
        else:
            # Chunked path: split the N_sample dimension to bound memory.
            if "coordinate" in pred_dict:
                N_sample = pred_dict["coordinate"].shape[-3]
            elif self.configs.train_confidence_only:
                N_sample = pred_dict["coordinate_mini"].shape[-3]
            else:
                raise KeyError("Missing key: coordinate (in pred_dict).")
            # Ceiling division: one extra chunk for the remainder.
            no_chunks = N_sample // diffusion_chunk_size + (
                N_sample % diffusion_chunk_size != 0
            )
            cum_loss = 0.0
            losses = {}
            for i in range(no_chunks):
                # The last chunk may hold fewer than diffusion_chunk_size samples.
                cur_sample_num = min(
                    diffusion_chunk_size, N_sample - i * diffusion_chunk_size
                )
                # Slice only the per-sample tensors; everything else is shared
                # across chunks.
                pred_dict_i = {}
                for key, value in pred_dict.items():
                    if key in ["coordinate"] and mode == "train":
                        pred_dict_i[key] = value[
                            i * diffusion_chunk_size : (i + 1) * diffusion_chunk_size,
                            :,
                            :,
                        ]
                    elif (
                        key in ["coordinate", "plddt", "pae", "pde", "resolved"]
                        and mode != "train"
                    ):
                        pred_dict_i[key] = value[
                            i * diffusion_chunk_size : (i + 1) * diffusion_chunk_size,
                            :,
                            :,
                        ]
                    elif key == "noise_level":
                        pred_dict_i[key] = value[
                            i * diffusion_chunk_size : (i + 1) * diffusion_chunk_size
                        ]
                    else:
                        pred_dict_i[key] = value
                pred_dict_i = self.calculate_prediction(pred_dict_i)
                cum_loss_i, losses_i = self.calculate_losses(
                    feat_dict=feat_dict,
                    pred_dict=pred_dict_i,
                    label_dict=label_dict,
                    mode=mode,
                )
                # Weight each chunk by its sample count so the final division
                # by N_sample yields a true per-sample mean.
                cum_loss += cum_loss_i * cur_sample_num
                # Aggregate metrics
                for key, value in losses_i.items():
                    if key in losses:
                        losses[key] += value * cur_sample_num
                    else:
                        losses[key] = value * cur_sample_num
            cum_loss /= N_sample
            for key in losses.keys():
                losses[key] /= N_sample

        return cum_loss, losses
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/modules/__init__.py
ADDED
|
File without changes
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/modules/confidence.py
ADDED
|
@@ -0,0 +1,321 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from typing import Optional, Union
|
| 16 |
+
|
| 17 |
+
import torch
|
| 18 |
+
import torch.nn as nn
|
| 19 |
+
|
| 20 |
+
from protenix.model.modules.pairformer import PairformerStack
|
| 21 |
+
from protenix.model.modules.primitives import LinearNoBias
|
| 22 |
+
from protenix.model.utils import broadcast_token_to_atom, one_hot
|
| 23 |
+
from protenix.openfold_local.model.primitives import LayerNorm
|
| 24 |
+
from protenix.utils.torch_utils import cdist
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class ConfidenceHead(nn.Module):
|
| 28 |
+
"""
|
| 29 |
+
Implements Algorithm 31 in AF3
|
| 30 |
+
"""
|
| 31 |
+
|
| 32 |
+
    def __init__(
        self,
        n_blocks: int = 4,
        c_s: int = 384,
        c_z: int = 128,
        c_s_inputs: int = 449,
        b_pae: int = 64,
        b_pde: int = 64,
        b_plddt: int = 50,
        b_resolved: int = 2,
        max_atoms_per_token: int = 20,
        pairformer_dropout: float = 0.0,
        blocks_per_ckpt: Optional[int] = None,
        distance_bin_start: float = 3.25,
        distance_bin_end: float = 52.0,
        distance_bin_step: float = 1.25,
        stop_gradient: bool = True,
    ) -> None:
        """
        Args:
            n_blocks (int, optional): number of blocks for ConfidenceHead. Defaults to 4.
            c_s (int, optional): hidden dim [for single embedding]. Defaults to 384.
            c_z (int, optional): hidden dim [for pair embedding]. Defaults to 128.
            c_s_inputs (int, optional): hidden dim [for single embedding from InputFeatureEmbedder]. Defaults to 449.
            b_pae (int, optional): the bin number for pae. Defaults to 64.
            b_pde (int, optional): the bin number for pde. Defaults to 64.
            b_plddt (int, optional): the bin number for plddt. Defaults to 50.
            b_resolved (int, optional): the bin number for resolved. Defaults to 2.
            max_atoms_per_token (int, optional): max atoms in a token. Defaults to 20.
            pairformer_dropout (float, optional): dropout ratio for Pairformer. Defaults to 0.0.
            blocks_per_ckpt: number of Pairformer blocks in each activation checkpoint
            distance_bin_start (float, optional): Start of the distance bin range. Defaults to 3.25.
            distance_bin_end (float, optional): End of the distance bin range. Defaults to 52.0.
            distance_bin_step (float, optional): Step size for the distance bins. Defaults to 1.25.
            stop_gradient (bool, optional): Whether to stop gradient propagation. Defaults to True.
        """
        super(ConfidenceHead, self).__init__()
        self.n_blocks = n_blocks
        self.c_s = c_s
        self.c_z = c_z
        self.c_s_inputs = c_s_inputs
        self.b_pae = b_pae
        self.b_pde = b_pde
        self.b_plddt = b_plddt
        self.b_resolved = b_resolved
        self.max_atoms_per_token = max_atoms_per_token
        self.stop_gradient = stop_gradient
        # Projections of the input single embedding into pair space (one for
        # each side of the pair).
        self.linear_no_bias_s1 = LinearNoBias(
            in_features=self.c_s_inputs, out_features=self.c_z
        )
        self.linear_no_bias_s2 = LinearNoBias(
            in_features=self.c_s_inputs, out_features=self.c_z
        )
        # Distance bins: lower edges from start to end (exclusive) in steps;
        # the last upper edge is effectively +inf (1e6).
        lower_bins = torch.arange(
            distance_bin_start, distance_bin_end, distance_bin_step
        )
        upper_bins = torch.cat([lower_bins[1:], torch.tensor([1e6])])

        # Registered as non-trainable parameters so they move with the module
        # across devices/dtypes.
        self.lower_bins = nn.Parameter(lower_bins, requires_grad=False)
        self.upper_bins = nn.Parameter(upper_bins, requires_grad=False)
        self.num_bins = len(lower_bins)  # + 1

        # Embeds the one-hot distance bins into pair space.
        self.linear_no_bias_d = LinearNoBias(
            in_features=self.num_bins, out_features=self.c_z
        )

        self.pairformer_stack = PairformerStack(
            c_z=self.c_z,
            c_s=self.c_s,
            n_blocks=n_blocks,
            dropout=pairformer_dropout,
            blocks_per_ckpt=blocks_per_ckpt,
        )
        # Pairwise output heads (pae/pde logits from the pair embedding).
        self.linear_no_bias_pae = LinearNoBias(
            in_features=self.c_z, out_features=self.b_pae
        )
        self.linear_no_bias_pde = LinearNoBias(
            in_features=self.c_z, out_features=self.b_pde
        )
        # Per-atom output heads: one weight matrix per atom slot within a
        # token, applied to the single embedding.
        self.plddt_weight = nn.Parameter(
            data=torch.empty(size=(self.max_atoms_per_token, self.c_s, self.b_plddt))
        )
        self.resolved_weight = nn.Parameter(
            data=torch.empty(size=(self.max_atoms_per_token, self.c_s, self.b_resolved))
        )

        # Trunk embedding input projections.
        self.linear_no_bias_s_inputs = LinearNoBias(self.c_s_inputs, self.c_s)
        self.linear_no_bias_s_trunk = LinearNoBias(self.c_s, self.c_s)
        self.layernorm_s_trunk = LayerNorm(self.c_s)
        self.linear_no_bias_z_trunk = LinearNoBias(self.c_z, self.c_z)
        self.layernorm_z_trunk = LayerNorm(self.c_z)

        # Layers applied to concatenated (2x-width) embeddings.
        self.layernorm_no_bias_z_cat = nn.LayerNorm(self.c_z * 2, bias=False)
        self.layernorm_no_bias_s_cat = nn.LayerNorm(self.c_s * 2, bias=False)
        self.linear_no_bias_z_cat = LinearNoBias(self.c_z * 2, self.c_z)
        self.linear_no_bias_s_cat = LinearNoBias(self.c_s * 2, self.c_s)

        # Output layernorm
        self.pae_ln = LayerNorm(self.c_z)
        self.pde_ln = LayerNorm(self.c_z)
        self.plddt_ln = LayerNorm(self.c_s)
        self.resolved_ln = LayerNorm(self.c_s)

        with torch.no_grad():
            # Zero init for output layer (before softmax) to zero
            nn.init.zeros_(self.linear_no_bias_pae.weight)
            nn.init.zeros_(self.linear_no_bias_pde.weight)
            nn.init.zeros_(self.plddt_weight)
            nn.init.zeros_(self.resolved_weight)

            # Zero init for trunk embedding input layer
            # nn.init.zeros_(self.linear_no_bias_s_trunk.weight)
            # nn.init.zeros_(self.linear_no_bias_z_trunk.weight)
|
| 145 |
+
|
| 146 |
+
def forward(
    self,
    input_feature_dict: dict[str, Union[torch.Tensor, int, float, dict]],
    s_inputs: torch.Tensor,
    s_trunk: torch.Tensor,
    z_trunk: torch.Tensor,
    pair_mask: torch.Tensor,
    x_pred_coords: torch.Tensor,
    use_memory_efficient_kernel: bool = False,
    use_deepspeed_evo_attention: bool = False,
    use_lma: bool = False,
    inplace_safe: bool = False,
    chunk_size: Optional[int] = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Predict per-sample confidence scores (pLDDT / PAE / PDE / resolved).

    Projects the trunk embeddings, mixes them with the input embeddings, then
    runs `memory_efficient_forward` once per diffusion sample to keep peak GPU
    memory bounded, and finally stacks the per-sample predictions.

    Args:
        input_feature_dict: Dictionary containing input features.
        s_inputs (torch.Tensor): single embedding from InputFeatureEmbedder
            [..., N_tokens, c_s_inputs]
        s_trunk (torch.Tensor): single feature embedding from PairFormer (Alg17)
            [..., N_tokens, c_s]
        z_trunk (torch.Tensor): pair feature embedding from PairFormer (Alg17)
            [..., N_tokens, N_tokens, c_z]
        pair_mask (torch.Tensor): pair mask
            [..., N_token, N_token]
        x_pred_coords (torch.Tensor): predicted coordinates
            [..., N_sample, N_atoms, 3]
        use_memory_efficient_kernel (bool, optional): Whether to use memory-efficient kernel. Defaults to False.
        use_deepspeed_evo_attention (bool, optional): Whether to use DeepSpeed evolutionary attention. Defaults to False.
        use_lma (bool, optional): Whether to use low-memory attention. Defaults to False.
        inplace_safe (bool, optional): Whether to use inplace operations. Defaults to False.
        chunk_size (Optional[int], optional): Chunk size for memory-efficient operations. Defaults to None.

    Returns:
        tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
            - plddt_preds: Predicted pLDDT scores [..., N_sample, N_atom, plddt_bins].
            - pae_preds: Predicted PAE scores [..., N_sample, N_token, N_token, pae_bins].
            - pde_preds: Predicted PDE scores [..., N_sample, N_token, N_token, pde_bins].
            - resolved_preds: Predicted resolved scores [..., N_sample, N_atom, 2].
    """

    # Optionally cut the gradient flow back into the trunk so confidence
    # training does not perturb the structure module.
    if self.stop_gradient:
        s_inputs = s_inputs.detach()
        s_trunk = s_trunk.detach()
        z_trunk = z_trunk.detach()

    s_trunk = self.linear_no_bias_s_trunk(self.layernorm_s_trunk(s_trunk))
    z_trunk = self.linear_no_bias_z_trunk(self.layernorm_z_trunk(z_trunk))

    # Outer sum of two projections of s_inputs forms an initial pair embedding.
    z_init = (
        self.linear_no_bias_s1(s_inputs)[..., None, :, :]
        + self.linear_no_bias_s2(s_inputs)[..., None, :]
    )
    s_init = self.linear_no_bias_s_inputs(s_inputs)
    # Concatenate input-derived and trunk-derived embeddings, then project
    # back down to c_s / c_z.
    s_trunk = torch.cat([s_init, s_trunk], dim=-1)
    z_trunk = torch.cat([z_init, z_trunk], dim=-1)

    s_trunk = self.linear_no_bias_s_cat(self.layernorm_no_bias_s_cat(s_trunk))
    z_trunk = self.linear_no_bias_z_cat(self.layernorm_no_bias_z_cat(z_trunk))

    # At inference time, free the large intermediate pair tensor eagerly.
    if not self.training:
        del z_init
        torch.cuda.empty_cache()

    x_rep_atom_mask = input_feature_dict[
        "distogram_rep_atom_mask"
    ].bool()  # [N_atom]
    # Keep only representative-atom coordinates (one per token).
    x_pred_rep_coords = x_pred_coords[..., x_rep_atom_mask, :]
    N_sample = x_pred_rep_coords.size(-3)

    # Run one sample at a time to bound peak memory.
    plddt_preds, pae_preds, pde_preds, resolved_preds = [], [], [], []
    for i in range(N_sample):
        plddt_pred, pae_pred, pde_pred, resolved_pred = (
            self.memory_efficient_forward(
                input_feature_dict=input_feature_dict,
                s_trunk=s_trunk,
                z_pair=z_trunk,
                pair_mask=pair_mask,
                x_pred_rep_coords=x_pred_rep_coords[..., i, :, :],
                use_memory_efficient_kernel=use_memory_efficient_kernel,
                use_deepspeed_evo_attention=use_deepspeed_evo_attention,
                use_lma=use_lma,
                inplace_safe=inplace_safe,
                chunk_size=chunk_size,
            )
        )
        if z_trunk.shape[-2] > 2000 and (not self.training):
            # cpu offload pae_preds/pde_preds: for long sequences the pair
            # predictions dominate GPU memory, so park them on host RAM.
            pae_pred = pae_pred.cpu()
            pde_pred = pde_pred.cpu()
            torch.cuda.empty_cache()
        plddt_preds.append(plddt_pred)
        pae_preds.append(pae_pred)
        pde_preds.append(pde_pred)
        resolved_preds.append(resolved_pred)
    plddt_preds = torch.stack(
        plddt_preds, dim=-3
    )  # [..., N_sample, N_atom, plddt_bins]
    # Pae_preds/pde_preds single tensor will occupy 11.6G[BF16]/23.2G[FP32]
    pae_preds = torch.stack(
        pae_preds, dim=-4
    )  # [..., N_sample, N_token, N_token, pae_bins]
    pde_preds = torch.stack(
        pde_preds, dim=-4
    )  # [..., N_sample, N_token, N_token, pde_bins]
    resolved_preds = torch.stack(
        resolved_preds, dim=-3
    )  # [..., N_sample, N_atom, 2]
    return plddt_preds, pae_preds, pde_preds, resolved_preds
|
| 255 |
+
|
| 256 |
+
def memory_efficient_forward(
    self,
    input_feature_dict: dict[str, Union[torch.Tensor, int, float, dict]],
    s_trunk: torch.Tensor,
    z_pair: torch.Tensor,
    pair_mask: torch.Tensor,
    x_pred_rep_coords: torch.Tensor,
    use_memory_efficient_kernel: bool = False,
    use_deepspeed_evo_attention: bool = False,
    use_lma: bool = False,
    inplace_safe: bool = False,
    chunk_size: Optional[int] = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Compute confidence heads for a single diffusion sample.

    Embeds predicted inter-token distances into the pair representation,
    refines single/pair embeddings with the pairformer stack, and reads out
    pLDDT / PAE / PDE / resolved predictions.

    Args:
        input_feature_dict: input feature dict; this method reads
            "atom_to_token_idx" and "atom_to_tokatom_idx".
        s_trunk (torch.Tensor): conditioned single embedding
            [..., N_tokens, c_s]
        z_pair (torch.Tensor): conditioned pair embedding
            [..., N_tokens, N_tokens, c_z]
        pair_mask (torch.Tensor): pair mask [..., N_token, N_token]
        x_pred_rep_coords (torch.Tensor): predicted coordinates of the
            representative atoms, [..., N_tokens, 3]
            # Note: N_sample = 1 for avoiding CUDA OOM

    Returns:
        tuple of (plddt_pred, pae_pred, pde_pred, resolved_pred) for this
        single sample.
    """
    # Embed pair distances of representative atoms:
    distance_pred = cdist(
        x_pred_rep_coords, x_pred_rep_coords
    )  # [..., N_tokens, N_tokens]
    # Bin the distances (one_hot over [lower_bins, upper_bins]) and add the
    # projection into the pair embedding.
    z_pair = z_pair + self.linear_no_bias_d(
        one_hot(
            x=distance_pred, lower_bins=self.lower_bins, upper_bins=self.upper_bins
        )
    )  # [..., N_tokens, N_tokens, c_z]
    # Line 4
    s_single, z_pair = self.pairformer_stack(
        s_trunk,
        z_pair,
        pair_mask,
        use_memory_efficient_kernel=use_memory_efficient_kernel,
        use_deepspeed_evo_attention=use_deepspeed_evo_attention,
        use_lma=use_lma,
        inplace_safe=inplace_safe,
        chunk_size=chunk_size,
    )

    # PAE is directional (read from z_pair as-is); PDE is symmetrized by
    # adding the transpose before the layernorm + projection.
    pae_pred = self.linear_no_bias_pae(self.pae_ln(z_pair))
    pde_pred = self.linear_no_bias_pde(
        self.pde_ln(z_pair + z_pair.transpose(-2, -3))
    )

    atom_to_token_idx = input_feature_dict[
        "atom_to_token_idx"
    ]  # in range [0, N_token-1] shape: [N_atom]
    atom_to_tokatom_idx = input_feature_dict[
        "atom_to_tokatom_idx"
    ]  # in range [0, max_atoms_per_token-1] shape: [N_atom] # influenced by crop
    # Broadcast s_single: [N_tokens, c_s] -> [N_atoms, c_s]
    a = broadcast_token_to_atom(
        x_token=s_single, atom_to_token_idx=atom_to_token_idx
    )
    # Per-atom readout: each atom selects its position-within-token weight
    # slice [c_s, bins] and contracts it with its single embedding.
    plddt_pred = torch.einsum(
        "...nc,ncb->...nb", self.plddt_ln(a), self.plddt_weight[atom_to_tokatom_idx]
    )
    resolved_pred = torch.einsum(
        "...nc,ncb->...nb",
        self.resolved_ln(a),
        self.resolved_weight[atom_to_tokatom_idx],
    )
    # Release cached blocks for long sequences at inference time.
    if not self.training and z_pair.shape[-2] > 2000:
        torch.cuda.empty_cache()
    return plddt_pred, pae_pred, pde_pred, resolved_pred
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/modules/diffusion.py
ADDED
|
@@ -0,0 +1,541 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from typing import Optional, Union
|
| 16 |
+
|
| 17 |
+
import torch
|
| 18 |
+
import torch.nn as nn
|
| 19 |
+
|
| 20 |
+
from protenix.model.modules.embedders import FourierEmbedding, RelativePositionEncoding
|
| 21 |
+
from protenix.model.modules.primitives import LinearNoBias, Transition
|
| 22 |
+
from protenix.model.modules.transformer import (
|
| 23 |
+
AtomAttentionDecoder,
|
| 24 |
+
AtomAttentionEncoder,
|
| 25 |
+
DiffusionTransformer,
|
| 26 |
+
)
|
| 27 |
+
from protenix.model.utils import expand_at_dim
|
| 28 |
+
from protenix.openfold_local.model.primitives import LayerNorm
|
| 29 |
+
from protenix.openfold_local.utils.checkpointing import get_checkpoint_fn
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class DiffusionConditioning(nn.Module):
    """
    Implements Algorithm 21 in AF3.

    Builds the per-step conditioning signals for the diffusion module: a pair
    embedding (trunk pair + relative position encoding) and a single embedding
    (trunk single + input single + Fourier-embedded noise level).
    """

    def __init__(
        self,
        sigma_data: float = 16.0,
        c_z: int = 128,
        c_s: int = 384,
        c_s_inputs: int = 449,
        c_noise_embedding: int = 256,
    ) -> None:
        """
        Args:
            sigma_data (float, optional): the standard deviation of the data. Defaults to 16.0.
            c_z (int, optional): hidden dim [for pair embedding]. Defaults to 128.
            c_s (int, optional): hidden dim [for single embedding]. Defaults to 384.
            c_s_inputs (int, optional): input embedding dim from InputEmbedder. Defaults to 449.
            c_noise_embedding (int, optional): noise embedding dim. Defaults to 256.
        """
        super(DiffusionConditioning, self).__init__()
        self.sigma_data = sigma_data
        self.c_z = c_z
        self.c_s = c_s
        self.c_s_inputs = c_s_inputs
        # Line1-Line3: concat trunk pair with relpe, then project 2*c_z -> c_z.
        self.relpe = RelativePositionEncoding(c_z=c_z)
        self.layernorm_z = LayerNorm(2 * self.c_z)
        self.linear_no_bias_z = LinearNoBias(
            in_features=2 * self.c_z, out_features=self.c_z
        )
        # Line3-Line5: two residual transitions on the pair embedding.
        self.transition_z1 = Transition(c_in=self.c_z, n=2)
        self.transition_z2 = Transition(c_in=self.c_z, n=2)

        # Line6-Line7: concat trunk single with input single, project to c_s.
        self.layernorm_s = LayerNorm(self.c_s + self.c_s_inputs)
        self.linear_no_bias_s = LinearNoBias(
            in_features=self.c_s + self.c_s_inputs, out_features=self.c_s
        )
        # Line8-Line9: embed the (log-scaled) noise level.
        self.fourier_embedding = FourierEmbedding(c=c_noise_embedding)
        self.layernorm_n = LayerNorm(c_noise_embedding)
        self.linear_no_bias_n = LinearNoBias(
            in_features=c_noise_embedding, out_features=self.c_s
        )
        # Line10-Line12: two residual transitions on the single embedding.
        self.transition_s1 = Transition(c_in=self.c_s, n=2)
        self.transition_s2 = Transition(c_in=self.c_s, n=2)
        # NOTE: a leftover debug print of sigma_data was removed here; it wrote
        # to stdout on every construction and carried no information beyond
        # the stored attribute.

    def forward(
        self,
        t_hat_noise_level: torch.Tensor,
        input_feature_dict: dict[str, Union[torch.Tensor, int, float, dict]],
        s_inputs: torch.Tensor,
        s_trunk: torch.Tensor,
        z_trunk: torch.Tensor,
        inplace_safe: bool = False,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            t_hat_noise_level (torch.Tensor): the noise level
                [..., N_sample]
            input_feature_dict (dict[str, Union[torch.Tensor, int, float, dict]]): input meta feature dict
            s_inputs (torch.Tensor): single embedding from InputFeatureEmbedder
                [..., N_tokens, c_s_inputs]
            s_trunk (torch.Tensor): single feature embedding from PairFormer (Alg17)
                [..., N_tokens, c_s]
            z_trunk (torch.Tensor): pair feature embedding from PairFormer (Alg17)
                [..., N_tokens, N_tokens, c_z]
            inplace_safe (bool): Whether it is safe to use inplace operations.
        Returns:
            tuple[torch.Tensor, torch.Tensor]: embeddings s and z
                - s (torch.Tensor): [..., N_sample, N_tokens, c_s]
                - z (torch.Tensor): [..., N_tokens, N_tokens, c_z]
        """
        # Pair conditioning
        pair_z = torch.cat(
            tensors=[z_trunk, self.relpe(input_feature_dict)], dim=-1
        )  # [..., N_tokens, N_tokens, 2*c_z]
        pair_z = self.linear_no_bias_z(self.layernorm_z(pair_z))
        if inplace_safe:
            pair_z += self.transition_z1(pair_z)
            pair_z += self.transition_z2(pair_z)
        else:
            pair_z = pair_z + self.transition_z1(pair_z)
            pair_z = pair_z + self.transition_z2(pair_z)
        # Single conditioning
        single_s = torch.cat(
            tensors=[s_trunk, s_inputs], dim=-1
        )  # [..., N_tokens, c_s + c_s_inputs]
        single_s = self.linear_no_bias_s(self.layernorm_s(single_s))
        # EDM-style c_noise: log(sigma / sigma_data) / 4 goes into the Fourier
        # embedding; cast back to the working dtype of the single embedding.
        noise_n = self.fourier_embedding(
            t_hat_noise_level=torch.log(input=t_hat_noise_level / self.sigma_data) / 4
        ).to(
            single_s.dtype
        )  # [..., N_sample, c_in]
        # Broadcast tokens over samples: [..., 1, N_tokens, c_s] +
        # [..., N_sample, 1, c_s] -> [..., N_sample, N_tokens, c_s].
        single_s = single_s.unsqueeze(dim=-3) + self.linear_no_bias_n(
            self.layernorm_n(noise_n)
        ).unsqueeze(
            dim=-2
        )  # [..., N_sample, N_tokens, c_s]
        if inplace_safe:
            single_s += self.transition_s1(single_s)
            single_s += self.transition_s2(single_s)
        else:
            single_s = single_s + self.transition_s1(single_s)
            single_s = single_s + self.transition_s2(single_s)
        # For long sequences at inference time, release cached GPU blocks.
        if not self.training and pair_z.shape[-2] > 2000:
            torch.cuda.empty_cache()
        return single_s, pair_z
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class DiffusionSchedule:
    """Noise-level schedules for EDM-style diffusion training and sampling.

    Holds the schedule hyper-parameters and exposes two samplers:
    a log-normal draw for training and a deterministic power-interpolated
    ladder for inference.
    """

    def __init__(
        self,
        sigma_data: float = 16.0,
        s_max: float = 160.0,
        s_min: float = 4e-4,
        p: float = 7.0,
        dt: float = 1 / 200,
        p_mean: float = -1.2,
        p_std: float = 1.5,
    ) -> None:
        """
        Args:
            sigma_data (float, optional): The standard deviation of the data. Defaults to 16.0.
            s_max (float, optional): The maximum noise level. Defaults to 160.0.
            s_min (float, optional): The minimum noise level. Defaults to 4e-4.
            p (float, optional): The exponent for the noise schedule. Defaults to 7.0.
            dt (float, optional): The time step size. Defaults to 1/200.
            p_mean (float, optional): The mean of the log-normal distribution for noise level sampling. Defaults to -1.2.
            p_std (float, optional): The standard deviation of the log-normal distribution for noise level sampling. Defaults to 1.5.
        """
        self.sigma_data = sigma_data
        self.s_max = s_max
        self.s_min = s_min
        self.p = p
        self.dt = dt
        self.p_mean = p_mean
        self.p_std = p_std
        # Number of discrete time points covering [0, 1] at step dt
        # (endpoints included): 201 for the default dt = 1/200.
        self.T = int(1 / dt) + 1  # 201

    def get_train_noise_schedule(self) -> torch.Tensor:
        """Draw one noise level sigma = sigma_data * exp(p_mean + p_std * N(0, 1))."""
        gaussian_sample = torch.randn(1)
        return self.sigma_data * torch.exp(self.p_mean + self.p_std * gaussian_sample)

    def get_inference_noise_schedule(self) -> torch.Tensor:
        """Return the decreasing ladder of T noise levels used at inference.

        Interpolates linearly between s_max^(1/p) and s_min^(1/p), raises the
        result back to the p-th power, and scales by sigma_data — so the
        schedule runs from sigma_data * s_max down to sigma_data * s_min.
        """
        # Time grid over [0, 1]; the tiny epsilon keeps the endpoint 1.0.
        time_grid = torch.arange(start=0, end=1 + 1e-10, step=self.dt)
        root_max = self.s_max ** (1 / self.p)
        root_min = self.s_min ** (1 / self.p)
        interpolated = root_max + time_grid * (root_min - root_max)
        return self.sigma_data * interpolated**self.p
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
class DiffusionModule(nn.Module):
|
| 196 |
+
"""
|
| 197 |
+
Implements Algorithm 20 in AF3
|
| 198 |
+
"""
|
| 199 |
+
|
| 200 |
+
def __init__(
    self,
    sigma_data: float = 16.0,
    c_atom: int = 128,
    c_atompair: int = 16,
    c_token: int = 768,
    c_s: int = 384,
    c_z: int = 128,
    c_s_inputs: int = 449,
    atom_encoder: dict[str, int] = {"n_blocks": 3, "n_heads": 4},
    transformer: dict[str, int] = {"n_blocks": 24, "n_heads": 16},
    atom_decoder: dict[str, int] = {"n_blocks": 3, "n_heads": 4},
    blocks_per_ckpt: Optional[int] = None,
    use_fine_grained_checkpoint: bool = False,
    initialization: Optional[dict[str, Union[str, float, bool]]] = None,
) -> None:
    """
    Args:
        sigma_data (torch.float, optional): the standard deviation of the data. Defaults to 16.0.
        c_atom (int, optional): embedding dim for atom feature. Defaults to 128.
        c_atompair (int, optional): embedding dim for atompair feature. Defaults to 16.
        c_token (int, optional): feature channel of token (single a). Defaults to 768.
        c_s (int, optional): hidden dim [for single embedding]. Defaults to 384.
        c_z (int, optional): hidden dim [for pair embedding]. Defaults to 128.
        c_s_inputs (int, optional): hidden dim [for single input embedding]. Defaults to 449.
        atom_encoder (dict[str, int], optional): configs in AtomAttentionEncoder. Defaults to {"n_blocks": 3, "n_heads": 4}.
        transformer (dict[str, int], optional): configs in DiffusionTransformer. Defaults to {"n_blocks": 24, "n_heads": 16}.
        atom_decoder (dict[str, int], optional): configs in AtomAttentionDecoder. Defaults to {"n_blocks": 3, "n_heads": 4}.
        blocks_per_ckpt: number of atom_encoder/transformer/atom_decoder blocks in each activation checkpoint
            Size of each chunk. A higher value corresponds to fewer
            checkpoints, and trades memory for speed. If None, no checkpointing is performed.
        use_fine_grained_checkpoint: whether use fine-gained checkpoint for finetuning stage 2
            only effective if blocks_per_ckpt is not None.
        initialization: initialize the diffusion module according to initialization config.

    NOTE(review): the dict defaults (atom_encoder/transformer/atom_decoder)
    are mutable default arguments; they are only read and unpacked here, but
    callers should not mutate them.
    """

    super(DiffusionModule, self).__init__()
    self.sigma_data = sigma_data
    self.c_atom = c_atom
    self.c_atompair = c_atompair
    self.c_token = c_token
    self.c_s_inputs = c_s_inputs
    self.c_s = c_s
    self.c_z = c_z

    # Grad checkpoint setting
    self.blocks_per_ckpt = blocks_per_ckpt
    self.use_fine_grained_checkpoint = use_fine_grained_checkpoint

    # Alg21: builds the per-step single/pair conditioning signals.
    self.diffusion_conditioning = DiffusionConditioning(
        sigma_data=self.sigma_data, c_z=c_z, c_s=c_s, c_s_inputs=c_s_inputs
    )
    # Sequence-local atom attention + aggregation to tokens.
    self.atom_attention_encoder = AtomAttentionEncoder(
        **atom_encoder,
        c_atom=c_atom,
        c_atompair=c_atompair,
        c_token=c_token,
        has_coords=True,
        c_s=c_s,
        c_z=c_z,
        blocks_per_ckpt=blocks_per_ckpt,
    )
    # Alg20: line4
    self.layernorm_s = LayerNorm(c_s)
    self.linear_no_bias_s = LinearNoBias(in_features=c_s, out_features=c_token)
    # Full token-level self-attention stack.
    self.diffusion_transformer = DiffusionTransformer(
        **transformer,
        c_a=c_token,
        c_s=c_s,
        c_z=c_z,
        blocks_per_ckpt=blocks_per_ckpt,
    )
    self.layernorm_a = LayerNorm(c_token)
    # Broadcast token activations back to atoms and predict coordinate update.
    self.atom_attention_decoder = AtomAttentionDecoder(
        **atom_decoder,
        c_token=c_token,
        c_atom=c_atom,
        c_atompair=c_atompair,
        blocks_per_ckpt=blocks_per_ckpt,
    )
    self.init_parameters(initialization)
|
| 281 |
+
|
| 282 |
+
def init_parameters(self, initialization: Optional[dict]):
    """
    Initializes the parameters of the diffusion module according to the provided
    initialization configuration.

    Args:
        initialization (Optional[dict]): A dictionary containing initialization
            settings. If None (the default passed down from ``__init__``), the
            modules keep their built-in initialization.
    """
    if initialization is None:
        # ``__init__`` defaults `initialization` to None and calls this method
        # unconditionally; previously that path crashed with
        # AttributeError on `None.get(...)`. Treat None as "no special init".
        return

    if initialization.get("zero_init_condition_transition", False):
        self.diffusion_conditioning.transition_z1.zero_init()
        self.diffusion_conditioning.transition_z2.zero_init()
        self.diffusion_conditioning.transition_s1.zero_init()
        self.diffusion_conditioning.transition_s2.zero_init()

    # The encoder interprets each flag itself; unset flags default to False.
    self.atom_attention_encoder.linear_init(
        zero_init_atom_encoder_residual_linear=initialization.get(
            "zero_init_atom_encoder_residual_linear", False
        ),
        he_normal_init_atom_encoder_small_mlp=initialization.get(
            "he_normal_init_atom_encoder_small_mlp", False
        ),
        he_normal_init_atom_encoder_output=initialization.get(
            "he_normal_init_atom_encoder_output", False
        ),
    )

    if initialization.get("glorot_init_self_attention", False):
        for (
            block
        ) in (
            self.atom_attention_encoder.atom_transformer.diffusion_transformer.blocks
        ):
            block.attention_pair_bias.glorot_init()

    for block in self.diffusion_transformer.blocks:
        if initialization.get("zero_init_adaln", False):
            block.attention_pair_bias.layernorm_a.zero_init()
            block.conditioned_transition_block.adaln.zero_init()
        if initialization.get("zero_init_residual_condition_transition", False):
            nn.init.zeros_(
                block.conditioned_transition_block.linear_nobias_b.weight
            )

    if initialization.get("zero_init_atom_decoder_linear", False):
        nn.init.zeros_(self.atom_attention_decoder.linear_no_bias_a.weight)

    # Zero the decoder output projection so the initial coordinate update is 0.
    if initialization.get("zero_init_dit_output", False):
        nn.init.zeros_(self.atom_attention_decoder.linear_no_bias_out.weight)
| 329 |
+
|
| 330 |
+
def f_forward(
    self,
    r_noisy: torch.Tensor,
    t_hat_noise_level: torch.Tensor,
    input_feature_dict: dict[str, Union[torch.Tensor, int, float, dict]],
    s_inputs: torch.Tensor,
    s_trunk: torch.Tensor,
    z_trunk: torch.Tensor,
    inplace_safe: bool = False,
    chunk_size: Optional[int] = None,
) -> torch.Tensor:
    """The raw network to be trained.
    As in EDM equation (7), this is F_theta(c_in * x, c_noise(sigma)).
    Here, c_noise(sigma) is computed in Conditioning module.

    Args:
        r_noisy (torch.Tensor): scaled x_noisy (i.e., c_in * x)
            [..., N_sample, N_atom, 3]
        t_hat_noise_level (torch.Tensor): the noise level, as well as the time step t
            [..., N_sample]
        input_feature_dict (dict[str, Union[torch.Tensor, int, float, dict]]): input feature
        s_inputs (torch.Tensor): single embedding from InputFeatureEmbedder
            [..., N_tokens, c_s_inputs]
        s_trunk (torch.Tensor): single feature embedding from PairFormer (Alg17)
            [..., N_tokens, c_s]
        z_trunk (torch.Tensor): pair feature embedding from PairFormer (Alg17)
            [..., N_tokens, N_tokens, c_z]
        inplace_safe (bool): Whether it is safe to use inplace operations. Defaults to False.
        chunk_size (Optional[int]): Chunk size for memory-efficient operations. Defaults to None.

    Returns:
        torch.Tensor: coordinates update
            [..., N_sample, N_atom, 3]
    """
    N_sample = r_noisy.size(-3)
    # One noise level per sample is required.
    assert t_hat_noise_level.size(-1) == N_sample

    # Activation checkpointing only makes sense when gradients are tracked.
    blocks_per_ckpt = self.blocks_per_ckpt
    if not torch.is_grad_enabled():
        blocks_per_ckpt = None
    # Conditioning, shared across difference samples
    # Diffusion_conditioning consumes 7-8G when token num is 768,
    # use checkpoint here if blocks_per_ckpt is not None.
    if blocks_per_ckpt:
        checkpoint_fn = get_checkpoint_fn()
        # NOTE: checkpoint_fn takes positional args only; order must match
        # DiffusionConditioning.forward's signature.
        s_single, z_pair = checkpoint_fn(
            self.diffusion_conditioning,
            t_hat_noise_level,
            input_feature_dict,
            s_inputs,
            s_trunk,
            z_trunk,
            inplace_safe,
        )
    else:
        s_single, z_pair = self.diffusion_conditioning(
            t_hat_noise_level=t_hat_noise_level,
            input_feature_dict=input_feature_dict,
            s_inputs=s_inputs,
            s_trunk=s_trunk,
            z_trunk=z_trunk,
            inplace_safe=inplace_safe,
        )  # [..., N_sample, N_token, c_s], [..., N_token, N_token, c_z]

    # Expand embeddings to match N_sample
    s_trunk = expand_at_dim(
        s_trunk, dim=-3, n=N_sample
    )  # [..., N_sample, N_token, c_s]
    z_pair = expand_at_dim(
        z_pair, dim=-4, n=N_sample
    )  # [..., N_sample, N_token, N_token, c_z]
    # Fine-grained checkpoint for finetuning stage 2 (token num: 768) for avoiding OOM
    if blocks_per_ckpt and self.use_fine_grained_checkpoint:
        checkpoint_fn = get_checkpoint_fn()
        a_token, q_skip, c_skip, p_skip = checkpoint_fn(
            self.atom_attention_encoder,
            input_feature_dict,
            r_noisy,
            s_trunk,
            z_pair,
            inplace_safe,
            chunk_size,
        )
    else:
        # Sequence-local Atom Attention and aggregation to coarse-grained tokens
        a_token, q_skip, c_skip, p_skip = self.atom_attention_encoder(
            input_feature_dict=input_feature_dict,
            r_l=r_noisy,
            s=s_trunk,
            z=z_pair,
            inplace_safe=inplace_safe,
            chunk_size=chunk_size,
        )
    # Full self-attention on token level.
    if inplace_safe:
        a_token += self.linear_no_bias_s(
            self.layernorm_s(s_single)
        )  # [..., N_sample, N_token, c_token]
    else:
        a_token = a_token + self.linear_no_bias_s(
            self.layernorm_s(s_single)
        )  # [..., N_sample, N_token, c_token]
    a_token = self.diffusion_transformer(
        a=a_token,
        s=s_single,
        z=z_pair,
        inplace_safe=inplace_safe,
        chunk_size=chunk_size,
    )

    a_token = self.layernorm_a(a_token)

    # Fine-grained checkpoint for finetuning stage 2 (token num: 768) for avoiding OOM
    if blocks_per_ckpt and self.use_fine_grained_checkpoint:
        checkpoint_fn = get_checkpoint_fn()
        r_update = checkpoint_fn(
            self.atom_attention_decoder,
            input_feature_dict,
            a_token,
            q_skip,
            c_skip,
            p_skip,
            inplace_safe,
            chunk_size,
        )
    else:
        # Broadcast token activations to atoms and run Sequence-local Atom Attention
        r_update = self.atom_attention_decoder(
            input_feature_dict=input_feature_dict,
            a=a_token,
            q_skip=q_skip,
            c_skip=c_skip,
            p_skip=p_skip,
            inplace_safe=inplace_safe,
            chunk_size=chunk_size,
        )

    return r_update
|
| 468 |
+
|
| 469 |
+
def forward(
    self,
    x_noisy: torch.Tensor,
    t_hat_noise_level: torch.Tensor,
    input_feature_dict: dict[str, Union[torch.Tensor, int, float, dict]],
    s_inputs: torch.Tensor,
    s_trunk: torch.Tensor,
    z_trunk: torch.Tensor,
    inplace_safe: bool = False,
    chunk_size: Optional[int] = None,
) -> torch.Tensor:
    """One step denoise: x_noisy, noise_level -> x_denoised.

    Args:
        x_noisy (torch.Tensor): the noisy version of the input atom coords
            [..., N_sample, N_atom, 3]
        t_hat_noise_level (torch.Tensor): the noise level, as well as the time step t
            [..., N_sample]
        input_feature_dict (dict[str, Union[torch.Tensor, int, float, dict]]): input meta feature dict
        s_inputs (torch.Tensor): single embedding from InputFeatureEmbedder
            [..., N_tokens, c_s_inputs]
        s_trunk (torch.Tensor): single feature embedding from PairFormer (Alg17)
            [..., N_tokens, c_s]
        z_trunk (torch.Tensor): pair feature embedding from PairFormer (Alg17)
            [..., N_tokens, N_tokens, c_z]
        inplace_safe (bool): Whether it is safe to use inplace operations. Defaults to False.
        chunk_size (Optional[int]): Chunk size for memory-efficient operations. Defaults to None.

    Returns:
        torch.Tensor: the denoised coordinates of x
            [..., N_sample, N_atom, 3]
    """
    # EDM preconditioning: scale positions to dimensionless vectors with
    # approximately unit variance,
    #   r_noisy = c_in * x_noisy, where c_in = 1 / sqrt(sigma_data^2 + sigma^2)
    in_scale = torch.sqrt(self.sigma_data**2 + t_hat_noise_level**2)
    scaled_coords = x_noisy / in_scale[..., None, None]

    # Network evaluation on the scaled input.
    # As in EDM: r_update = F(r_noisy, c_noise(sigma))
    net_out = self.f_forward(
        r_noisy=scaled_coords,
        t_hat_noise_level=t_hat_noise_level,
        input_feature_dict=input_feature_dict,
        s_inputs=s_inputs,
        s_trunk=s_trunk,
        z_trunk=z_trunk,
        inplace_safe=inplace_safe,
        chunk_size=chunk_size,
    )

    # Recombine the network output with the input positions.
    # As in EDM:
    #   D = c_skip * x_noisy + c_out * r_update
    # with s_ratio = sigma / sigma_data:
    #   c_skip = 1 / (1 + s_ratio^2)
    #   c_out  = sigma / sqrt(1 + s_ratio^2)
    ratio = (t_hat_noise_level / self.sigma_data)[..., None, None].to(net_out.dtype)
    skip_term = 1 / (1 + ratio**2) * x_noisy
    out_term = t_hat_noise_level[..., None, None] / torch.sqrt(1 + ratio**2) * net_out
    return (skip_term + out_term).to(net_out.dtype)
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/modules/embedders.py
ADDED
|
@@ -0,0 +1,256 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from typing import Any, Optional, Union
|
| 16 |
+
|
| 17 |
+
import torch
|
| 18 |
+
import torch.nn as nn
|
| 19 |
+
import torch.nn.functional as F
|
| 20 |
+
|
| 21 |
+
from protenix.model.modules.primitives import LinearNoBias
|
| 22 |
+
from protenix.model.modules.transformer import AtomAttentionEncoder
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class InputFeatureEmbedder(nn.Module):
    """
    Implements Algorithm 2 in AF3.

    Encodes per-atom features into per-token embeddings and concatenates
    them with a fixed set of raw per-token input features.
    """

    def __init__(
        self,
        c_atom: int = 128,
        c_atompair: int = 16,
        c_token: int = 384,
    ) -> None:
        """
        Args:
            c_atom (int, optional): atom embedding dim. Defaults to 128.
            c_atompair (int, optional): atom pair embedding dim. Defaults to 16.
            c_token (int, optional): token embedding dim. Defaults to 384.
        """
        super().__init__()
        self.c_atom = c_atom
        self.c_atompair = c_atompair
        self.c_token = c_token
        # No coordinates are available at the input-embedding stage.
        self.atom_attention_encoder = AtomAttentionEncoder(
            c_atom=c_atom,
            c_atompair=c_atompair,
            c_token=c_token,
            has_coords=False,
        )
        # Line 2 of Alg 2: per-token features to append (name -> feature dim).
        self.input_feature = {"restype": 32, "profile": 32, "deletion_mean": 1}

    def forward(
        self,
        input_feature_dict: dict[str, Any],
        inplace_safe: bool = False,
        chunk_size: Optional[int] = None,
    ) -> torch.Tensor:
        """
        Args:
            input_feature_dict (Dict[str, Any]): dict of input features
            inplace_safe (bool): Whether it is safe to use inplace operations. Defaults to False.
            chunk_size (Optional[int]): Chunk size for memory-efficient operations. Defaults to None.

        Returns:
            torch.Tensor: token embedding
                [..., N_token, 384 (c_token) + 32 + 32 + 1 := 449]
        """
        # Embed per-atom features; the encoder's skip outputs are unused here.
        a_token, *_ = self.atom_attention_encoder(
            input_feature_dict=input_feature_dict,
            inplace_safe=inplace_safe,
            chunk_size=chunk_size,
        )  # [..., N_token, c_token]
        # Append the raw per-token features along the channel dimension.
        leading_shape = input_feature_dict["restype"].shape[:-1]
        per_token = [
            input_feature_dict[key].reshape(*leading_shape, width)
            for key, width in self.input_feature.items()
        ]
        return torch.cat([a_token] + per_token, dim=-1)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
class RelativePositionEncoding(nn.Module):
    """
    Implements Algorithm 3 in AF3.

    Builds one-hot encodings of clipped relative residue index, relative
    token index and relative chain index, plus a same-entity indicator,
    and projects their concatenation to the pair embedding dim c_z.
    """

    def __init__(self, r_max: int = 32, s_max: int = 2, c_z: int = 128) -> None:
        """
        Args:
            r_max (int, optional): Relative position indices clip value. Defaults to 32.
            s_max (int, optional): Relative chain indices clip value. Defaults to 2.
            c_z (int, optional): hidden dim [for pair embedding]. Defaults to 128.
        """
        super(RelativePositionEncoding, self).__init__()
        self.r_max = r_max
        self.s_max = s_max
        self.c_z = c_z
        # Input width = 2*(r_max+1) [rel residue] + 2*(r_max+1) [rel token]
        #             + 1 [same entity] + 2*(s_max+1) [rel chain]
        #             = 4*r_max + 2*s_max + 7.
        self.linear_no_bias = LinearNoBias(
            in_features=(4 * self.r_max + 2 * self.s_max + 7), out_features=self.c_z
        )
        # Per-token integer features consumed by forward (name -> dim).
        self.input_feature = {
            "asym_id": 1,
            "residue_index": 1,
            "entity_id": 1,
            "sym_id": 1,
            "token_index": 1,
        }

    def forward(self, input_feature_dict: dict[str, Any]) -> torch.Tensor:
        """
        Args:
            input_feature_dict (Dict[str, Any]): input meta feature dict.
                asym_id / residue_index / entity_id / sym_id / token_index
                [..., N_tokens]
        Returns:
            torch.Tensor: relative position encoding
                [..., N_token, N_token, c_z]
        """
        # Pairwise indicator masks over all token pairs.
        b_same_chain = (
            input_feature_dict["asym_id"][..., :, None]
            == input_feature_dict["asym_id"][..., None, :]
        ).long()  # [..., N_token, N_token]
        b_same_residue = (
            input_feature_dict["residue_index"][..., :, None]
            == input_feature_dict["residue_index"][..., None, :]
        ).long()  # [..., N_token, N_token]
        b_same_entity = (
            input_feature_dict["entity_id"][..., :, None]
            == input_feature_dict["entity_id"][..., None, :]
        ).long()  # [..., N_token, N_token]
        # Relative residue index shifted to [0, 2*r_max]; cross-chain pairs
        # all fall into the extra out-of-range bucket 2*r_max + 1.
        d_residue = torch.clip(
            input=input_feature_dict["residue_index"][..., :, None]
            - input_feature_dict["residue_index"][..., None, :]
            + self.r_max,
            min=0,
            max=2 * self.r_max,
        ) * b_same_chain + (1 - b_same_chain) * (
            2 * self.r_max + 1
        )  # [..., N_token, N_token]
        a_rel_pos = F.one_hot(d_residue, 2 * (self.r_max + 1))
        # Relative token index, meaningful only within the same residue of the
        # same chain; otherwise the out-of-range bucket is used.
        d_token = torch.clip(
            input=input_feature_dict["token_index"][..., :, None]
            - input_feature_dict["token_index"][..., None, :]
            + self.r_max,
            min=0,
            max=2 * self.r_max,
        ) * b_same_chain * b_same_residue + (1 - b_same_chain * b_same_residue) * (
            2 * self.r_max + 1
        )  # [..., N_token, N_token]
        a_rel_token = F.one_hot(d_token, 2 * (self.r_max + 1))
        # Relative chain (sym_id) index, meaningful only within the same
        # entity; otherwise the out-of-range bucket 2*s_max + 1 is used.
        d_chain = torch.clip(
            input=input_feature_dict["sym_id"][..., :, None]
            - input_feature_dict["sym_id"][..., None, :]
            + self.s_max,
            min=0,
            max=2 * self.s_max,
        ) * b_same_entity + (1 - b_same_entity) * (
            2 * self.s_max + 1
        )  # [..., N_token, N_token]
        a_rel_chain = F.one_hot(d_chain, 2 * (self.s_max + 1))

        if self.training:
            # Training path: single dense projection over the full pair tensor.
            p = self.linear_no_bias(
                torch.cat(
                    [a_rel_pos, a_rel_token, b_same_entity[..., None], a_rel_chain],
                    dim=-1,
                ).float()
            )  # [..., N_token, N_token, 2 * (self.r_max + 1)+ 2 * (self.r_max + 1)+ 1 + 2 * (self.s_max + 1)] -> [..., N_token, N_token, c_z]
            return p
        else:
            # Inference path: flatten token pairs to rows and project them in
            # chunks, freeing intermediates eagerly to limit peak memory (the
            # float cast of the concatenated one-hots is the large tensor).
            del d_chain, d_token, d_residue, b_same_chain, b_same_residue
            origin_shape = a_rel_pos.shape[:-1]
            Ntoken = a_rel_pos.shape[-2]
            a_rel_pos = a_rel_pos.reshape(-1, a_rel_pos.shape[-1])
            # Chunking only pays off for large systems; 3200 tokens is the
            # switch-over point used here.
            chunk_num = 1 if Ntoken < 3200 else 8
            a_rel_pos_chunks = torch.chunk(
                a_rel_pos.reshape(-1, a_rel_pos.shape[-1]), chunk_num, dim=-2
            )
            a_rel_token_chunks = torch.chunk(
                a_rel_token.reshape(-1, a_rel_token.shape[-1]), chunk_num, dim=-2
            )
            b_same_entity_chunks = torch.chunk(
                b_same_entity.reshape(-1, 1), chunk_num, dim=-2
            )
            a_rel_chain_chunks = torch.chunk(
                a_rel_chain.reshape(-1, a_rel_chain.shape[-1]), chunk_num, dim=-2
            )
            start = 0
            p = None
            for i in range(len(a_rel_pos_chunks)):
                # Concatenate and project one row-chunk at a time.
                data = torch.cat(
                    [
                        a_rel_pos_chunks[i],
                        a_rel_token_chunks[i],
                        b_same_entity_chunks[i],
                        a_rel_chain_chunks[i],
                    ],
                    dim=-1,
                ).float()
                result = self.linear_no_bias(data)
                del data
                if p is None:
                    # Allocate the flat output buffer lazily so its dtype
                    # matches whatever the linear layer produced.
                    p = torch.empty(
                        (a_rel_pos.shape[-2], self.c_z),
                        device=a_rel_pos.device,
                        dtype=result.dtype,
                    )
                p[start : start + result.shape[0]] = result
                start += result.shape[0]
                del result
            del a_rel_pos, a_rel_token, b_same_entity, a_rel_chain
            # Restore the [..., N_token, N_token, c_z] shape.
            p = p.reshape(*origin_shape, -1)
            return p
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
class FourierEmbedding(nn.Module):
    """
    Implements Algorithm 22 in AF3.

    Random Fourier features of the noise level. The projection weights and
    phases are drawn once from a seeded generator and frozen
    (requires_grad=False), so the embedding is deterministic per seed.
    """

    def __init__(self, c: int, seed: int = 42) -> None:
        """
        Args:
            c (int): embedding dim.
            seed (int): seed for the fixed random weights. Defaults to 42.
        """
        super().__init__()
        self.c = c
        self.seed = seed
        rng = torch.Generator()
        rng.manual_seed(seed)
        # Fixed (non-trainable) random frequencies and phases; w is drawn
        # before b so the values match any checkpoint produced with this seed.
        self.w = nn.Parameter(
            torch.randn(size=(c,), generator=rng), requires_grad=False
        )
        self.b = nn.Parameter(
            torch.randn(size=(c,), generator=rng), requires_grad=False
        )

    def forward(self, t_hat_noise_level: torch.Tensor) -> torch.Tensor:
        """
        Args:
            t_hat_noise_level (torch.Tensor): the noise level
                [..., N_sample]

        Returns:
            torch.Tensor: the output fourier embedding
                [..., N_sample, c]
        """
        phase = t_hat_noise_level.unsqueeze(dim=-1) * self.w + self.b
        return torch.cos(input=2 * torch.pi * phase)
|
benchmarks/FoldBench/algorithms/Protenix/Protenix/protenix/model/modules/frames.py
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024 ByteDance and/or its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import torch
|
| 16 |
+
import torch.nn.functional as F
|
| 17 |
+
|
| 18 |
+
from protenix.model.utils import batched_gather
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def expressCoordinatesInFrame(
    coordinate: torch.Tensor, frames: torch.Tensor, eps: float = 1e-8
) -> torch.Tensor:
    """Algorithm 29 Express coordinate in frame.

    Args:
        coordinate (torch.Tensor): the input coordinate
            [..., N_atom, 3]
        frames (torch.Tensor): the input frames, three atoms (a, b, c) each
            [..., N_frame, 3, 3]
        eps (float): Small epsilon value guarding the normalizations

    Returns:
        torch.Tensor: the transformed coordinate projected onto frame basis
            [..., N_frame, N_atom, 3]
    """
    # Unpack the three frame atoms; b is the frame origin.
    a, b, c = torch.unbind(frames, dim=-2)  # each [..., N_frame, 3]
    w1 = F.normalize(a - b, dim=-1, eps=eps)
    w2 = F.normalize(c - b, dim=-1, eps=eps)
    # Orthonormal basis: e1/e2 bisect the two bond directions, e3 completes it.
    e1 = F.normalize(w1 + w2, dim=-1, eps=eps)
    e2 = F.normalize(w2 - w1, dim=-1, eps=eps)
    e3 = torch.cross(e1, e2, dim=-1)  # [..., N_frame, 3]
    # Offset every atom from every frame origin, then project onto the basis.
    d = coordinate[..., None, :, :] - b[..., None, :]  # [..., N_frame, N_atom, 3]
    components = [
        torch.sum(d * axis[..., None, :], dim=-1, keepdim=True)
        for axis in (e1, e2, e3)
    ]
    return torch.cat(components, dim=-1)  # [..., N_frame, N_atom, 3]
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def gather_frame_atom_by_indices(
    coordinate: torch.Tensor, frame_atom_index: torch.Tensor, dim: int = -2
) -> torch.Tensor:
    """Construct frames by selecting their three atoms from the coordinates.

    Args:
        coordinate (torch.Tensor): the input coordinate
            [..., N_atom, 3]
        frame_atom_index (torch.Tensor): indices of three atoms in each frame
            [..., N_frame, 3] or [N_frame, 3]
        dim (int): along which dimension to select the frame atoms
    Returns:
        torch.Tensor: the constructed frames
            [..., N_frame, 3[three atom], 3[three coordinate]]
    """
    if frame_atom_index.dim() == 2:
        # Naive case: a single index table shared by every batch element.
        atoms = [
            torch.index_select(coordinate, dim=dim, index=frame_atom_index[:, k])
            for k in range(3)
        ]  # each [..., N_frame, 3]
        return torch.stack(atoms, dim=dim)

    # Batched case: per-batch index tables; leading dims must line up.
    assert (
        frame_atom_index.shape[:dim] == coordinate.shape[:dim]
    ), "batch size dims should match"
    n_batch_dims = len(coordinate.shape[:dim])
    atoms = [
        batched_gather(
            data=coordinate,
            inds=frame_atom_index[..., k],
            dim=dim,
            no_batch_dims=n_batch_dims,
        )
        for k in range(3)
    ]  # each [..., N_frame, 3]
    return torch.stack(atoms, dim=dim)
|