use openfold features
Browse files- README.md +4 -4
- data/pdb_test.p +3 -0
- data/pdb_train.p +3 -0
- openfold/.gitignore +1 -0
- openfold/data_transforms.py +1211 -0
- openfold/protein.py +429 -0
- openfold/residue_constants.py +1310 -0
- openfold/rigid_utils.py +1368 -0
- openfold/tensor_utils.py +121 -0
- parse_complexes.py +137 -48
- pdb.slurm +1 -1
- pdb_protein_ligand_complexes.py +0 -131
README.md
CHANGED
|
@@ -9,7 +9,7 @@ tags:
|
|
| 9 |
|
| 10 |
## How to use the data sets
|
| 11 |
|
| 12 |
-
This dataset contains
|
| 13 |
of their complexes from the PDB.
|
| 14 |
|
| 15 |
SMILES are assumed to be tokenized by the regex from P. Schwaller.
|
|
@@ -35,9 +35,9 @@ are considered.
|
|
| 35 |
Load a test/train split using
|
| 36 |
|
| 37 |
```
|
| 38 |
-
|
| 39 |
-
train =
|
| 40 |
-
|
| 41 |
```
|
| 42 |
|
| 43 |
### Manual update from PDB
|
|
|
|
| 9 |
|
| 10 |
## How to use the data sets
|
| 11 |
|
| 12 |
+
This dataset contains about 36,000 unique pairs of protein sequences and ligand SMILES, and the coordinates
|
| 13 |
of their complexes from the PDB.
|
| 14 |
|
| 15 |
SMILES are assumed to be tokenized by the regex from P. Schwaller.
|
|
|
|
| 35 |
Load a test/train split using
|
| 36 |
|
| 37 |
```
|
| 38 |
+
import pandas as pd
|
| 39 |
+
train = pd.read_pickle('data/pdb_train.p')
|
| 40 |
+
test = pd.read_pickle('data/pdb_test.p')
|
| 41 |
```
|
| 42 |
|
| 43 |
### Manual update from PDB
|
data/pdb_test.p
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f5c03e3bf9c84e79db19f566794945c3f9441f6797b8c7af9cf050a7db8de88c
|
| 3 |
+
size 1428764270
|
data/pdb_train.p
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f837b7c209aea0cfdadd44b9478ae5f2b0ea12fc138a07c3542ec26d42780667
|
| 3 |
+
size 13251374040
|
openfold/.gitignore
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
__pycache__/*
|
openfold/data_transforms.py
ADDED
|
@@ -0,0 +1,1211 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 AlQuraishi Laboratory
|
| 2 |
+
# Copyright 2021 DeepMind Technologies Limited
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
import itertools
|
| 17 |
+
from functools import reduce, wraps
|
| 18 |
+
from operator import add
|
| 19 |
+
|
| 20 |
+
import numpy as np
|
| 21 |
+
import torch
|
| 22 |
+
|
| 23 |
+
from . import residue_constants as rc
|
| 24 |
+
from .rigid_utils import Rotation, Rigid
|
| 25 |
+
from .tensor_utils import (
|
| 26 |
+
tree_map,
|
| 27 |
+
tensor_tree_map,
|
| 28 |
+
batched_gather,
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
MSA_FEATURE_NAMES = [
|
| 33 |
+
"msa",
|
| 34 |
+
"deletion_matrix",
|
| 35 |
+
"msa_mask",
|
| 36 |
+
"msa_row_mask",
|
| 37 |
+
"bert_mask",
|
| 38 |
+
"true_msa",
|
| 39 |
+
]
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def cast_to_64bit_ints(protein):
    """Promote every int32 feature tensor in ``protein`` to int64."""
    # We keep all ints as int64; collect keys first, then rewrite in place.
    int32_keys = [k for k, v in protein.items() if v.dtype == torch.int32]
    for key in int32_keys:
        protein[key] = protein[key].type(torch.int64)
    return protein
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def make_one_hot(x, num_classes):
    """One-hot encode integer tensor ``x`` into a trailing ``num_classes`` dim."""
    target_shape = tuple(x.shape) + (num_classes,)
    encoded = torch.zeros(target_shape, device=x.device)
    # Write a 1 at each element's class index along the new last dimension.
    return encoded.scatter_(-1, x.unsqueeze(-1), 1)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def make_seq_mask(protein):
    """Attach an all-ones float32 mask shaped like ``aatype``."""
    mask = torch.ones(protein["aatype"].shape, dtype=torch.float32)
    protein["seq_mask"] = mask
    return protein
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def make_template_mask(protein):
    """Attach an all-ones float32 mask with one entry per template."""
    n_templates = protein["template_aatype"].shape[0]
    protein["template_mask"] = torch.ones(n_templates, dtype=torch.float32)
    return protein
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def curry1(f):
    """Supply all arguments but the first.

    ``curry1(f)(a, b)`` yields a one-argument callable ``x -> f(x, a, b)``,
    letting transforms be configured once and applied to many proteins.
    """
    @wraps(f)
    def partially_applied(*args, **kwargs):
        def apply_first(x):
            return f(x, *args, **kwargs)
        return apply_first

    return partially_applied
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def make_all_atom_aatype(protein):
    """Alias the ``aatype`` feature under the key ``all_atom_aatype``."""
    protein["all_atom_aatype"] = protein["aatype"]
    return protein
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def fix_templates_aatype(protein):
    """Convert one-hot template aatypes to index form in the rc ordering.

    NOTE(review): assumes ``template_aatype`` arrives one-hot in HHblits
    amino-acid order with templates stacked on dim 0 — confirm against the
    upstream feature pipeline.
    """
    # Map one-hot to indices
    num_templates = protein["template_aatype"].shape[0]
    if(num_templates > 0):
        protein["template_aatype"] = torch.argmax(
            protein["template_aatype"], dim=-1
        )
        # Map hhsearch-aatype to our aatype.
        new_order_list = rc.MAP_HHBLITS_AATYPE_TO_OUR_AATYPE
        # One copy of the remap table per template so gather can translate
        # each template's indices row-by-row.
        new_order = torch.tensor(
            new_order_list, dtype=torch.int64, device=protein["aatype"].device,
        ).expand(num_templates, -1)
        protein["template_aatype"] = torch.gather(
            new_order, 1, index=protein["template_aatype"]
        )

    return protein
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def correct_msa_restypes(protein):
    """Correct MSA restype to have the same order as rc.

    Remaps ``protein["msa"]`` from HHblits amino-acid ordering to the
    ordering used by ``residue_constants`` and permutes the columns of any
    ``*profile*`` feature to match.
    """
    new_order_list = rc.MAP_HHBLITS_AATYPE_TO_OUR_AATYPE
    new_order = torch.tensor(
        [new_order_list] * protein["msa"].shape[1],
        device=protein["msa"].device,
    ).transpose(0, 1)
    protein["msa"] = torch.gather(new_order, 0, protein["msa"])

    # Permutation matrix sending HHblits column order to our column order.
    perm_matrix = np.zeros((22, 22), dtype=np.float32)
    perm_matrix[range(len(new_order_list)), new_order_list] = 1.0

    for k in protein:
        if "profile" in k:
            # BUG FIX: ``shape.as_list()`` is a TensorFlow API; torch shapes
            # are indexed directly.
            num_dim = protein[k].shape[-1]
            assert num_dim in [
                20,
                21,
                22,
            ], "num_dim for %s out of expected range: %s" % (k, num_dim)
            # BUG FIX: ``torch.dot`` only supports 1-D tensors (and
            # ``perm_matrix`` is a NumPy array); use a real matrix product
            # with the permutation converted to a torch tensor.
            protein[k] = torch.matmul(
                protein[k],
                torch.tensor(
                    perm_matrix[:num_dim, :num_dim],
                    device=protein[k].device,
                ),
            )

    return protein
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def squeeze_features(protein):
    """Remove singleton and repeated dimensions in protein features."""
    # aatype arrives one-hot; convert to index form.
    protein["aatype"] = torch.argmax(protein["aatype"], dim=-1)

    squeezable = (
        "domain_name",
        "msa",
        "num_alignments",
        "seq_length",
        "sequence",
        "superfamily",
        "deletion_matrix",
        "resolution",
        "between_segment_residues",
        "residue_index",
        "template_all_atom_mask",
    )
    for key in squeezable:
        if key not in protein:
            continue
        feat = protein[key]
        last_dim = feat.shape[-1]
        if isinstance(last_dim, int) and last_dim == 1:
            if torch.is_tensor(feat):
                protein[key] = torch.squeeze(feat, dim=-1)
            else:
                protein[key] = np.squeeze(feat, axis=-1)

    # These features are replicated per position; keep a single scalar copy.
    for key in ("seq_length", "num_alignments"):
        if key in protein:
            protein[key] = protein[key][0]

    return protein
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
@curry1
def randomly_replace_msa_with_unknown(protein, replace_proportion):
    """Replace a portion of the MSA with 'X'."""
    x_idx = 20  # class index of the unknown residue 'X'
    gap_idx = 21  # gap positions are never replaced

    replace_msa = torch.rand(protein["msa"].shape) < replace_proportion
    replace_msa = torch.logical_and(replace_msa, protein["msa"] != gap_idx)
    protein["msa"] = torch.where(
        replace_msa,
        torch.ones_like(protein["msa"]) * x_idx,
        protein["msa"]
    )

    replace_seq = torch.rand(protein["aatype"].shape) < replace_proportion
    protein["aatype"] = torch.where(
        replace_seq,
        torch.ones_like(protein["aatype"]) * x_idx,
        protein["aatype"],
    )
    return protein
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
@curry1
def sample_msa(protein, max_seq, keep_extra, seed=None):
    """Sample MSA randomly; remaining sequences are stored as `extra_*`.

    Row 0 (the query sequence) is always kept. When ``keep_extra`` is True
    the unselected rows of every MSA feature are preserved under
    ``extra_<name>`` keys; otherwise they are discarded.
    """
    num_seq = protein["msa"].shape[0]
    g = torch.Generator(device=protein["msa"].device)
    if seed is not None:
        g.manual_seed(seed)
    # Shuffle everything except row 0 so the query always comes first.
    shuffled = torch.randperm(num_seq - 1, generator=g) + 1
    index_order = torch.cat(
        (torch.tensor([0], device=shuffled.device), shuffled),
        dim=0
    )
    num_sel = min(max_seq, num_seq)
    sel_seq, not_sel_seq = torch.split(
        index_order, [num_sel, num_seq - num_sel]
    )

    for k in MSA_FEATURE_NAMES:
        if k in protein:
            if keep_extra:
                protein["extra_" + k] = torch.index_select(
                    protein[k], 0, not_sel_seq
                )
            protein[k] = torch.index_select(protein[k], 0, sel_seq)

    return protein
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
@curry1
def add_distillation_flag(protein, distillation):
    """Record whether this example comes from the distillation set."""
    protein['is_distillation'] = distillation
    return protein
|
| 214 |
+
|
| 215 |
+
@curry1
def sample_msa_distillation(protein, max_seq):
    """Subsample the MSA (dropping extras) for distillation examples only."""
    if(protein["is_distillation"] == 1):
        return sample_msa(max_seq, keep_extra=False)(protein)
    return protein
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
@curry1
def crop_extra_msa(protein, max_extra_msa):
    """Keep at most ``max_extra_msa`` randomly chosen extra-MSA rows."""
    num_seq = protein["extra_msa"].shape[0]
    keep = torch.randperm(num_seq)[:min(max_extra_msa, num_seq)]
    for name in MSA_FEATURE_NAMES:
        extra_key = "extra_" + name
        if extra_key in protein:
            protein[extra_key] = torch.index_select(
                protein[extra_key], 0, keep
            )

    return protein
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
def delete_extra_msa(protein):
    """Drop every ``extra_*`` MSA feature from the example."""
    for name in MSA_FEATURE_NAMES:
        protein.pop("extra_" + name, None)
    return protein
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
# Not used in inference
@curry1
def block_delete_msa(protein, config):
    """Delete random contiguous blocks of MSA rows (training augmentation).

    NOTE(review): several lines below look incompatible with current torch
    APIs — ``torch.range(block_num_seq)`` (range takes start/end),
    ``Uniform.sample(nb)`` (sample_shape must be a tuple), indexing the
    result of ``torch.unique`` with ``[0]``, and ``torch.gather`` called
    without a ``dim``. Confirm this transform is actually exercised before
    relying on it.
    """
    num_seq = protein["msa"].shape[0]
    # Rows per deleted block = floor(num_seq * msa_fraction_per_block).
    block_num_seq = torch.floor(
        torch.tensor(num_seq, dtype=torch.float32, device=protein["msa"].device)
        * config.msa_fraction_per_block
    ).to(torch.int32)

    if config.randomize_num_blocks:
        nb = torch.distributions.uniform.Uniform(
            0, config.num_blocks + 1
        ).sample()
    else:
        nb = config.num_blocks

    del_block_starts = torch.distributions.Uniform(0, num_seq).sample(nb)
    del_blocks = del_block_starts[:, None] + torch.range(block_num_seq)
    del_blocks = torch.clip(del_blocks, 0, num_seq - 1)
    del_indices = torch.unique(torch.sort(torch.reshape(del_blocks, [-1])))[0]

    # Make sure we keep the original sequence
    combined = torch.cat((torch.range(1, num_seq)[None], del_indices[None]))
    uniques, counts = combined.unique(return_counts=True)
    difference = uniques[counts == 1]
    intersection = uniques[counts > 1]
    keep_indices = torch.squeeze(difference, 0)

    for k in MSA_FEATURE_NAMES:
        if k in protein:
            protein[k] = torch.gather(protein[k], keep_indices)

    return protein
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
@curry1
def nearest_neighbor_clusters(protein, gap_agreement_weight=0.0):
    """Assign each extra-MSA row to its most similar sampled-MSA row.

    Similarity is a weighted one-hot agreement count: the 21 residue classes
    weigh 1, the gap class weighs ``gap_agreement_weight``, and the masked
    class weighs 0. Writes ``extra_cluster_assignment`` (one sampled-row
    index per extra row).
    """
    # Per-class agreement weights over the 23 one-hot channels.
    weights = torch.cat(
        [
            torch.ones(21, device=protein["msa"].device),
            gap_agreement_weight * torch.ones(1, device=protein["msa"].device),
            torch.zeros(1, device=protein["msa"].device)
        ],
        0,
    )

    # Make agreement score as weighted Hamming distance
    msa_one_hot = make_one_hot(protein["msa"], 23)
    sample_one_hot = protein["msa_mask"][:, :, None] * msa_one_hot
    extra_msa_one_hot = make_one_hot(protein["extra_msa"], 23)
    extra_one_hot = protein["extra_msa_mask"][:, :, None] * extra_msa_one_hot

    num_seq, num_res, _ = sample_one_hot.shape
    extra_num_seq, _, _ = extra_one_hot.shape

    # Compute tf.einsum('mrc,nrc,c->mn', sample_one_hot, extra_one_hot, weights)
    # in an optimized fashion to avoid possible memory or computation blowup.
    agreement = torch.matmul(
        torch.reshape(extra_one_hot, [extra_num_seq, num_res * 23]),
        torch.reshape(
            sample_one_hot * weights, [num_seq, num_res * 23]
        ).transpose(0, 1),
    )

    # Assign each sequence in the extra sequences to the closest MSA sample
    protein["extra_cluster_assignment"] = torch.argmax(agreement, dim=1).to(
        torch.int64
    )

    return protein
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
def unsorted_segment_sum(data, segment_ids, num_segments):
    """Sum rows of ``data`` into ``num_segments`` buckets given by ``segment_ids``.

    Mirrors ``tf.unsorted_segment_sum`` with 1-D indices only; the result
    keeps ``data``'s dtype.

    :param data: A tensor whose segments are to be summed.
    :param segment_ids: The 1-D segment indices tensor.
    :param num_segments: The number of segments.
    :return: A tensor of same data type as the data argument.
    """
    assert segment_ids.dim() == 1 and segment_ids.shape[0] == data.shape[0]
    # Broadcast ids so every element of a row carries that row's segment id.
    trailing = (1,) * (data.dim() - 1)
    expanded_ids = segment_ids.view(-1, *trailing).expand(data.shape)
    out_shape = [num_segments] + list(data.shape[1:])
    summed = torch.zeros(*out_shape, device=segment_ids.device)
    summed.scatter_add_(0, expanded_ids, data.float())
    return summed.type(data.dtype)
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
@curry1
def summarize_clusters(protein):
    """Produce profile and deletion_matrix_mean within each cluster."""
    num_seq = protein["msa"].shape[0]

    def csum(x):
        # Sum x over extra rows, grouped by each row's assigned cluster center.
        return unsorted_segment_sum(
            x, protein["extra_cluster_assignment"], num_seq
        )

    mask = protein["extra_msa_mask"]
    # 1e-6 guards against division by zero for empty clusters.
    mask_counts = 1e-6 + protein["msa_mask"] + csum(mask)  # Include center

    msa_sum = csum(mask[:, :, None] * make_one_hot(protein["extra_msa"], 23))
    msa_sum += make_one_hot(protein["msa"], 23)  # Original sequence
    protein["cluster_profile"] = msa_sum / mask_counts[:, :, None]
    del msa_sum

    del_sum = csum(mask * protein["extra_deletion_matrix"])
    del_sum += protein["deletion_matrix"]  # Original sequence
    protein["cluster_deletion_mean"] = del_sum / mask_counts
    del del_sum

    return protein
|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
def make_msa_mask(protein):
    """Mask features are all ones, but will later be zero-padded."""
    msa_shape = protein["msa"].shape
    protein["msa_mask"] = torch.ones(msa_shape, dtype=torch.float32)
    protein["msa_row_mask"] = torch.ones(msa_shape[0], dtype=torch.float32)
    return protein
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
def pseudo_beta_fn(aatype, all_atom_positions, all_atom_mask):
    """Create pseudo beta features.

    Uses the CB atom coordinate for every residue except glycine (which has
    no CB), where CA is used instead. Returns ``(pseudo_beta,
    pseudo_beta_mask)`` when ``all_atom_mask`` is provided, otherwise just
    ``pseudo_beta``.
    """
    is_gly = torch.eq(aatype, rc.restype_order["G"])
    ca_idx = rc.atom_order["CA"]
    cb_idx = rc.atom_order["CB"]
    # Tile the glycine flag over the trailing xyz dimension before selecting.
    pseudo_beta = torch.where(
        torch.tile(is_gly[..., None], [1] * len(is_gly.shape) + [3]),
        all_atom_positions[..., ca_idx, :],
        all_atom_positions[..., cb_idx, :],
    )

    if all_atom_mask is not None:
        pseudo_beta_mask = torch.where(
            is_gly, all_atom_mask[..., ca_idx], all_atom_mask[..., cb_idx]
        )
        return pseudo_beta, pseudo_beta_mask
    else:
        return pseudo_beta
|
| 395 |
+
|
| 396 |
+
|
| 397 |
+
@curry1
def make_pseudo_beta(protein, prefix=""):
    """Create pseudo-beta (alpha for glycine) position and mask.

    ``prefix`` selects between the target ("") and template ("template_")
    feature namespaces.
    """
    assert prefix in ["", "template_"]
    (
        protein[prefix + "pseudo_beta"],
        protein[prefix + "pseudo_beta_mask"],
    ) = pseudo_beta_fn(
        protein["template_aatype" if prefix else "aatype"],
        protein[prefix + "all_atom_positions"],
        protein["template_all_atom_mask" if prefix else "all_atom_mask"],
    )
    return protein
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
@curry1
def add_constant_field(protein, key, value):
    """Store ``value`` as a tensor feature under ``key`` (on the MSA device)."""
    protein[key] = torch.tensor(value, device=protein["msa"].device)
    return protein
|
| 416 |
+
|
| 417 |
+
|
| 418 |
+
def shaped_categorical(probs, epsilon=1e-10):
    """Sample one class index per distribution in the trailing dim of ``probs``.

    ``epsilon`` keeps zero-probability rows valid for the sampler.
    """
    num_classes = probs.shape[-1]
    flat_probs = torch.reshape(probs + epsilon, [-1, num_classes])
    dist = torch.distributions.categorical.Categorical(flat_probs)
    return torch.reshape(dist.sample(), probs.shape[:-1])
|
| 426 |
+
|
| 427 |
+
|
| 428 |
+
def make_hhblits_profile(protein):
    """Compute the HHblits MSA profile if not already present."""
    if "hhblits_profile" in protein:
        return protein

    # Compute the profile for every residue (over all MSA sequences).
    msa_one_hot = make_one_hot(protein["msa"], 22)

    # Mean over MSA rows -> per-residue distribution over the 22 classes.
    protein["hhblits_profile"] = torch.mean(msa_one_hot, dim=0)
    return protein
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
@curry1
def make_masked_msa(protein, config, replace_fraction):
    """Create data for BERT on raw MSA.

    Masks a ``replace_fraction`` of MSA positions, replacing each with a
    sample from a mixture of uniform / profile / identity distributions
    (remaining probability mass goes to a new [MASK] class). Stores the
    original MSA under ``true_msa`` and the mask under ``bert_mask``.
    """
    # Add a random amino acid uniformly.
    random_aa = torch.tensor(
        [0.05] * 20 + [0.0, 0.0],
        dtype=torch.float32,
        device=protein["aatype"].device
    )

    categorical_probs = (
        config.uniform_prob * random_aa
        + config.profile_prob * protein["hhblits_profile"]
        + config.same_prob * make_one_hot(protein["msa"], 22)
    )

    # Put all remaining probability on [MASK] which is a new column
    # (F.pad's pad list is ordered last-dim-first, so index 1 pads the
    # right side of the final class dimension).
    pad_shapes = list(
        reduce(add, [(0, 0) for _ in range(len(categorical_probs.shape))])
    )
    pad_shapes[1] = 1
    mask_prob = (
        1.0 - config.profile_prob - config.same_prob - config.uniform_prob
    )
    assert mask_prob >= 0.0

    categorical_probs = torch.nn.functional.pad(
        categorical_probs, pad_shapes, value=mask_prob
    )

    sh = protein["msa"].shape
    mask_position = torch.rand(sh) < replace_fraction

    bert_msa = shaped_categorical(categorical_probs)
    bert_msa = torch.where(mask_position, bert_msa, protein["msa"])

    # Mix real and masked MSA
    protein["bert_mask"] = mask_position.to(torch.float32)
    protein["true_msa"] = protein["msa"]
    protein["msa"] = bert_msa

    return protein
|
| 482 |
+
|
| 483 |
+
|
| 484 |
+
@curry1
def make_fixed_size(
    protein,
    shape_schema,
    msa_cluster_size,
    extra_msa_size,
    num_res=0,
    num_templates=0,
):
    """Guess at the MSA and sequence dimension to make fixed size."""
    # NOTE(review): NUM_RES / NUM_MSA_SEQ / NUM_EXTRA_SEQ / NUM_TEMPLATES are
    # schema placeholder constants defined elsewhere in this module — confirm.
    pad_size_map = {
        NUM_RES: num_res,
        NUM_MSA_SEQ: msa_cluster_size,
        NUM_EXTRA_SEQ: extra_msa_size,
        NUM_TEMPLATES: num_templates,
    }

    for k, v in protein.items():
        # Don't transfer this to the accelerator.
        if k == "extra_cluster_assignment":
            continue
        shape = list(v.shape)
        schema = shape_schema[k]
        msg = "Rank mismatch between shape and shape schema for"
        assert len(shape) == len(schema), f"{msg} {k}: {shape} vs {schema}"
        # Dims whose schema entry is a known placeholder are padded to the
        # requested size; all other dims keep their current extent.
        pad_size = [
            pad_size_map.get(s2, None) or s1 for (s1, s2) in zip(shape, schema)
        ]

        # F.pad wants (left, right) pairs ordered last-dim-first.
        padding = [(0, p - v.shape[i]) for i, p in enumerate(pad_size)]
        padding.reverse()
        padding = list(itertools.chain(*padding))
        if padding:
            protein[k] = torch.nn.functional.pad(v, padding)
            protein[k] = torch.reshape(protein[k], pad_size)

    return protein
|
| 521 |
+
|
| 522 |
+
|
| 523 |
+
@curry1
def make_msa_feat(protein):
    """Create and concatenate MSA features.

    Builds ``msa_feat`` (one-hot MSA + deletion channels, plus cluster
    channels when present) and ``target_feat`` (domain-break bit + one-hot
    sequence).
    """
    # Whether there is a domain break. Always zero for chains, but keeping for
    # compatibility with domain datasets.
    has_break = torch.clip(
        protein["between_segment_residues"].to(torch.float32), 0, 1
    )
    aatype_1hot = make_one_hot(protein["aatype"], 21)

    target_feat = [
        torch.unsqueeze(has_break, dim=-1),
        aatype_1hot,  # Everyone gets the original sequence.
    ]

    msa_1hot = make_one_hot(protein["msa"], 23)
    has_deletion = torch.clip(protein["deletion_matrix"], 0.0, 1.0)
    # atan squashes unbounded deletion counts into [0, 1).
    deletion_value = torch.atan(protein["deletion_matrix"] / 3.0) * (
        2.0 / np.pi
    )

    msa_feat = [
        msa_1hot,
        torch.unsqueeze(has_deletion, dim=-1),
        torch.unsqueeze(deletion_value, dim=-1),
    ]

    if "cluster_profile" in protein:
        deletion_mean_value = torch.atan(
            protein["cluster_deletion_mean"] / 3.0
        ) * (2.0 / np.pi)
        msa_feat.extend(
            [
                protein["cluster_profile"],
                torch.unsqueeze(deletion_mean_value, dim=-1),
            ]
        )

    if "extra_deletion_matrix" in protein:
        protein["extra_has_deletion"] = torch.clip(
            protein["extra_deletion_matrix"], 0.0, 1.0
        )
        protein["extra_deletion_value"] = torch.atan(
            protein["extra_deletion_matrix"] / 3.0
        ) * (2.0 / np.pi)

    protein["msa_feat"] = torch.cat(msa_feat, dim=-1)
    protein["target_feat"] = torch.cat(target_feat, dim=-1)
    return protein
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
@curry1
def select_feat(protein, feature_list):
    """Return only the features whose names appear in ``feature_list``."""
    return {name: feat for name, feat in protein.items() if name in feature_list}
|
| 577 |
+
|
| 578 |
+
|
| 579 |
+
@curry1
def crop_templates(protein, max_templates):
    """Truncate every ``template_*`` feature to its first ``max_templates`` rows."""
    template_keys = [k for k in protein if k.startswith("template_")]
    for k in template_keys:
        protein[k] = protein[k][:max_templates]
    return protein
|
| 585 |
+
|
| 586 |
+
|
| 587 |
+
def make_atom14_masks(protein):
    """Construct denser atom positions (14 dimensions instead of 37).

    Builds per-restype lookup tables mapping between the sparse atom37
    representation and the dense atom14 representation, then indexes them
    with this protein's residue types.  Adds to `protein`:
      * "atom14_atom_exists": [num_res, 14] mask of atoms defined per restype
      * "residx_atom14_to_atom37": [num_res, 14] atom37 index of each atom14 slot
      * "residx_atom37_to_atom14": [num_res, 37] atom14 index of each atom37 slot
      * "atom37_atom_exists": [num_res, 37] mask of atoms defined per restype
    """
    # Per-restype tables, one row appended per residue type in rc.restypes.
    restype_atom14_to_atom37 = []
    restype_atom37_to_atom14 = []
    restype_atom14_mask = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        # Empty atom names (padding slots in the atom14 layout) map to index 0.
        restype_atom14_to_atom37.append(
            [(rc.atom_order[name] if name else 0) for name in atom_names]
        )
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        # Reverse mapping; atom37 slots this restype lacks also map to 0.
        restype_atom37_to_atom14.append(
            [
                (atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0)
                for name in rc.atom_types
            ]
        )

        restype_atom14_mask.append(
            [(1.0 if name else 0.0) for name in atom_names]
        )

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14)
    restype_atom37_to_atom14.append([0] * 37)
    restype_atom14_mask.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    # Long dtype is required for tensor indexing below.
    protein_aatype = protein['aatype'].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros(
        [21, 37], dtype=torch.float32, device=protein["aatype"].device
    )
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein
|
| 659 |
+
|
| 660 |
+
|
| 661 |
+
def make_atom14_masks_np(batch):
    """NumPy-friendly wrapper around make_atom14_masks.

    Converts every np.ndarray leaf of `batch` to a torch tensor, runs
    make_atom14_masks, and converts the results back to NumPy arrays.

    Args:
        batch: feature dict; leaves may be np.ndarrays or torch tensors.
            Must contain an "aatype" entry.
    Returns:
        The updated feature dict with NumPy-array leaves.
    """
    # Bugfix: np.ndarray has no `.device` attribute, so the original
    # `batch["aatype"].device` raised AttributeError whenever the batch was
    # pure NumPy (the case this wrapper exists for).  Fall back to the
    # default (CPU) device when "aatype" is not already a tensor.
    device = getattr(batch["aatype"], "device", None)
    batch = tree_map(
        lambda n: torch.tensor(n, device=device),
        batch,
        np.ndarray
    )
    out = make_atom14_masks(batch)
    out = tensor_tree_map(lambda t: np.array(t), out)
    return out
|
| 670 |
+
|
| 671 |
+
|
| 672 |
+
def make_atom14_positions(protein):
    """Constructs denser atom positions (14 dimensions instead of 37).

    Requires the outputs of make_atom14_masks ("atom14_atom_exists",
    "residx_atom14_to_atom37") plus "all_atom_mask", "all_atom_positions"
    and "aatype".  Adds atom14 ground-truth positions/masks and their
    "alternative" counterparts with chemically equivalent atom names swapped.
    """
    residx_atom14_mask = protein["atom14_atom_exists"]
    residx_atom14_to_atom37 = protein["residx_atom14_to_atom37"]

    # Create a mask for known ground truth positions.
    residx_atom14_gt_mask = residx_atom14_mask * batched_gather(
        protein["all_atom_mask"],
        residx_atom14_to_atom37,
        dim=-1,
        no_batch_dims=len(protein["all_atom_mask"].shape[:-1]),
    )

    # Gather the ground truth positions.
    residx_atom14_gt_positions = residx_atom14_gt_mask[..., None] * (
        batched_gather(
            protein["all_atom_positions"],
            residx_atom14_to_atom37,
            dim=-2,
            no_batch_dims=len(protein["all_atom_positions"].shape[:-2]),
        )
    )

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["atom14_gt_exists"] = residx_atom14_gt_mask
    protein["atom14_gt_positions"] = residx_atom14_gt_positions

    # As the atom naming is ambiguous for 7 of the 20 amino acids, provide
    # alternative ground truth coordinates where the naming is swapped
    restype_3 = [rc.restype_1to3[res] for res in rc.restypes]
    restype_3 += ["UNK"]

    # Matrices for renaming ambiguous atoms.
    # Default to the identity (no renaming) for every residue type.
    all_matrices = {
        res: torch.eye(
            14,
            dtype=protein["all_atom_mask"].dtype,
            device=protein["all_atom_mask"].device,
        )
        for res in restype_3
    }
    # Overwrite the entries for residues with ambiguous atoms with a
    # permutation matrix that swaps each ambiguous pair.
    for resname, swap in rc.residue_atom_renaming_swaps.items():
        correspondences = torch.arange(
            14, device=protein["all_atom_mask"].device
        )
        for source_atom_swap, target_atom_swap in swap.items():
            source_index = rc.restype_name_to_atom14_names[resname].index(
                source_atom_swap
            )
            target_index = rc.restype_name_to_atom14_names[resname].index(
                target_atom_swap
            )
            correspondences[source_index] = target_index
            correspondences[target_index] = source_index
        renaming_matrix = protein["all_atom_mask"].new_zeros((14, 14))
        for index, correspondence in enumerate(correspondences):
            renaming_matrix[index, correspondence] = 1.0
        all_matrices[resname] = renaming_matrix

    renaming_matrices = torch.stack(
        [all_matrices[restype] for restype in restype_3]
    )

    # Pick the transformation matrices for the given residue sequence
    # shape (num_res, 14, 14).
    renaming_transform = renaming_matrices[protein["aatype"]]

    # Apply it to the ground truth positions. shape (num_res, 14, 3).
    alternative_gt_positions = torch.einsum(
        "...rac,...rab->...rbc", residx_atom14_gt_positions, renaming_transform
    )
    protein["atom14_alt_gt_positions"] = alternative_gt_positions

    # Create the mask for the alternative ground truth (differs from the
    # ground truth mask, if only one of the atoms in an ambiguous pair has a
    # ground truth position).
    alternative_gt_mask = torch.einsum(
        "...ra,...rab->...rb", residx_atom14_gt_mask, renaming_transform
    )
    protein["atom14_alt_gt_exists"] = alternative_gt_mask

    # Create an ambiguous atoms mask. shape: (21, 14).
    restype_atom14_is_ambiguous = protein["all_atom_mask"].new_zeros((21, 14))
    for resname, swap in rc.residue_atom_renaming_swaps.items():
        for atom_name1, atom_name2 in swap.items():
            restype = rc.restype_order[rc.restype_3to1[resname]]
            atom_idx1 = rc.restype_name_to_atom14_names[resname].index(
                atom_name1
            )
            atom_idx2 = rc.restype_name_to_atom14_names[resname].index(
                atom_name2
            )
            restype_atom14_is_ambiguous[restype, atom_idx1] = 1
            restype_atom14_is_ambiguous[restype, atom_idx2] = 1

    # From this create an ambiguous_mask for the given sequence.
    protein["atom14_atom_is_ambiguous"] = restype_atom14_is_ambiguous[
        protein["aatype"]
    ]

    return protein
|
| 773 |
+
|
| 774 |
+
|
| 775 |
+
def atom37_to_frames(protein, eps=1e-8):
    """Compute the 8 rigid-group frames per residue from atom37 coordinates.

    For each residue, builds the backbone frame (group 0), the psi frame
    (group 3) and up to four chi frames (groups 4-7) from triplets of base
    atoms, along with existence/ambiguity masks and "alternative" frames
    with ambiguous side-chain atoms flipped.  Results are stored under the
    "rigidgroups_*" keys as 4x4 homogeneous-transform tensors.

    Args:
        protein: dict with "aatype", "all_atom_positions", "all_atom_mask".
        eps: numerical stabilizer forwarded to Rigid.from_3_points.
    """
    aatype = protein["aatype"]
    all_atom_positions = protein["all_atom_positions"]
    all_atom_mask = protein["all_atom_mask"]

    batch_dims = len(aatype.shape[:-1])

    # [21, 8, 3] table of base-atom names per (restype, rigid group).
    restype_rigidgroup_base_atom_names = np.full([21, 8, 3], "", dtype=object)
    restype_rigidgroup_base_atom_names[:, 0, :] = ["C", "CA", "N"]
    restype_rigidgroup_base_atom_names[:, 3, :] = ["CA", "C", "O"]

    # Chi-group base atoms: the last three atoms of each chi-angle quadruple.
    for restype, restype_letter in enumerate(rc.restypes):
        resname = rc.restype_1to3[restype_letter]
        for chi_idx in range(4):
            if rc.chi_angles_mask[restype][chi_idx]:
                names = rc.chi_angles_atoms[resname][chi_idx]
                restype_rigidgroup_base_atom_names[
                    restype, chi_idx + 4, :
                ] = names[1:]

    restype_rigidgroup_mask = all_atom_mask.new_zeros(
        (*aatype.shape[:-1], 21, 8),
    )
    restype_rigidgroup_mask[..., 0] = 1
    restype_rigidgroup_mask[..., 3] = 1
    restype_rigidgroup_mask[..., :20, 4:] = all_atom_mask.new_tensor(
        rc.chi_angles_mask
    )

    # Translate atom names to atom37 indices ("" padding maps to 0).
    lookuptable = rc.atom_order.copy()
    lookuptable[""] = 0
    lookup = np.vectorize(lambda x: lookuptable[x])
    restype_rigidgroup_base_atom37_idx = lookup(
        restype_rigidgroup_base_atom_names,
    )
    restype_rigidgroup_base_atom37_idx = aatype.new_tensor(
        restype_rigidgroup_base_atom37_idx,
    )
    restype_rigidgroup_base_atom37_idx = (
        restype_rigidgroup_base_atom37_idx.view(
            *((1,) * batch_dims), *restype_rigidgroup_base_atom37_idx.shape
        )
    )

    residx_rigidgroup_base_atom37_idx = batched_gather(
        restype_rigidgroup_base_atom37_idx,
        aatype,
        dim=-3,
        no_batch_dims=batch_dims,
    )

    base_atom_pos = batched_gather(
        all_atom_positions,
        residx_rigidgroup_base_atom37_idx,
        dim=-2,
        no_batch_dims=len(all_atom_positions.shape[:-2]),
    )

    # Build ground-truth frames from the three base atoms of each group.
    gt_frames = Rigid.from_3_points(
        p_neg_x_axis=base_atom_pos[..., 0, :],
        origin=base_atom_pos[..., 1, :],
        p_xy_plane=base_atom_pos[..., 2, :],
        eps=eps,
    )

    group_exists = batched_gather(
        restype_rigidgroup_mask,
        aatype,
        dim=-2,
        no_batch_dims=batch_dims,
    )

    gt_atoms_exist = batched_gather(
        all_atom_mask,
        residx_rigidgroup_base_atom37_idx,
        dim=-1,
        no_batch_dims=len(all_atom_mask.shape[:-1]),
    )
    # A frame only exists if all three of its base atoms were resolved.
    gt_exists = torch.min(gt_atoms_exist, dim=-1)[0] * group_exists

    # Fix the rotation of the backbone frame (group 0) by flipping x and z.
    rots = torch.eye(3, dtype=all_atom_mask.dtype, device=aatype.device)
    rots = torch.tile(rots, (*((1,) * batch_dims), 8, 1, 1))
    rots[..., 0, 0, 0] = -1
    rots[..., 0, 2, 2] = -1
    rots = Rotation(rot_mats=rots)

    gt_frames = gt_frames.compose(Rigid(rots, None))

    restype_rigidgroup_is_ambiguous = all_atom_mask.new_zeros(
        *((1,) * batch_dims), 21, 8
    )
    restype_rigidgroup_rots = torch.eye(
        3, dtype=all_atom_mask.dtype, device=aatype.device
    )
    restype_rigidgroup_rots = torch.tile(
        restype_rigidgroup_rots,
        (*((1,) * batch_dims), 21, 8, 1, 1),
    )

    # Mark the final chi group of residues with ambiguous atom naming and
    # prepare a 180-degree rotation (flip y and z) for the alternative frame.
    for resname, _ in rc.residue_atom_renaming_swaps.items():
        restype = rc.restype_order[rc.restype_3to1[resname]]
        chi_idx = int(sum(rc.chi_angles_mask[restype]) - 1)
        restype_rigidgroup_is_ambiguous[..., restype, chi_idx + 4] = 1
        restype_rigidgroup_rots[..., restype, chi_idx + 4, 1, 1] = -1
        restype_rigidgroup_rots[..., restype, chi_idx + 4, 2, 2] = -1

    residx_rigidgroup_is_ambiguous = batched_gather(
        restype_rigidgroup_is_ambiguous,
        aatype,
        dim=-2,
        no_batch_dims=batch_dims,
    )

    residx_rigidgroup_ambiguity_rot = batched_gather(
        restype_rigidgroup_rots,
        aatype,
        dim=-4,
        no_batch_dims=batch_dims,
    )

    residx_rigidgroup_ambiguity_rot = Rotation(
        rot_mats=residx_rigidgroup_ambiguity_rot
    )
    alt_gt_frames = gt_frames.compose(
        Rigid(residx_rigidgroup_ambiguity_rot, None)
    )

    gt_frames_tensor = gt_frames.to_tensor_4x4()
    alt_gt_frames_tensor = alt_gt_frames.to_tensor_4x4()

    protein["rigidgroups_gt_frames"] = gt_frames_tensor
    protein["rigidgroups_gt_exists"] = gt_exists
    protein["rigidgroups_group_exists"] = group_exists
    protein["rigidgroups_group_is_ambiguous"] = residx_rigidgroup_is_ambiguous
    protein["rigidgroups_alt_gt_frames"] = alt_gt_frames_tensor

    return protein
|
| 912 |
+
|
| 913 |
+
|
| 914 |
+
def get_chi_atom_indices():
    """Returns atom indices needed to compute chi angles for all residue types.

    Returns:
        A nested list of shape [residue_types=21, chis=4, atoms=4]. The residue
        types are in the order specified in rc.restypes + unknown residue type
        at the end. For chi angles which are not defined on the residue, the
        positions indices are by default set to 0.
    """
    all_indices = []
    for one_letter in rc.restypes:
        resname = rc.restype_1to3[one_letter]
        per_chi = [
            [rc.atom_order[atom] for atom in chi_atoms]
            for chi_atoms in rc.chi_angles_atoms[resname]
        ]
        # Pad chi angles that are not defined on this amino acid with zeros.
        while len(per_chi) < 4:
            per_chi.append([0, 0, 0, 0])
        all_indices.append(per_chi)

    # The UNKNOWN residue type gets all-zero indices.
    all_indices.append([[0, 0, 0, 0]] * 4)

    return all_indices
|
| 939 |
+
|
| 940 |
+
|
| 941 |
+
@curry1
def atom37_to_torsion_angles(
    protein,
    prefix="",
):
    """
    Convert coordinates to torsion angles.

    This function is extremely sensitive to floating point imprecisions
    and should be run with double precision whenever possible.

    Args:
        Dict containing:
            * (prefix)aatype:
                [*, N_res] residue indices
            * (prefix)all_atom_positions:
                [*, N_res, 37, 3] atom positions (in atom37
                format)
            * (prefix)all_atom_mask:
                [*, N_res, 37] atom position mask
    Returns:
        The same dictionary updated with the following features:

        "(prefix)torsion_angles_sin_cos" ([*, N_res, 7, 2])
            Torsion angles
        "(prefix)alt_torsion_angles_sin_cos" ([*, N_res, 7, 2])
            Alternate torsion angles (accounting for 180-degree symmetry)
        "(prefix)torsion_angles_mask" ([*, N_res, 7])
            Torsion angles mask
    """
    aatype = protein[prefix + "aatype"]
    all_atom_positions = protein[prefix + "all_atom_positions"]
    all_atom_mask = protein[prefix + "all_atom_mask"]

    # Map any out-of-range residue type to UNK (index 20).
    aatype = torch.clamp(aatype, max=20)

    # Shift positions/masks by one residue to access the previous residue's
    # atoms (the first residue gets an all-zero "previous" residue).
    pad = all_atom_positions.new_zeros(
        [*all_atom_positions.shape[:-3], 1, 37, 3]
    )
    prev_all_atom_positions = torch.cat(
        [pad, all_atom_positions[..., :-1, :, :]], dim=-3
    )

    pad = all_atom_mask.new_zeros([*all_atom_mask.shape[:-2], 1, 37])
    prev_all_atom_mask = torch.cat([pad, all_atom_mask[..., :-1, :]], dim=-2)

    # Assemble the 4-atom windows defining the backbone torsions.
    # NOTE(review): atom37 slots 0..4 are used as N, CA, C, (CB), O here —
    # confirm against rc.atom_types.
    pre_omega_atom_pos = torch.cat(
        [prev_all_atom_positions[..., 1:3, :], all_atom_positions[..., :2, :]],
        dim=-2,
    )
    phi_atom_pos = torch.cat(
        [prev_all_atom_positions[..., 2:3, :], all_atom_positions[..., :3, :]],
        dim=-2,
    )
    psi_atom_pos = torch.cat(
        [all_atom_positions[..., :3, :], all_atom_positions[..., 4:5, :]],
        dim=-2,
    )

    # A torsion is valid only if all four contributing atoms are present.
    pre_omega_mask = torch.prod(
        prev_all_atom_mask[..., 1:3], dim=-1
    ) * torch.prod(all_atom_mask[..., :2], dim=-1)
    phi_mask = prev_all_atom_mask[..., 2] * torch.prod(
        all_atom_mask[..., :3], dim=-1, dtype=all_atom_mask.dtype
    )
    psi_mask = (
        torch.prod(all_atom_mask[..., :3], dim=-1, dtype=all_atom_mask.dtype)
        * all_atom_mask[..., 4]
    )

    chi_atom_indices = torch.as_tensor(
        get_chi_atom_indices(), device=aatype.device
    )

    atom_indices = chi_atom_indices[..., aatype, :, :]
    chis_atom_pos = batched_gather(
        all_atom_positions, atom_indices, -2, len(atom_indices.shape[:-2])
    )

    chi_angles_mask = list(rc.chi_angles_mask)
    chi_angles_mask.append([0.0, 0.0, 0.0, 0.0])
    chi_angles_mask = all_atom_mask.new_tensor(chi_angles_mask)

    chis_mask = chi_angles_mask[aatype, :]

    chi_angle_atoms_mask = batched_gather(
        all_atom_mask,
        atom_indices,
        dim=-1,
        no_batch_dims=len(atom_indices.shape[:-2]),
    )
    chi_angle_atoms_mask = torch.prod(
        chi_angle_atoms_mask, dim=-1, dtype=chi_angle_atoms_mask.dtype
    )
    chis_mask = chis_mask * chi_angle_atoms_mask

    # Stack all 7 torsions: pre-omega, phi, psi, chi1..chi4.
    torsions_atom_pos = torch.cat(
        [
            pre_omega_atom_pos[..., None, :, :],
            phi_atom_pos[..., None, :, :],
            psi_atom_pos[..., None, :, :],
            chis_atom_pos,
        ],
        dim=-3,
    )

    torsion_angles_mask = torch.cat(
        [
            pre_omega_mask[..., None],
            phi_mask[..., None],
            psi_mask[..., None],
            chis_mask,
        ],
        dim=-1,
    )

    # Frame defined by the middle two atoms; the torsion angle is read off
    # from the fourth atom's position in that frame.
    torsion_frames = Rigid.from_3_points(
        torsions_atom_pos[..., 1, :],
        torsions_atom_pos[..., 2, :],
        torsions_atom_pos[..., 0, :],
        eps=1e-8,
    )

    fourth_atom_rel_pos = torsion_frames.invert().apply(
        torsions_atom_pos[..., 3, :]
    )

    torsion_angles_sin_cos = torch.stack(
        [fourth_atom_rel_pos[..., 2], fourth_atom_rel_pos[..., 1]], dim=-1
    )

    # Normalize (sin, cos) onto the unit circle.
    denom = torch.sqrt(
        torch.sum(
            torch.square(torsion_angles_sin_cos),
            dim=-1,
            dtype=torsion_angles_sin_cos.dtype,
            keepdims=True,
        )
        + 1e-8
    )
    torsion_angles_sin_cos = torsion_angles_sin_cos / denom

    # Sign convention flip for psi (third torsion).
    torsion_angles_sin_cos = torsion_angles_sin_cos * all_atom_mask.new_tensor(
        [1.0, 1.0, -1.0, 1.0, 1.0, 1.0, 1.0],
    )[((None,) * len(torsion_angles_sin_cos.shape[:-2])) + (slice(None), None)]

    # Chi angles with pi-periodic symmetry get a mirrored alternative.
    chi_is_ambiguous = torsion_angles_sin_cos.new_tensor(
        rc.chi_pi_periodic,
    )[aatype, ...]

    mirror_torsion_angles = torch.cat(
        [
            all_atom_mask.new_ones(*aatype.shape, 3),
            1.0 - 2.0 * chi_is_ambiguous,
        ],
        dim=-1,
    )

    alt_torsion_angles_sin_cos = (
        torsion_angles_sin_cos * mirror_torsion_angles[..., None]
    )

    protein[prefix + "torsion_angles_sin_cos"] = torsion_angles_sin_cos
    protein[prefix + "alt_torsion_angles_sin_cos"] = alt_torsion_angles_sin_cos
    protein[prefix + "torsion_angles_mask"] = torsion_angles_mask

    return protein
|
| 1108 |
+
|
| 1109 |
+
|
| 1110 |
+
def get_backbone_frames(protein):
    """Extract the backbone rigid group (group 0) frame tensor and its mask."""
    # DISCREPANCY: AlphaFold uses tensor_7s here. I don't know why.
    gt_frames = protein["rigidgroups_gt_frames"]
    gt_exists = protein["rigidgroups_gt_exists"]

    protein["backbone_rigid_tensor"] = gt_frames[..., 0, :, :]
    protein["backbone_rigid_mask"] = gt_exists[..., 0]

    return protein
|
| 1118 |
+
|
| 1119 |
+
|
| 1120 |
+
def get_chi_angles(protein):
    """Slice the four chi angles (torsions 3..6) out of the torsion features,
    cast to the dtype of "all_atom_mask"."""
    target_dtype = protein["all_atom_mask"].dtype
    sin_cos = protein["torsion_angles_sin_cos"]
    torsion_mask = protein["torsion_angles_mask"]

    protein["chi_angles_sin_cos"] = sin_cos[..., 3:, :].to(target_dtype)
    protein["chi_mask"] = torsion_mask[..., 3:].to(target_dtype)

    return protein
|
| 1128 |
+
|
| 1129 |
+
|
| 1130 |
+
@curry1
def random_crop_to_size(
    protein,
    crop_size,
    max_templates,
    shape_schema,
    subsample_templates=False,
    seed=None,
):
    """Crop randomly to `crop_size`, or keep as is if shorter than that.

    Crops every feature whose schema contains NUM_RES along the residue
    dimension, and every "template*" feature along the template dimension
    (optionally subsampling templates first).  Uses a dedicated
    torch.Generator so that a fixed `seed` yields a reproducible crop.
    """
    # We want each ensemble to be cropped the same way
    g = torch.Generator(device=protein["seq_length"].device)
    if seed is not None:
        g.manual_seed(seed)

    seq_length = protein["seq_length"]

    if "template_mask" in protein:
        num_templates = protein["template_mask"].shape[-1]
    else:
        num_templates = 0

    # No need to subsample templates if there aren't any
    subsample_templates = subsample_templates and num_templates

    num_res_crop_size = min(int(seq_length), crop_size)

    # Inclusive-range randint driven by the seeded generator above.
    def _randint(lower, upper):
        return int(torch.randint(
                lower,
                upper + 1,
                (1,),
                device=protein["seq_length"].device,
                generator=g,
        )[0])

    if subsample_templates:
        templates_crop_start = _randint(0, num_templates)
        templates_select_indices = torch.randperm(
            num_templates, device=protein["seq_length"].device, generator=g
        )
    else:
        templates_crop_start = 0

    num_templates_crop_size = min(
        num_templates - templates_crop_start, max_templates
    )

    n = seq_length - num_res_crop_size
    # With clamped FAPE the crop is anchored to the left end; otherwise the
    # start is drawn uniformly (via two draws, biasing toward the edges).
    if "use_clamped_fape" in protein and protein["use_clamped_fape"] == 1.:
        right_anchor = n
    else:
        x = _randint(0, n)
        right_anchor = n - x

    num_res_crop_start = _randint(0, right_anchor)

    for k, v in protein.items():
        # Skip features with no schema and non-template features without a
        # residue dimension.
        if k not in shape_schema or (
            "template" not in k and NUM_RES not in shape_schema[k]
        ):
            continue

        # randomly permute the templates before cropping them.
        if k.startswith("template") and subsample_templates:
            v = v[templates_select_indices]

        slices = []
        for i, (dim_size, dim) in enumerate(zip(shape_schema[k], v.shape)):
            is_num_res = dim_size == NUM_RES
            # NOTE(review): this rebinds the `crop_size` parameter inside the
            # loop; num_res_crop_size was already computed above, so behavior
            # is unaffected — but don't reuse `crop_size` after this loop.
            if i == 0 and k.startswith("template"):
                crop_size = num_templates_crop_size
                crop_start = templates_crop_start
            else:
                crop_start = num_res_crop_start if is_num_res else 0
                crop_size = num_res_crop_size if is_num_res else dim
            slices.append(slice(crop_start, crop_start + crop_size))
        protein[k] = v[slices]

    protein["seq_length"] = protein["seq_length"].new_tensor(num_res_crop_size)

    return protein
|
openfold/protein.py
ADDED
|
@@ -0,0 +1,429 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 AlQuraishi Laboratory
|
| 2 |
+
# Copyright 2021 DeepMind Technologies Limited
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
"""Protein data type."""
|
| 17 |
+
import dataclasses
|
| 18 |
+
import io
|
| 19 |
+
from typing import Any, Sequence, Mapping, Optional
|
| 20 |
+
import re
|
| 21 |
+
import string
|
| 22 |
+
|
| 23 |
+
from . import residue_constants
|
| 24 |
+
from Bio.PDB import PDBParser
|
| 25 |
+
import numpy as np
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# Mapping from feature name to its array value.
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
# Conversion factor from picometers to angstroms (1 pm = 0.01 Å).
PICO_TO_ANGSTROM = 0.01
|
| 31 |
+
|
| 32 |
+
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation.

    Immutable container for a single protein structure. Per-residue arrays
    share their leading dimension; the atom dimension follows the ordering
    of residue_constants.atom_types.
    """

    # Cartesian coordinates of atoms in angstroms. The atom types correspond to
    # residue_constants.atom_types, i.e. the first three are N, CA, CB.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def from_pdb_string(pdb_str: str, chain_id: Optional[str] = None) -> Protein:
    """Takes a PDB string and constructs a Protein object.

    WARNING: All non-standard residue types will be converted into UNK. All
    non-standard atoms will be ignored.

    Args:
        pdb_str: The contents of the pdb file.
        chain_id: If None, all chains in the file are parsed. If chain_id is
            specified (e.g. A), then only that chain is parsed.

    Returns:
        A new `Protein` parsed from the pdb contents.

    Raises:
        ValueError: If the PDB contains more than one model or any residue
            with an insertion code.
    """
    pdb_fh = io.StringIO(pdb_str)
    parser = PDBParser(QUIET=True)
    structure = parser.get_structure("none", pdb_fh)
    models = list(structure.get_models())
    if len(models) != 1:
        raise ValueError(
            f"Only single model PDBs are supported. Found {len(models)} models."
        )
    model = models[0]

    atom_positions = []
    aatype = []
    atom_mask = []
    residue_index = []
    chain_ids = []
    b_factors = []

    for chain in model:
        if chain_id is not None and chain.id != chain_id:
            continue
        for res in chain:
            # res.id is (hetero_flag, seq_number, insertion_code).
            if res.id[2] != " ":
                raise ValueError(
                    f"PDB contains an insertion code at chain {chain.id} and residue "
                    f"index {res.id[1]}. These are not supported."
                )
            res_shortname = residue_constants.restype_3to1.get(res.resname, "X")
            restype_idx = residue_constants.restype_order.get(
                res_shortname, residue_constants.restype_num
            )
            pos = np.zeros((residue_constants.atom_type_num, 3))
            mask = np.zeros((residue_constants.atom_type_num,))
            res_b_factors = np.zeros((residue_constants.atom_type_num,))
            for atom in res:
                # Silently drop atoms outside the canonical atom vocabulary
                # (e.g. hydrogens, non-standard atoms).
                if atom.name not in residue_constants.atom_types:
                    continue
                atom_idx = residue_constants.atom_order[atom.name]
                pos[atom_idx] = atom.coord
                mask[atom_idx] = 1.0
                res_b_factors[atom_idx] = atom.bfactor
            if np.sum(mask) < 0.5:
                # If no known atom positions are reported for the residue then
                # skip it.
                continue
            aatype.append(restype_idx)
            atom_positions.append(pos)
            atom_mask.append(mask)
            residue_index.append(res.id[1])
            chain_ids.append(chain.id)
            b_factors.append(res_b_factors)

    # Recover template ("PARENT") records, one PARENT line per chain, in order.
    # (Fix: the original loop reassigned the `chain_id` *parameter* as its
    # counter; a distinct local name is used here instead.)
    parents = None
    parents_chain_index = None
    if "PARENT" in pdb_str:
        parents = []
        parents_chain_index = []
        parent_chain_idx = 0
        for l in pdb_str.split("\n"):
            if "PARENT" in l:
                if "N/A" not in l:
                    parent_names = l.split()[1:]
                    parents.extend(parent_names)
                    parents_chain_index.extend(
                        [parent_chain_idx for _ in parent_names]
                    )
                parent_chain_idx += 1

    # Map single-letter chain IDs to integer indices (A -> 0, B -> 1, ...).
    # (Fix: removed the unused `unique_chain_ids` computation.)
    # NOTE(review): chain IDs outside A-Z raise KeyError here — confirm inputs
    # are restricted to uppercase single-letter chains.
    chain_id_mapping = {cid: n for n, cid in enumerate(string.ascii_uppercase)}
    chain_index = np.array([chain_id_mapping[cid] for cid in chain_ids])

    return Protein(
        atom_positions=np.array(atom_positions),
        atom_mask=np.array(atom_mask),
        aatype=np.array(aatype),
        residue_index=np.array(residue_index),
        chain_index=chain_index,
        b_factors=np.array(b_factors),
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    """Parses a ProteinNet-format record into a `Protein`.

    Only the [PRIMARY] (sequence), [TERTIARY] (N/CA/C coordinates) and [MASK]
    sections are consumed; other sections are ignored.

    Args:
        proteinnet_str: The contents of one ProteinNet record.

    Returns:
        A `Protein` with backbone-only atom positions/masks and no B-factors.
    """
    tag_re = r'(\[[A-Z]+\]\n)'
    tags = [
        tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0
    ]
    # Pair each [TAG] with the lines of its section body.
    groups = zip(tags[0::2], [l.split('\n') for l in tags[1::2]])

    atoms = ['N', 'CA', 'C']
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            # BUG FIX: the original mutated the string in place
            # (`seq[i] = 'X'`), which raises TypeError because Python strings
            # are immutable. Build a sanitized copy instead, replacing any
            # non-standard residue symbol with 'X'.
            seq = "".join(
                res if res in residue_constants.restypes else "X"
                for res in seq
            )
            aatype = np.array([
                residue_constants.restype_order.get(
                    res_symbol, residue_constants.restype_num
                ) for res_symbol in seq
            ])
        elif "[TERTIARY]" == g[0]:
            # Three lines: all x, then all y, then all z coordinates,
            # flattened over (residue, atom).
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros(
                (len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = (
                    np.transpose(tertiary_np[:, i::3])
                )
            # ProteinNet coordinates are stored in picometers.
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            # '-' marks unresolved residues, '+' resolved ones.
            mask = np.array(list(map({'-': 0, '+': 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (len(mask), residue_constants.atom_type_num,)
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> Sequence[str]:
    """Builds the REMARK/PARENT header lines for one chain of `prot`.

    Args:
        prot: The protein whose metadata is rendered.
        chain_id: Which chain's parents to report when per-chain parent
            indices are available.

    Returns:
        A list of header lines; always contains a PARENT line ("N/A" when no
        parents apply), preceded by a REMARK line when `prot.remark` is set.
    """
    headers = []

    if prot.remark is not None:
        headers.append(f"REMARK {prot.remark}")

    parent_names = prot.parents
    if prot.parents_chain_index is not None:
        # Keep only the parents belonging to the requested chain.
        parent_names = [
            name
            for idx, name in zip(prot.parents_chain_index, parent_names)
            if idx == chain_id
        ]

    if not parent_names:
        parent_names = ["N/A"]
    headers.append(f"PARENT {' '.join(parent_names)}")

    return headers
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """ Add pdb headers to an existing PDB string. Useful during multi-chain
    recycling
    """
    out_pdb_lines = []
    lines = pdb_str.split('\n')

    remark = prot.remark
    if(remark is not None):
        out_pdb_lines.append(f"REMARK {remark}")

    # Build one parent-name list per chain. Falls back to [["N/A"]] when the
    # protein carries no parent metadata at all.
    parents_per_chain = None
    if(prot.parents is not None and len(prot.parents) > 0):
        parents_per_chain = []
        if(prot.parents_chain_index is not None):
            cur_chain = prot.parents_chain_index[0]  # NOTE(review): unused.
            # Group parent names by their (stringified) chain index.
            parent_dict = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            # Emit an entry for every chain index up to the maximum, using
            # "N/A" for chains that have no parents of their own.
            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            # No per-chain indices: all parents belong to a single chain.
            parents_per_chain.append(prot.parents)
    else:
        parents_per_chain = [["N/A"]]

    make_parent_line = lambda p: f"PARENT {' '.join(p)}"

    # First chain's PARENT line goes before any atom records.
    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        # Copy every line except pre-existing PARENT/REMARK headers, which are
        # being replaced.
        if("PARENT" not in l and "REMARK" not in l):
            out_pdb_lines.append(l)
        # A TER not followed by END means another chain starts next; insert
        # that chain's PARENT line here.
        # NOTE(review): `lines[i + 1]` assumes a TER is never the final line —
        # holds for to_pdb output, which always ends with "END" + "".
        if("TER" in l and not "END" in lines[i + 1]):
            chain_counter += 1
            if(not chain_counter >= len(parents_per_chain)):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return '\n'.join(out_pdb_lines)
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string.
    Args:
      prot: The protein to convert to PDB.
    Returns:
      PDB string.
    """
    restypes = residue_constants.restypes + ["X"]
    # Map a residue-type index to its three-letter code; 'X' maps to UNK.
    res_1to3 = lambda r: residue_constants.restype_1to3.get(restypes[r], "UNK")
    atom_types = residue_constants.atom_types

    pdb_lines = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    # REMARK/PARENT headers for the first chain go at the top of the file.
    headers = get_pdb_headers(prot)
    if(len(headers) > 0):
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(
            atom_types, atom_positions[i], atom_mask[i], b_factors[i]
        ):
            # Emit only atoms that are actually present.
            if mask < 0.5:
                continue

            record_type = "ATOM"
            # 4-char atom names fill the column; shorter ones are padded left.
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[
                0
            ]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if(chain_index is not None):
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            # NOTE(review): the literal space runs below were reconstructed to
            # the standard ATOM-record column widths; the rendering this was
            # recovered from collapses whitespace — verify against the
            # wwPDB format specification.
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        # Terminate the chain after the last residue, or when the next residue
        # belongs to a different chain.
        should_terminate = (i == n - 1)
        if(chain_index is not None):
            if(i != n - 1 and chain_index[i + 1] != prev_chain_index):
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if(should_terminate):
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      "
                f"{res_1to3(aatype[i]):>3} "
                f"{chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if(i != n - 1):
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask.

    `Protein.atom_mask` typically reflects the atoms actually reported in the
    PDB entry. This function instead returns, per residue, the mask of heavy
    atoms that should exist for that amino-acid type.

    Args:
        prot: `Protein` whose fields are `numpy.ndarray` objects.

    Returns:
        An ideal atom mask.
    """
    standard_masks = residue_constants.STANDARD_ATOM_MASK
    return standard_masks[prot.aatype]
|
| 394 |
+
|
| 395 |
+
|
| 396 |
+
def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None
) -> Protein:
    """Assembles a protein from a prediction.

    Args:
        features: Dictionary holding model inputs.
        result: Dictionary holding model outputs.
        b_factors: (Optional) B-factors to use for the protein; defaults to
            zeros shaped like the final atom mask.
        chain_index: (Optional) Chain indices for multi-chain predictions.
        remark: (Optional) Remark about the prediction.
        parents: (Optional) List of template names.
        parents_chain_index: (Optional) Chain index of each parent.

    Returns:
        A protein instance.
    """
    final_mask = result["final_atom_mask"]

    # Default to all-zero B-factors when none are supplied.
    effective_b_factors = (
        np.zeros_like(final_mask) if b_factors is None else b_factors
    )

    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=final_mask,
        # PDB residue numbering is 1-based, hence the +1.
        residue_index=features["residue_index"] + 1,
        b_factors=effective_b_factors,
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
|
openfold/residue_constants.py
ADDED
|
@@ -0,0 +1,1310 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 AlQuraishi Laboratory
|
| 2 |
+
# Copyright 2021 DeepMind Technologies Limited
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
"""Constants used in AlphaFold."""
|
| 17 |
+
|
| 18 |
+
import collections
|
| 19 |
+
import functools
|
| 20 |
+
from typing import Mapping, List, Tuple
|
| 21 |
+
from importlib import resources
|
| 22 |
+
|
| 23 |
+
import numpy as np
|
| 24 |
+
import tree
|
| 25 |
+
|
| 26 |
+
# Internal import (35fd).
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
# Distance from one CA to next CA [trans configuration: omega = 180].
ca_ca = 3.80209737096

# Format: The list for each AA type contains chi1, chi2, chi3, chi4 in
# this order (or a relevant subset from chi1 onwards). ALA and GLY don't have
# chi angles so their chi angle lists are empty.
# Each chi angle is defined by the four atoms forming its dihedral.
chi_angles_atoms = {
    "ALA": [],
    # Chi5 in arginine is always 0 +- 5 degrees, so ignore it.
    "ARG": [
        ["N", "CA", "CB", "CG"],
        ["CA", "CB", "CG", "CD"],
        ["CB", "CG", "CD", "NE"],
        ["CG", "CD", "NE", "CZ"],
    ],
    "ASN": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "OD1"]],
    "ASP": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "OD1"]],
    "CYS": [["N", "CA", "CB", "SG"]],
    "GLN": [
        ["N", "CA", "CB", "CG"],
        ["CA", "CB", "CG", "CD"],
        ["CB", "CG", "CD", "OE1"],
    ],
    "GLU": [
        ["N", "CA", "CB", "CG"],
        ["CA", "CB", "CG", "CD"],
        ["CB", "CG", "CD", "OE1"],
    ],
    "GLY": [],
    "HIS": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "ND1"]],
    "ILE": [["N", "CA", "CB", "CG1"], ["CA", "CB", "CG1", "CD1"]],
    "LEU": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
    "LYS": [
        ["N", "CA", "CB", "CG"],
        ["CA", "CB", "CG", "CD"],
        ["CB", "CG", "CD", "CE"],
        ["CG", "CD", "CE", "NZ"],
    ],
    "MET": [
        ["N", "CA", "CB", "CG"],
        ["CA", "CB", "CG", "SD"],
        ["CB", "CG", "SD", "CE"],
    ],
    "PHE": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
    "PRO": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"]],
    "SER": [["N", "CA", "CB", "OG"]],
    "THR": [["N", "CA", "CB", "OG1"]],
    "TRP": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
    "TYR": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
    "VAL": [["N", "CA", "CB", "CG1"]],
}
|
| 80 |
+
|
| 81 |
+
# If chi angles given in fixed-length array, this matrix determines how to mask
# them for each AA type. The order is as per restype_order (see below).
# NOTE(review): 20 rows (no UNK entry), unlike chi_pi_periodic below which
# carries an explicit 21st UNK row.
chi_angles_mask = [
    [0.0, 0.0, 0.0, 0.0],  # ALA
    [1.0, 1.0, 1.0, 1.0],  # ARG
    [1.0, 1.0, 0.0, 0.0],  # ASN
    [1.0, 1.0, 0.0, 0.0],  # ASP
    [1.0, 0.0, 0.0, 0.0],  # CYS
    [1.0, 1.0, 1.0, 0.0],  # GLN
    [1.0, 1.0, 1.0, 0.0],  # GLU
    [0.0, 0.0, 0.0, 0.0],  # GLY
    [1.0, 1.0, 0.0, 0.0],  # HIS
    [1.0, 1.0, 0.0, 0.0],  # ILE
    [1.0, 1.0, 0.0, 0.0],  # LEU
    [1.0, 1.0, 1.0, 1.0],  # LYS
    [1.0, 1.0, 1.0, 0.0],  # MET
    [1.0, 1.0, 0.0, 0.0],  # PHE
    [1.0, 1.0, 0.0, 0.0],  # PRO
    [1.0, 0.0, 0.0, 0.0],  # SER
    [1.0, 0.0, 0.0, 0.0],  # THR
    [1.0, 1.0, 0.0, 0.0],  # TRP
    [1.0, 1.0, 0.0, 0.0],  # TYR
    [1.0, 0.0, 0.0, 0.0],  # VAL
]
|
| 105 |
+
|
| 106 |
+
# The following chi angles are pi periodic: they can be rotated by a multiple
# of pi without affecting the structure. (These are the chi angles of the
# symmetric side-chain ends: ASP chi2, GLU chi3, PHE chi2, TYR chi2.)
chi_pi_periodic = [
    [0.0, 0.0, 0.0, 0.0],  # ALA
    [0.0, 0.0, 0.0, 0.0],  # ARG
    [0.0, 0.0, 0.0, 0.0],  # ASN
    [0.0, 1.0, 0.0, 0.0],  # ASP
    [0.0, 0.0, 0.0, 0.0],  # CYS
    [0.0, 0.0, 0.0, 0.0],  # GLN
    [0.0, 0.0, 1.0, 0.0],  # GLU
    [0.0, 0.0, 0.0, 0.0],  # GLY
    [0.0, 0.0, 0.0, 0.0],  # HIS
    [0.0, 0.0, 0.0, 0.0],  # ILE
    [0.0, 0.0, 0.0, 0.0],  # LEU
    [0.0, 0.0, 0.0, 0.0],  # LYS
    [0.0, 0.0, 0.0, 0.0],  # MET
    [0.0, 1.0, 0.0, 0.0],  # PHE
    [0.0, 0.0, 0.0, 0.0],  # PRO
    [0.0, 0.0, 0.0, 0.0],  # SER
    [0.0, 0.0, 0.0, 0.0],  # THR
    [0.0, 0.0, 0.0, 0.0],  # TRP
    [0.0, 1.0, 0.0, 0.0],  # TYR
    [0.0, 0.0, 0.0, 0.0],  # VAL
    [0.0, 0.0, 0.0, 0.0],  # UNK
]
|
| 131 |
+
|
| 132 |
+
# Atoms positions relative to the 8 rigid groups, defined by the pre-omega, phi,
|
| 133 |
+
# psi and chi angles:
|
| 134 |
+
# 0: 'backbone group',
|
| 135 |
+
# 1: 'pre-omega-group', (empty)
|
| 136 |
+
# 2: 'phi-group', (currently empty, because it defines only hydrogens)
|
| 137 |
+
# 3: 'psi-group',
|
| 138 |
+
# 4,5,6,7: 'chi1,2,3,4-group'
|
| 139 |
+
# The atom positions are relative to the axis-end-atom of the corresponding
|
| 140 |
+
# rotation axis. The x-axis is in direction of the rotation axis, and the y-axis
|
| 141 |
+
# is defined such that the dihedral-angle-definiting atom (the last entry in
|
| 142 |
+
# chi_angles_atoms above) is in the xy-plane (with a positive y-coordinate).
|
| 143 |
+
# format: [atomname, group_idx, rel_position]
|
| 144 |
+
rigid_group_atom_positions = {
|
| 145 |
+
"ALA": [
|
| 146 |
+
["N", 0, (-0.525, 1.363, 0.000)],
|
| 147 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 148 |
+
["C", 0, (1.526, -0.000, -0.000)],
|
| 149 |
+
["CB", 0, (-0.529, -0.774, -1.205)],
|
| 150 |
+
["O", 3, (0.627, 1.062, 0.000)],
|
| 151 |
+
],
|
| 152 |
+
"ARG": [
|
| 153 |
+
["N", 0, (-0.524, 1.362, -0.000)],
|
| 154 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 155 |
+
["C", 0, (1.525, -0.000, -0.000)],
|
| 156 |
+
["CB", 0, (-0.524, -0.778, -1.209)],
|
| 157 |
+
["O", 3, (0.626, 1.062, 0.000)],
|
| 158 |
+
["CG", 4, (0.616, 1.390, -0.000)],
|
| 159 |
+
["CD", 5, (0.564, 1.414, 0.000)],
|
| 160 |
+
["NE", 6, (0.539, 1.357, -0.000)],
|
| 161 |
+
["NH1", 7, (0.206, 2.301, 0.000)],
|
| 162 |
+
["NH2", 7, (2.078, 0.978, -0.000)],
|
| 163 |
+
["CZ", 7, (0.758, 1.093, -0.000)],
|
| 164 |
+
],
|
| 165 |
+
"ASN": [
|
| 166 |
+
["N", 0, (-0.536, 1.357, 0.000)],
|
| 167 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 168 |
+
["C", 0, (1.526, -0.000, -0.000)],
|
| 169 |
+
["CB", 0, (-0.531, -0.787, -1.200)],
|
| 170 |
+
["O", 3, (0.625, 1.062, 0.000)],
|
| 171 |
+
["CG", 4, (0.584, 1.399, 0.000)],
|
| 172 |
+
["ND2", 5, (0.593, -1.188, 0.001)],
|
| 173 |
+
["OD1", 5, (0.633, 1.059, 0.000)],
|
| 174 |
+
],
|
| 175 |
+
"ASP": [
|
| 176 |
+
["N", 0, (-0.525, 1.362, -0.000)],
|
| 177 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 178 |
+
["C", 0, (1.527, 0.000, -0.000)],
|
| 179 |
+
["CB", 0, (-0.526, -0.778, -1.208)],
|
| 180 |
+
["O", 3, (0.626, 1.062, -0.000)],
|
| 181 |
+
["CG", 4, (0.593, 1.398, -0.000)],
|
| 182 |
+
["OD1", 5, (0.610, 1.091, 0.000)],
|
| 183 |
+
["OD2", 5, (0.592, -1.101, -0.003)],
|
| 184 |
+
],
|
| 185 |
+
"CYS": [
|
| 186 |
+
["N", 0, (-0.522, 1.362, -0.000)],
|
| 187 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 188 |
+
["C", 0, (1.524, 0.000, 0.000)],
|
| 189 |
+
["CB", 0, (-0.519, -0.773, -1.212)],
|
| 190 |
+
["O", 3, (0.625, 1.062, -0.000)],
|
| 191 |
+
["SG", 4, (0.728, 1.653, 0.000)],
|
| 192 |
+
],
|
| 193 |
+
"GLN": [
|
| 194 |
+
["N", 0, (-0.526, 1.361, -0.000)],
|
| 195 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 196 |
+
["C", 0, (1.526, 0.000, 0.000)],
|
| 197 |
+
["CB", 0, (-0.525, -0.779, -1.207)],
|
| 198 |
+
["O", 3, (0.626, 1.062, -0.000)],
|
| 199 |
+
["CG", 4, (0.615, 1.393, 0.000)],
|
| 200 |
+
["CD", 5, (0.587, 1.399, -0.000)],
|
| 201 |
+
["NE2", 6, (0.593, -1.189, -0.001)],
|
| 202 |
+
["OE1", 6, (0.634, 1.060, 0.000)],
|
| 203 |
+
],
|
| 204 |
+
"GLU": [
|
| 205 |
+
["N", 0, (-0.528, 1.361, 0.000)],
|
| 206 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 207 |
+
["C", 0, (1.526, -0.000, -0.000)],
|
| 208 |
+
["CB", 0, (-0.526, -0.781, -1.207)],
|
| 209 |
+
["O", 3, (0.626, 1.062, 0.000)],
|
| 210 |
+
["CG", 4, (0.615, 1.392, 0.000)],
|
| 211 |
+
["CD", 5, (0.600, 1.397, 0.000)],
|
| 212 |
+
["OE1", 6, (0.607, 1.095, -0.000)],
|
| 213 |
+
["OE2", 6, (0.589, -1.104, -0.001)],
|
| 214 |
+
],
|
| 215 |
+
"GLY": [
|
| 216 |
+
["N", 0, (-0.572, 1.337, 0.000)],
|
| 217 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 218 |
+
["C", 0, (1.517, -0.000, -0.000)],
|
| 219 |
+
["O", 3, (0.626, 1.062, -0.000)],
|
| 220 |
+
],
|
| 221 |
+
"HIS": [
|
| 222 |
+
["N", 0, (-0.527, 1.360, 0.000)],
|
| 223 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 224 |
+
["C", 0, (1.525, 0.000, 0.000)],
|
| 225 |
+
["CB", 0, (-0.525, -0.778, -1.208)],
|
| 226 |
+
["O", 3, (0.625, 1.063, 0.000)],
|
| 227 |
+
["CG", 4, (0.600, 1.370, -0.000)],
|
| 228 |
+
["CD2", 5, (0.889, -1.021, 0.003)],
|
| 229 |
+
["ND1", 5, (0.744, 1.160, -0.000)],
|
| 230 |
+
["CE1", 5, (2.030, 0.851, 0.002)],
|
| 231 |
+
["NE2", 5, (2.145, -0.466, 0.004)],
|
| 232 |
+
],
|
| 233 |
+
"ILE": [
|
| 234 |
+
["N", 0, (-0.493, 1.373, -0.000)],
|
| 235 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 236 |
+
["C", 0, (1.527, -0.000, -0.000)],
|
| 237 |
+
["CB", 0, (-0.536, -0.793, -1.213)],
|
| 238 |
+
["O", 3, (0.627, 1.062, -0.000)],
|
| 239 |
+
["CG1", 4, (0.534, 1.437, -0.000)],
|
| 240 |
+
["CG2", 4, (0.540, -0.785, -1.199)],
|
| 241 |
+
["CD1", 5, (0.619, 1.391, 0.000)],
|
| 242 |
+
],
|
| 243 |
+
"LEU": [
|
| 244 |
+
["N", 0, (-0.520, 1.363, 0.000)],
|
| 245 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 246 |
+
["C", 0, (1.525, -0.000, -0.000)],
|
| 247 |
+
["CB", 0, (-0.522, -0.773, -1.214)],
|
| 248 |
+
["O", 3, (0.625, 1.063, -0.000)],
|
| 249 |
+
["CG", 4, (0.678, 1.371, 0.000)],
|
| 250 |
+
["CD1", 5, (0.530, 1.430, -0.000)],
|
| 251 |
+
["CD2", 5, (0.535, -0.774, 1.200)],
|
| 252 |
+
],
|
| 253 |
+
"LYS": [
|
| 254 |
+
["N", 0, (-0.526, 1.362, -0.000)],
|
| 255 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 256 |
+
["C", 0, (1.526, 0.000, 0.000)],
|
| 257 |
+
["CB", 0, (-0.524, -0.778, -1.208)],
|
| 258 |
+
["O", 3, (0.626, 1.062, -0.000)],
|
| 259 |
+
["CG", 4, (0.619, 1.390, 0.000)],
|
| 260 |
+
["CD", 5, (0.559, 1.417, 0.000)],
|
| 261 |
+
["CE", 6, (0.560, 1.416, 0.000)],
|
| 262 |
+
["NZ", 7, (0.554, 1.387, 0.000)],
|
| 263 |
+
],
|
| 264 |
+
"MET": [
|
| 265 |
+
["N", 0, (-0.521, 1.364, -0.000)],
|
| 266 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 267 |
+
["C", 0, (1.525, 0.000, 0.000)],
|
| 268 |
+
["CB", 0, (-0.523, -0.776, -1.210)],
|
| 269 |
+
["O", 3, (0.625, 1.062, -0.000)],
|
| 270 |
+
["CG", 4, (0.613, 1.391, -0.000)],
|
| 271 |
+
["SD", 5, (0.703, 1.695, 0.000)],
|
| 272 |
+
["CE", 6, (0.320, 1.786, -0.000)],
|
| 273 |
+
],
|
| 274 |
+
"PHE": [
|
| 275 |
+
["N", 0, (-0.518, 1.363, 0.000)],
|
| 276 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 277 |
+
["C", 0, (1.524, 0.000, -0.000)],
|
| 278 |
+
["CB", 0, (-0.525, -0.776, -1.212)],
|
| 279 |
+
["O", 3, (0.626, 1.062, -0.000)],
|
| 280 |
+
["CG", 4, (0.607, 1.377, 0.000)],
|
| 281 |
+
["CD1", 5, (0.709, 1.195, -0.000)],
|
| 282 |
+
["CD2", 5, (0.706, -1.196, 0.000)],
|
| 283 |
+
["CE1", 5, (2.102, 1.198, -0.000)],
|
| 284 |
+
["CE2", 5, (2.098, -1.201, -0.000)],
|
| 285 |
+
["CZ", 5, (2.794, -0.003, -0.001)],
|
| 286 |
+
],
|
| 287 |
+
"PRO": [
|
| 288 |
+
["N", 0, (-0.566, 1.351, -0.000)],
|
| 289 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 290 |
+
["C", 0, (1.527, -0.000, 0.000)],
|
| 291 |
+
["CB", 0, (-0.546, -0.611, -1.293)],
|
| 292 |
+
["O", 3, (0.621, 1.066, 0.000)],
|
| 293 |
+
["CG", 4, (0.382, 1.445, 0.0)],
|
| 294 |
+
# ['CD', 5, (0.427, 1.440, 0.0)],
|
| 295 |
+
["CD", 5, (0.477, 1.424, 0.0)], # manually made angle 2 degrees larger
|
| 296 |
+
],
|
| 297 |
+
"SER": [
|
| 298 |
+
["N", 0, (-0.529, 1.360, -0.000)],
|
| 299 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 300 |
+
["C", 0, (1.525, -0.000, -0.000)],
|
| 301 |
+
["CB", 0, (-0.518, -0.777, -1.211)],
|
| 302 |
+
["O", 3, (0.626, 1.062, -0.000)],
|
| 303 |
+
["OG", 4, (0.503, 1.325, 0.000)],
|
| 304 |
+
],
|
| 305 |
+
"THR": [
|
| 306 |
+
["N", 0, (-0.517, 1.364, 0.000)],
|
| 307 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 308 |
+
["C", 0, (1.526, 0.000, -0.000)],
|
| 309 |
+
["CB", 0, (-0.516, -0.793, -1.215)],
|
| 310 |
+
["O", 3, (0.626, 1.062, 0.000)],
|
| 311 |
+
["CG2", 4, (0.550, -0.718, -1.228)],
|
| 312 |
+
["OG1", 4, (0.472, 1.353, 0.000)],
|
| 313 |
+
],
|
| 314 |
+
"TRP": [
|
| 315 |
+
["N", 0, (-0.521, 1.363, 0.000)],
|
| 316 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 317 |
+
["C", 0, (1.525, -0.000, 0.000)],
|
| 318 |
+
["CB", 0, (-0.523, -0.776, -1.212)],
|
| 319 |
+
["O", 3, (0.627, 1.062, 0.000)],
|
| 320 |
+
["CG", 4, (0.609, 1.370, -0.000)],
|
| 321 |
+
["CD1", 5, (0.824, 1.091, 0.000)],
|
| 322 |
+
["CD2", 5, (0.854, -1.148, -0.005)],
|
| 323 |
+
["CE2", 5, (2.186, -0.678, -0.007)],
|
| 324 |
+
["CE3", 5, (0.622, -2.530, -0.007)],
|
| 325 |
+
["NE1", 5, (2.140, 0.690, -0.004)],
|
| 326 |
+
["CH2", 5, (3.028, -2.890, -0.013)],
|
| 327 |
+
["CZ2", 5, (3.283, -1.543, -0.011)],
|
| 328 |
+
["CZ3", 5, (1.715, -3.389, -0.011)],
|
| 329 |
+
],
|
| 330 |
+
"TYR": [
|
| 331 |
+
["N", 0, (-0.522, 1.362, 0.000)],
|
| 332 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 333 |
+
["C", 0, (1.524, -0.000, -0.000)],
|
| 334 |
+
["CB", 0, (-0.522, -0.776, -1.213)],
|
| 335 |
+
["O", 3, (0.627, 1.062, -0.000)],
|
| 336 |
+
["CG", 4, (0.607, 1.382, -0.000)],
|
| 337 |
+
["CD1", 5, (0.716, 1.195, -0.000)],
|
| 338 |
+
["CD2", 5, (0.713, -1.194, -0.001)],
|
| 339 |
+
["CE1", 5, (2.107, 1.200, -0.002)],
|
| 340 |
+
["CE2", 5, (2.104, -1.201, -0.003)],
|
| 341 |
+
["OH", 5, (4.168, -0.002, -0.005)],
|
| 342 |
+
["CZ", 5, (2.791, -0.001, -0.003)],
|
| 343 |
+
],
|
| 344 |
+
"VAL": [
|
| 345 |
+
["N", 0, (-0.494, 1.373, -0.000)],
|
| 346 |
+
["CA", 0, (0.000, 0.000, 0.000)],
|
| 347 |
+
["C", 0, (1.527, -0.000, -0.000)],
|
| 348 |
+
["CB", 0, (-0.533, -0.795, -1.213)],
|
| 349 |
+
["O", 3, (0.627, 1.062, -0.000)],
|
| 350 |
+
["CG1", 4, (0.540, 1.429, -0.000)],
|
| 351 |
+
["CG2", 4, (0.533, -0.776, 1.203)],
|
| 352 |
+
],
|
| 353 |
+
}
|
| 354 |
+
|
| 355 |
+
# A list of atoms (excluding hydrogen) for each AA type. PDB naming convention.
residue_atoms = {
    "ALA": ["C", "CA", "CB", "N", "O"],
    "ARG": ["C", "CA", "CB", "CG", "CD", "CZ", "N", "NE", "O", "NH1", "NH2"],
    "ASP": ["C", "CA", "CB", "CG", "N", "O", "OD1", "OD2"],
    "ASN": ["C", "CA", "CB", "CG", "N", "ND2", "O", "OD1"],
    "CYS": ["C", "CA", "CB", "N", "O", "SG"],
    "GLU": ["C", "CA", "CB", "CG", "CD", "N", "O", "OE1", "OE2"],
    "GLN": ["C", "CA", "CB", "CG", "CD", "N", "NE2", "O", "OE1"],
    "GLY": ["C", "CA", "N", "O"],
    "HIS": ["C", "CA", "CB", "CG", "CD2", "CE1", "N", "ND1", "NE2", "O"],
    "ILE": ["C", "CA", "CB", "CG1", "CG2", "CD1", "N", "O"],
    "LEU": ["C", "CA", "CB", "CG", "CD1", "CD2", "N", "O"],
    "LYS": ["C", "CA", "CB", "CG", "CD", "CE", "N", "NZ", "O"],
    "MET": ["C", "CA", "CB", "CG", "CE", "N", "O", "SD"],
    "PHE": ["C", "CA", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "N", "O"],
    "PRO": ["C", "CA", "CB", "CG", "CD", "N", "O"],
    "SER": ["C", "CA", "CB", "N", "O", "OG"],
    "THR": ["C", "CA", "CB", "CG2", "N", "O", "OG1"],
    "TRP": [
        "C", "CA", "CB", "CG", "CD1", "CD2", "CE2", "CE3", "CZ2", "CZ3",
        "CH2", "N", "NE1", "O",
    ],
    "TYR": [
        "C", "CA", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "N", "O",
        "OH",
    ],
    "VAL": ["C", "CA", "CB", "CG1", "CG2", "N", "O"],
}
|
| 406 |
+
|
| 407 |
+
# Naming swaps for ambiguous atom names.
# Due to symmetries in the amino acids the naming of atoms is ambiguous in
# 4 of the 20 amino acids.
# (The LDDT paper lists 7 amino acids as ambiguous, but the naming ambiguities
# in LEU, VAL and ARG can be resolved by using the 3d constellations of
# the 'ambiguous' atoms and their neighbours)
# TODO: ^ interpret this
residue_atom_renaming_swaps = {
    "ASP": {"OD1": "OD2"},
    "GLU": {"OE1": "OE2"},
    "PHE": {"CD1": "CD2", "CE1": "CE2"},
    "TYR": {"CD1": "CD2", "CE1": "CE2"},
}
|
| 420 |
+
|
| 421 |
+
# Van der Waals radii [Angstroem] of the atoms (from Wikipedia)
van_der_waals_radius = {"C": 1.7, "N": 1.55, "O": 1.52, "S": 1.8}
|
| 428 |
+
|
| 429 |
+
# Literature bond-length / bond-angle records used by
# load_stereo_chemical_props().
Bond = collections.namedtuple(
    "Bond", ["atom1_name", "atom2_name", "length", "stddev"]
)
# NB: the third field is spelled "atom3name" (no underscore); downstream code
# accesses `ba.atom3name`, so the name is kept for compatibility.
BondAngle = collections.namedtuple(
    "BondAngle", ["atom1_name", "atom2_name", "atom3name", "angle_rad", "stddev"]
)
|
| 436 |
+
|
| 437 |
+
|
| 438 |
+
@functools.lru_cache(maxsize=None)
def load_stereo_chemical_props() -> Tuple[
    Mapping[str, List[Bond]],
    Mapping[str, List[Bond]],
    Mapping[str, List[BondAngle]],
]:
    """Load stereo_chemical_props.txt into a nice structure.

    Load literature values for bond lengths and bond angles and translate
    bond angles into the length of the opposite edge of the triangle
    ("residue_virtual_bonds").

    Returns:
        residue_bonds: dict that maps resname --> list of Bond tuples
        residue_virtual_bonds: dict that maps resname --> list of Bond tuples
        residue_bond_angles: dict that maps resname --> list of BondAngle tuples
    """
    # TODO: this file should be downloaded in a setup script
    raw_text = resources.read_text("openfold.resources", "stereo_chemical_props.txt")
    line_iter = iter(raw_text.splitlines())

    # --- Section 1: bond lengths (terminated by a lone "-") ---
    residue_bonds = {}
    next(line_iter)  # Skip header line.
    for row in line_iter:
        if row.strip() == "-":
            break
        bond, resname, length, stddev = row.split()
        first_atom, second_atom = bond.split("-")
        residue_bonds.setdefault(resname, []).append(
            Bond(first_atom, second_atom, float(length), float(stddev))
        )
    residue_bonds["UNK"] = []

    # --- Section 2: bond angles (degrees in the file, radians here) ---
    residue_bond_angles = {}
    next(line_iter)  # Skip empty line.
    next(line_iter)  # Skip header line.
    for row in line_iter:
        if row.strip() == "-":
            break
        angle, resname, angle_degree, stddev_degree = row.split()
        first_atom, middle_atom, last_atom = angle.split("-")
        residue_bond_angles.setdefault(resname, []).append(
            BondAngle(
                first_atom,
                middle_atom,
                last_atom,
                float(angle_degree) / 180.0 * np.pi,
                float(stddev_degree) / 180.0 * np.pi,
            )
        )
    residue_bond_angles["UNK"] = []

    def make_bond_key(atom1_name, atom2_name):
        """Unique key to lookup bonds."""
        return "-".join(sorted([atom1_name, atom2_name]))

    # --- Translate bond angles into distances ("virtual bonds") ---
    residue_virtual_bonds = {}
    for resname, bond_angles in residue_bond_angles.items():
        # Fast lookup from sorted atom-pair name to the measured bond.
        bond_cache = {
            make_bond_key(b.atom1_name, b.atom2_name): b
            for b in residue_bonds[resname]
        }
        virtual_bonds = []
        for ba in bond_angles:
            bond1 = bond_cache[make_bond_key(ba.atom1_name, ba.atom2_name)]
            bond2 = bond_cache[make_bond_key(ba.atom2_name, ba.atom3name)]
            gamma = ba.angle_rad

            # Distance between atom1 and atom3 via the law of cosines:
            # c^2 = a^2 + b^2 - 2ab*cos(gamma).
            length = np.sqrt(
                bond1.length ** 2
                + bond2.length ** 2
                - 2 * bond1.length * bond2.length * np.cos(gamma)
            )

            # Propagation of uncertainty assuming uncorrelated errors.
            dl_outer = 0.5 / length
            dl_dgamma = (2 * bond1.length * bond2.length * np.sin(gamma)) * dl_outer
            dl_db1 = (2 * bond1.length - 2 * bond2.length * np.cos(gamma)) * dl_outer
            dl_db2 = (2 * bond2.length - 2 * bond1.length * np.cos(gamma)) * dl_outer
            stddev = np.sqrt(
                (dl_dgamma * ba.stddev) ** 2
                + (dl_db1 * bond1.stddev) ** 2
                + (dl_db2 * bond2.stddev) ** 2
            )
            virtual_bonds.append(Bond(ba.atom1_name, ba.atom3name, length, stddev))
        residue_virtual_bonds[resname] = virtual_bonds

    return (residue_bonds, residue_virtual_bonds, residue_bond_angles)
|
| 542 |
+
|
| 543 |
+
|
| 544 |
+
# Between-residue bond lengths for general bonds (first element) and for Proline
# (second element).
between_res_bond_length_c_n = [1.329, 1.341]
between_res_bond_length_stddev_c_n = [0.014, 0.016]

# Between-residue cos_angles.
between_res_cos_angles_c_n_ca = [-0.5203, 0.0353]  # degrees: 121.352 +- 2.315
between_res_cos_angles_ca_c_n = [-0.4473, 0.0311]  # degrees: 116.568 +- 1.995
|
| 552 |
+
|
| 553 |
+
# This mapping is used when we need to store atom data in a format that requires
# fixed atom data size for every residue (e.g. a numpy array).
atom_types = (
    "N CA C CB O CG CG1 CG2 OG OG1 SG CD CD1 CD2 ND1 ND2 OD1 OD2 SD "
    "CE CE1 CE2 CE3 NE NE1 NE2 OE1 OE2 CH2 NH1 NH2 OH CZ CZ2 CZ3 NZ OXT"
).split()
atom_order = {atom_type: i for i, atom_type in enumerate(atom_types)}
atom_type_num = len(atom_types)  # := 37.
|
| 596 |
+
|
| 597 |
+
# A compact atom encoding with 14 columns.  Each residue lists its heavy
# atoms in a fixed order; entries are padded with "" up to length 14.
_atom14_names_unpadded = {
    "ALA": ["N", "CA", "C", "O", "CB"],
    "ARG": ["N", "CA", "C", "O", "CB", "CG", "CD", "NE", "CZ", "NH1", "NH2"],
    "ASN": ["N", "CA", "C", "O", "CB", "CG", "OD1", "ND2"],
    "ASP": ["N", "CA", "C", "O", "CB", "CG", "OD1", "OD2"],
    "CYS": ["N", "CA", "C", "O", "CB", "SG"],
    "GLN": ["N", "CA", "C", "O", "CB", "CG", "CD", "OE1", "NE2"],
    "GLU": ["N", "CA", "C", "O", "CB", "CG", "CD", "OE1", "OE2"],
    "GLY": ["N", "CA", "C", "O"],
    "HIS": ["N", "CA", "C", "O", "CB", "CG", "ND1", "CD2", "CE1", "NE2"],
    "ILE": ["N", "CA", "C", "O", "CB", "CG1", "CG2", "CD1"],
    "LEU": ["N", "CA", "C", "O", "CB", "CG", "CD1", "CD2"],
    "LYS": ["N", "CA", "C", "O", "CB", "CG", "CD", "CE", "NZ"],
    "MET": ["N", "CA", "C", "O", "CB", "CG", "SD", "CE"],
    "PHE": ["N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ"],
    "PRO": ["N", "CA", "C", "O", "CB", "CG", "CD"],
    "SER": ["N", "CA", "C", "O", "CB", "OG"],
    "THR": ["N", "CA", "C", "O", "CB", "OG1", "CG2"],
    "TRP": [
        "N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "NE1", "CE2",
        "CE3", "CZ2", "CZ3", "CH2",
    ],
    "TYR": [
        "N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "CE1", "CE2",
        "CZ", "OH",
    ],
    "VAL": ["N", "CA", "C", "O", "CB", "CG1", "CG2"],
    "UNK": [],
}
restype_name_to_atom14_names = {
    resname: names + [""] * (14 - len(names))
    for resname, names in _atom14_names_unpadded.items()
}
|
| 850 |
+
|
| 851 |
+
|
| 852 |
+
# This is the standard residue order when coding AA type as a number.
# Reproduce it by taking 3-letter AA codes and sorting them alphabetically.
restypes = list("ARNDCQEGHILKMFPSTWYV")
restype_order = {restype: i for i, restype in enumerate(restypes)}
restype_num = len(restypes)  # := 20.
unk_restype_index = restype_num  # Catch-all index for unknown restypes.

restypes_with_x = restypes + ["X"]
restype_order_with_x = {restype: i for i, restype in enumerate(restypes_with_x)}
|
| 882 |
+
|
| 883 |
+
|
| 884 |
+
def sequence_to_onehot(
    sequence: str, mapping: Mapping[str, int], map_unknown_to_x: bool = False
) -> np.ndarray:
    """Maps the given sequence into a one-hot encoded matrix.

    Args:
        sequence: An amino acid sequence.
        mapping: A dictionary mapping amino acids to integers.
        map_unknown_to_x: If True, any amino acid that is not in the mapping will be
            mapped to the unknown amino acid 'X'. If the mapping doesn't contain
            amino acid 'X', an error will be thrown. If False, any amino acid not in
            the mapping will throw an error.

    Returns:
        A numpy array of shape (seq_len, num_unique_aas) with one-hot encoding of
        the sequence.

    Raises:
        ValueError: If the mapping doesn't contain values from 0 to
            num_unique_aas - 1 without any gaps.
    """
    num_entries = max(mapping.values()) + 1

    # The ids must form a dense range so they can index one-hot columns.
    if sorted(set(mapping.values())) != list(range(num_entries)):
        raise ValueError(
            "The mapping must have values from 0 to num_unique_aas-1 "
            f"without any gaps. Got: {sorted(mapping.values())}"
        )

    one_hot_arr = np.zeros((len(sequence), num_entries), dtype=np.int32)

    for position, residue in enumerate(sequence):
        if map_unknown_to_x:
            # Only uppercase letters are valid; anything else is rejected.
            if not (residue.isalpha() and residue.isupper()):
                raise ValueError(f"Invalid character in the sequence: {residue}")
            residue_id = mapping.get(residue, mapping["X"])
        else:
            residue_id = mapping[residue]
        one_hot_arr[position, residue_id] = 1

    return one_hot_arr
|
| 928 |
+
|
| 929 |
+
|
| 930 |
+
# One-letter to three-letter amino-acid name mapping (standard 20 residues).
restype_1to3 = {
    "A": "ALA", "R": "ARG", "N": "ASN", "D": "ASP", "C": "CYS",
    "Q": "GLN", "E": "GLU", "G": "GLY", "H": "HIS", "I": "ILE",
    "L": "LEU", "K": "LYS", "M": "MET", "F": "PHE", "P": "PRO",
    "S": "SER", "T": "THR", "W": "TRP", "Y": "TYR", "V": "VAL",
}
|
| 952 |
+
|
| 953 |
+
|
| 954 |
+
# NB: restype_3to1 differs from Bio.PDB.protein_letters_3to1 by being a simple
# 1-to-1 mapping of 3 letter names to one letter names. The latter contains
# many more, and less common, three letter names as keys and maps many of these
# to the same one letter name (including 'X' and 'U' which we don't use here).
restype_3to1 = {three: one for one, three in restype_1to3.items()}

# Define a restype name for all unknown residues.
unk_restype = "UNK"

resnames = [restype_1to3[r] for r in restypes] + [unk_restype]
resname_to_idx = {resname: i for i, resname in enumerate(resnames)}
|
| 965 |
+
|
| 966 |
+
|
| 967 |
+
# The mapping here uses hhblits convention, so that B is mapped to D, J and O
# are mapped to X, U is mapped to C, and Z is mapped to E. Other than that the
# remaining 20 amino acids are kept in alphabetical order.
# There are 2 non-amino acid codes, X (representing any amino acid) and
# "-" representing a missing amino acid in an alignment. The id for these
# codes is put at the end (20 and 21) so that they can easily be ignored if
# desired.
HHBLITS_AA_TO_ID = {
    "A": 0, "B": 2, "C": 1, "D": 2, "E": 3, "F": 4, "G": 5,
    "H": 6, "I": 7, "J": 20, "K": 8, "L": 9, "M": 10, "N": 11,
    "O": 20, "P": 12, "Q": 13, "R": 14, "S": 15, "T": 16, "U": 1,
    "V": 17, "W": 18, "X": 20, "Y": 19, "Z": 3, "-": 21,
}
|
| 1003 |
+
|
| 1004 |
+
# Partial inversion of HHBLITS_AA_TO_ID (ids that share a letter collapse to
# the canonical residue noted in the inline comments).
ID_TO_HHBLITS_AA = {
    0: "A",
    1: "C",  # Also U.
    2: "D",  # Also B.
    3: "E",  # Also Z.
    4: "F", 5: "G", 6: "H", 7: "I", 8: "K", 9: "L", 10: "M",
    11: "N", 12: "P", 13: "Q", 14: "R", 15: "S", 16: "T", 17: "V",
    18: "W", 19: "Y",
    20: "X",  # Includes J and O.
    21: "-",
}
|
| 1029 |
+
|
| 1030 |
+
restypes_with_x_and_gap = restypes + ["X", "-"]
# Permutation taking an hhblits aatype id to this module's aatype id.
MAP_HHBLITS_AATYPE_TO_OUR_AATYPE = tuple(
    restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[hhblits_id])
    for hhblits_id in range(len(restypes_with_x_and_gap))
)
|
| 1035 |
+
|
| 1036 |
+
|
| 1037 |
+
def _make_standard_atom_mask() -> np.ndarray:
    """Returns [num_res_types, num_atom_types] mask array."""
    # +1 row to account for the unknown restype (left all zeros).
    mask = np.zeros([restype_num + 1, atom_type_num], dtype=np.int32)
    for restype_idx, letter in enumerate(restypes):
        for atom_name in residue_atoms[restype_1to3[letter]]:
            mask[restype_idx, atom_order[atom_name]] = 1
    return mask


STANDARD_ATOM_MASK = _make_standard_atom_mask()
|
| 1051 |
+
|
| 1052 |
+
|
| 1053 |
+
# A one hot representation for the first and second atoms defining the axis
# of rotation for each chi-angle in each residue.
def chi_angle_atom(atom_index: int) -> np.ndarray:
    """Define chi-angle rigid groups via one-hot representations."""
    # Per-residue list of 4 atom37 indices (padded with -1 for absent chis).
    chi_indices_by_resname = {}
    for resname, chi_groups in chi_angles_atoms.items():
        indices = [atom_types.index(group[atom_index]) for group in chi_groups]
        indices.extend([-1] * (4 - len(indices)))
        chi_indices_by_resname[resname] = indices

    one_hots = [
        np.eye(atom_type_num)[chi_indices_by_resname[restype_1to3[r]]]
        for r in restypes
    ]
    one_hots.append(np.zeros([4, atom_type_num]))  # Add zeros for residue `X`.

    stacked = np.stack(one_hots, axis=0)
    return np.transpose(stacked, [0, 2, 1])


chi_atom_1_one_hot = chi_angle_atom(1)
chi_atom_2_one_hot = chi_angle_atom(2)
|
| 1079 |
+
|
| 1080 |
+
# An array like chi_angles_atoms but using indices rather than names.
chi_angles_atom_indices = [chi_angles_atoms[restype_1to3[r]] for r in restypes]
chi_angles_atom_indices = tree.map_structure(
    lambda atom_name: atom_order[atom_name], chi_angles_atom_indices
)
# Pad every residue to 4 chi groups of 4 atom indices each.
chi_angles_atom_indices = np.array(
    [chis + [[0, 0, 0, 0]] * (4 - len(chis)) for chis in chi_angles_atom_indices]
)
|
| 1091 |
+
|
| 1092 |
+
# Mapping from (res_name, atom_name) pairs to the atom's chi group index
|
| 1093 |
+
# and atom index within that group.
|
| 1094 |
+
chi_groups_for_atom = collections.defaultdict(list)
|
| 1095 |
+
for res_name, chi_angle_atoms_for_res in chi_angles_atoms.items():
|
| 1096 |
+
for chi_group_i, chi_group in enumerate(chi_angle_atoms_for_res):
|
| 1097 |
+
for atom_i, atom in enumerate(chi_group):
|
| 1098 |
+
chi_groups_for_atom[(res_name, atom)].append((chi_group_i, atom_i))
|
| 1099 |
+
chi_groups_for_atom = dict(chi_groups_for_atom)
|
| 1100 |
+
|
| 1101 |
+
|
| 1102 |
+
def _make_rigid_transformation_4x4(ex, ey, translation):
|
| 1103 |
+
"""Create a rigid 4x4 transformation matrix from two axes and transl."""
|
| 1104 |
+
# Normalize ex.
|
| 1105 |
+
ex_normalized = ex / np.linalg.norm(ex)
|
| 1106 |
+
|
| 1107 |
+
# make ey perpendicular to ex
|
| 1108 |
+
ey_normalized = ey - np.dot(ey, ex_normalized) * ex_normalized
|
| 1109 |
+
ey_normalized /= np.linalg.norm(ey_normalized)
|
| 1110 |
+
|
| 1111 |
+
# compute ez as cross product
|
| 1112 |
+
eznorm = np.cross(ex_normalized, ey_normalized)
|
| 1113 |
+
m = np.stack(
|
| 1114 |
+
[ex_normalized, ey_normalized, eznorm, translation]
|
| 1115 |
+
).transpose()
|
| 1116 |
+
m = np.concatenate([m, [[0.0, 0.0, 0.0, 1.0]]], axis=0)
|
| 1117 |
+
return m
|
| 1118 |
+
|
| 1119 |
+
|
| 1120 |
+
# create an array with (restype, atomtype) --> rigid_group_idx
|
| 1121 |
+
# and an array with (restype, atomtype, coord) for the atom positions
|
| 1122 |
+
# and compute affine transformation matrices (4,4) from one rigid group to the
|
| 1123 |
+
# previous group
|
| 1124 |
+
restype_atom37_to_rigid_group = np.zeros([21, 37], dtype=np.int)
|
| 1125 |
+
restype_atom37_mask = np.zeros([21, 37], dtype=np.float32)
|
| 1126 |
+
restype_atom37_rigid_group_positions = np.zeros([21, 37, 3], dtype=np.float32)
|
| 1127 |
+
restype_atom14_to_rigid_group = np.zeros([21, 14], dtype=np.int)
|
| 1128 |
+
restype_atom14_mask = np.zeros([21, 14], dtype=np.float32)
|
| 1129 |
+
restype_atom14_rigid_group_positions = np.zeros([21, 14, 3], dtype=np.float32)
|
| 1130 |
+
restype_rigid_group_default_frame = np.zeros([21, 8, 4, 4], dtype=np.float32)
|
| 1131 |
+
|
| 1132 |
+
|
| 1133 |
+
def _make_rigid_group_constants():
    """Fill the rigid-group lookup arrays declared above."""
    for restype, letter in enumerate(restypes):
        resname = restype_1to3[letter]

        # Scatter each atom's rigid-group index and local position into both
        # the atom37 and the atom14 layouts.
        for atomname, group_idx, atom_position in rigid_group_atom_positions[resname]:
            atomtype = atom_order[atomname]
            restype_atom37_to_rigid_group[restype, atomtype] = group_idx
            restype_atom37_mask[restype, atomtype] = 1
            restype_atom37_rigid_group_positions[restype, atomtype, :] = atom_position

            atom14idx = restype_name_to_atom14_names[resname].index(atomname)
            restype_atom14_to_rigid_group[restype, atom14idx] = group_idx
            restype_atom14_mask[restype, atom14idx] = 1
            restype_atom14_rigid_group_positions[restype, atom14idx, :] = atom_position

        atom_positions = {
            name: np.array(pos)
            for name, _, pos in rigid_group_atom_positions[resname]
        }

        # backbone to backbone is the identity transform
        restype_rigid_group_default_frame[restype, 0, :, :] = np.eye(4)
        # pre-omega-frame to backbone (currently dummy identity matrix)
        restype_rigid_group_default_frame[restype, 1, :, :] = np.eye(4)

        # phi-frame to backbone
        restype_rigid_group_default_frame[restype, 2, :, :] = _make_rigid_transformation_4x4(
            ex=atom_positions["N"] - atom_positions["CA"],
            ey=np.array([1.0, 0.0, 0.0]),
            translation=atom_positions["N"],
        )

        # psi-frame to backbone
        restype_rigid_group_default_frame[restype, 3, :, :] = _make_rigid_transformation_4x4(
            ex=atom_positions["C"] - atom_positions["CA"],
            ey=atom_positions["CA"] - atom_positions["N"],
            translation=atom_positions["C"],
        )

        # chi1-frame to backbone
        if chi_angles_mask[restype][0]:
            base = [atom_positions[n] for n in chi_angles_atoms[resname][0]]
            restype_rigid_group_default_frame[restype, 4, :, :] = (
                _make_rigid_transformation_4x4(
                    ex=base[2] - base[1],
                    ey=base[0] - base[1],
                    translation=base[2],
                )
            )

        # chi2-frame to chi1-frame, chi3-frame to chi2-frame, and
        # chi4-frame to chi3-frame: luckily all rotation axes for the next
        # frame start at (0,0,0) of the previous frame.
        for chi_idx in range(1, 4):
            if chi_angles_mask[restype][chi_idx]:
                axis_end = atom_positions[chi_angles_atoms[resname][chi_idx][2]]
                restype_rigid_group_default_frame[restype, 4 + chi_idx, :, :] = (
                    _make_rigid_transformation_4x4(
                        ex=axis_end,
                        ey=np.array([-1.0, 0.0, 0.0]),
                        translation=axis_end,
                    )
                )


_make_rigid_group_constants()
|
| 1216 |
+
|
| 1217 |
+
|
| 1218 |
+
def make_atom14_dists_bounds(
    overlap_tolerance=1.5, bond_length_tolerance_factor=15
):
    """compute upper and lower bounds for bonds to assess violations."""
    restype_atom14_bond_lower_bound = np.zeros([21, 14, 14], np.float32)
    restype_atom14_bond_upper_bound = np.zeros([21, 14, 14], np.float32)
    restype_atom14_bond_stddev = np.zeros([21, 14, 14], np.float32)
    residue_bonds, residue_virtual_bonds, _ = load_stereo_chemical_props()
    for res_idx, one_letter in enumerate(restypes):
        res_name = restype_1to3[one_letter]
        atom_names = restype_name_to_atom14_names[res_name]

        # Clash bounds first: two distinct real atoms must stay at least
        # (r1 + r2 - overlap_tolerance) apart; no meaningful upper limit.
        for i, name_i in enumerate(atom_names):
            if not name_i:
                continue
            radius_i = van_der_waals_radius[name_i[0]]
            for j, name_j in enumerate(atom_names):
                if (not name_j) or i == j:
                    continue
                radius_j = van_der_waals_radius[name_j[0]]
                clash_lower = radius_i + radius_j - overlap_tolerance
                # Tables are symmetric in the two atom indices.
                restype_atom14_bond_lower_bound[res_idx, i, j] = clash_lower
                restype_atom14_bond_lower_bound[res_idx, j, i] = clash_lower
                restype_atom14_bond_upper_bound[res_idx, i, j] = 1e10
                restype_atom14_bond_upper_bound[res_idx, j, i] = 1e10

        # Bonded (and angle-derived "virtual" bonded) pairs override the
        # clash bounds with length +/- tolerance_factor * stddev.
        for bond in residue_bonds[res_name] + residue_virtual_bonds[res_name]:
            i = atom_names.index(bond.atom1_name)
            j = atom_names.index(bond.atom2_name)
            lo = bond.length - bond_length_tolerance_factor * bond.stddev
            hi = bond.length + bond_length_tolerance_factor * bond.stddev
            restype_atom14_bond_lower_bound[res_idx, i, j] = lo
            restype_atom14_bond_lower_bound[res_idx, j, i] = lo
            restype_atom14_bond_upper_bound[res_idx, i, j] = hi
            restype_atom14_bond_upper_bound[res_idx, j, i] = hi
            restype_atom14_bond_stddev[res_idx, i, j] = bond.stddev
            restype_atom14_bond_stddev[res_idx, j, i] = bond.stddev
    return {
        "lower_bound": restype_atom14_bond_lower_bound,  # shape (21,14,14)
        "upper_bound": restype_atom14_bond_upper_bound,  # shape (21,14,14)
        "stddev": restype_atom14_bond_stddev,  # shape (21,14,14)
    }
|
| 1279 |
+
|
| 1280 |
+
|
| 1281 |
+
# Per-restype mask of atoms that are chemically ambiguous (renameable), and
# the atom14 index each atom swaps with (identity where unambiguous).
restype_atom14_ambiguous_atoms = np.zeros((21, 14), dtype=np.float32)
# NOTE: the original used np.arange(14, dtype=np.int); the `np.int` alias was
# deprecated in NumPy 1.20 and removed in 1.24, so use an explicit int dtype.
restype_atom14_ambiguous_atoms_swap_idx = np.tile(
    np.arange(14, dtype=np.int64), (21, 1)
)
|
| 1285 |
+
|
| 1286 |
+
|
| 1287 |
+
def _make_atom14_ambiguity_feats():
    """Populate the ambiguity mask and swap-index tables in place.

    For every residue with renameable atom pairs (e.g. ASP OD1/OD2), marks
    both atoms as ambiguous and records each one's swap partner index.
    """
    for res, pairs in residue_atom_renaming_swaps.items():
        res_idx = restype_order[restype_3to1[res]]
        for atom1, atom2 in pairs.items():
            idx1 = restype_name_to_atom14_names[res].index(atom1)
            idx2 = restype_name_to_atom14_names[res].index(atom2)
            restype_atom14_ambiguous_atoms[res_idx, idx1] = 1
            restype_atom14_ambiguous_atoms[res_idx, idx2] = 1
            restype_atom14_ambiguous_atoms_swap_idx[res_idx, idx1] = idx2
            restype_atom14_ambiguous_atoms_swap_idx[res_idx, idx2] = idx1


_make_atom14_ambiguity_feats()
|
| 1304 |
+
|
| 1305 |
+
|
| 1306 |
+
def aatype_to_str_sequence(aatype):
    """Map a sequence of residue-type indices to a one-letter-code string."""
    return ''.join(restypes_with_x[aatype[i]] for i in range(len(aatype)))
|
openfold/rigid_utils.py
ADDED
|
@@ -0,0 +1,1368 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 AlQuraishi Laboratory
|
| 2 |
+
# Copyright 2021 DeepMind Technologies Limited
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
from __future__ import annotations
|
| 17 |
+
from typing import Tuple, Any, Sequence, Callable, Optional
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def rot_matmul(
    a: torch.Tensor,
    b: torch.Tensor
) -> torch.Tensor:
    """
    Performs matrix multiplication of two rotation matrix tensors. Written
    out by hand (rather than torch.matmul) to avoid AMP downcasting.

    Args:
        a: [*, 3, 3] left multiplicand
        b: [*, 3, 3] right multiplicand
    Returns:
        The product ab
    """
    rows = []
    for i in range(3):
        cols = [
            a[..., i, 0] * b[..., 0, j]
            + a[..., i, 1] * b[..., 1, j]
            + a[..., i, 2] * b[..., 2, j]
            for j in range(3)
        ]
        rows.append(torch.stack(cols, dim=-1))
    return torch.stack(rows, dim=-2)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def rot_vec_mul(
    r: torch.Tensor,
    t: torch.Tensor
) -> torch.Tensor:
    """
    Applies a rotation to a vector. Written out by hand (rather than
    matmul) to avoid AMP downcasting.

    Args:
        r: [*, 3, 3] rotation matrices
        t: [*, 3] coordinate tensors
    Returns:
        [*, 3] rotated coordinates
    """
    coords = torch.unbind(t, dim=-1)
    rotated = [
        r[..., i, 0] * coords[0]
        + r[..., i, 1] * coords[1]
        + r[..., i, 2] * coords[2]
        for i in range(3)
    ]
    return torch.stack(rotated, dim=-1)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def identity_rot_mats(
    batch_dims: Tuple[int],
    dtype: Optional[torch.dtype] = None,
    device: Optional[torch.device] = None,
    requires_grad: bool = True,
) -> torch.Tensor:
    """Return a [*batch_dims, 3, 3] tensor of identity rotation matrices."""
    eye = torch.eye(
        3, dtype=dtype, device=device, requires_grad=requires_grad
    )
    # Broadcast a single 3x3 identity over the batch dims, then materialize.
    eye = eye.view(*((1,) * len(batch_dims)), 3, 3)
    return eye.expand(*batch_dims, -1, -1).contiguous()
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def identity_trans(
    batch_dims: Tuple[int],
    dtype: Optional[torch.dtype] = None,
    device: Optional[torch.device] = None,
    requires_grad: bool = True,
) -> torch.Tensor:
    """Return a [*batch_dims, 3] tensor of zero (identity) translations."""
    return torch.zeros(
        (*batch_dims, 3),
        dtype=dtype,
        device=device,
        requires_grad=requires_grad,
    )
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def identity_quats(
    batch_dims: Tuple[int],
    dtype: Optional[torch.dtype] = None,
    device: Optional[torch.device] = None,
    requires_grad: bool = True,
) -> torch.Tensor:
    """Return a [*batch_dims, 4] tensor of identity quaternions (1, 0, 0, 0)."""
    out = torch.zeros(
        (*batch_dims, 4),
        dtype=dtype,
        device=device,
        requires_grad=requires_grad,
    )

    # Write the scalar component in place without recording the write on the
    # autograd tape (the tensor may require grad).
    with torch.no_grad():
        out[..., 0] = 1

    return out
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
_quat_elements = ["a", "b", "c", "d"]
|
| 139 |
+
_qtr_keys = [l1 + l2 for l1 in _quat_elements for l2 in _quat_elements]
|
| 140 |
+
_qtr_ind_dict = {key: ind for ind, key in enumerate(_qtr_keys)}
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def _to_mat(pairs):
|
| 144 |
+
mat = np.zeros((4, 4))
|
| 145 |
+
for pair in pairs:
|
| 146 |
+
key, value = pair
|
| 147 |
+
ind = _qtr_ind_dict[key]
|
| 148 |
+
mat[ind // 4][ind % 4] = value
|
| 149 |
+
|
| 150 |
+
return mat
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
# Coefficient table mapping quaternion outer products to rotation-matrix
# entries: R[i, j] = sum_{p, q} _QTR_MAT[p, q, i, j] * quat[p] * quat[q].
_QTR_MAT = np.zeros((4, 4, 3, 3))
_QTR_MAT[..., 0, 0] = _to_mat([("aa", 1), ("bb", 1), ("cc", -1), ("dd", -1)])
_QTR_MAT[..., 0, 1] = _to_mat([("bc", 2), ("ad", -2)])
_QTR_MAT[..., 0, 2] = _to_mat([("bd", 2), ("ac", 2)])
_QTR_MAT[..., 1, 0] = _to_mat([("bc", 2), ("ad", 2)])
_QTR_MAT[..., 1, 1] = _to_mat([("aa", 1), ("bb", -1), ("cc", 1), ("dd", -1)])
_QTR_MAT[..., 1, 2] = _to_mat([("cd", 2), ("ab", -2)])
_QTR_MAT[..., 2, 0] = _to_mat([("bd", 2), ("ac", -2)])
_QTR_MAT[..., 2, 1] = _to_mat([("cd", 2), ("ab", 2)])
_QTR_MAT[..., 2, 2] = _to_mat([("aa", 1), ("bb", -1), ("cc", -1), ("dd", 1)])
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def quat_to_rot(quat: torch.Tensor) -> torch.Tensor:
    """
    Converts a quaternion to a rotation matrix.

    Args:
        quat: [*, 4] quaternions
    Returns:
        [*, 3, 3] rotation matrices
    """
    # Outer product of the quaternion with itself: [*, 4, 4]
    outer = quat[..., None] * quat[..., None, :]

    # Coefficient table on the right device/dtype: [4, 4, 3, 3]
    coeffs = outer.new_tensor(_QTR_MAT, requires_grad=False)

    # Broadcast to [*, 4, 4, 3, 3] and contract the two quaternion axes.
    coeffs = coeffs.view((1,) * len(outer.shape[:-2]) + coeffs.shape)
    weighted = outer[..., None, None] * coeffs

    # [*, 3, 3]
    return torch.sum(weighted, dim=(-3, -4))
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def rot_to_quat(
    rot: torch.Tensor,
):
    """Convert [*, 3, 3] rotation matrices to [*, 4] quaternions.

    Uses the eigenvector method: the quaternion is the eigenvector of a
    symmetric 4x4 matrix built from the rotation, associated with its
    largest eigenvalue (sign of the quaternion is arbitrary).
    """
    if rot.shape[-2:] != (3, 3):
        raise ValueError("Input rotation is incorrectly shaped")

    xx, xy, xz = rot[..., 0, 0], rot[..., 0, 1], rot[..., 0, 2]
    yx, yy, yz = rot[..., 1, 0], rot[..., 1, 1], rot[..., 1, 2]
    zx, zy, zz = rot[..., 2, 0], rot[..., 2, 1], rot[..., 2, 2]

    rows = [
        [xx + yy + zz, zy - yz, xz - zx, yx - xy],
        [zy - yz, xx - yy - zz, xy + yx, xz + zx],
        [xz - zx, xy + yx, yy - xx - zz, yz + zy],
        [yx - xy, xz + zx, yz + zy, zz - xx - yy],
    ]
    k = (1. / 3.) * torch.stack(
        [torch.stack(row, dim=-1) for row in rows], dim=-2
    )

    # eigh returns eigenvalues in ascending order; take the top eigenvector.
    _, eigvecs = torch.linalg.eigh(k)
    return eigvecs[..., -1]
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
_QUAT_MULTIPLY = np.zeros((4, 4, 4))
|
| 211 |
+
_QUAT_MULTIPLY[:, :, 0] = [[ 1, 0, 0, 0],
|
| 212 |
+
[ 0,-1, 0, 0],
|
| 213 |
+
[ 0, 0,-1, 0],
|
| 214 |
+
[ 0, 0, 0,-1]]
|
| 215 |
+
|
| 216 |
+
_QUAT_MULTIPLY[:, :, 1] = [[ 0, 1, 0, 0],
|
| 217 |
+
[ 1, 0, 0, 0],
|
| 218 |
+
[ 0, 0, 0, 1],
|
| 219 |
+
[ 0, 0,-1, 0]]
|
| 220 |
+
|
| 221 |
+
_QUAT_MULTIPLY[:, :, 2] = [[ 0, 0, 1, 0],
|
| 222 |
+
[ 0, 0, 0,-1],
|
| 223 |
+
[ 1, 0, 0, 0],
|
| 224 |
+
[ 0, 1, 0, 0]]
|
| 225 |
+
|
| 226 |
+
_QUAT_MULTIPLY[:, :, 3] = [[ 0, 0, 0, 1],
|
| 227 |
+
[ 0, 0, 1, 0],
|
| 228 |
+
[ 0,-1, 0, 0],
|
| 229 |
+
[ 1, 0, 0, 0]]
|
| 230 |
+
|
| 231 |
+
_QUAT_MULTIPLY_BY_VEC = _QUAT_MULTIPLY[:, 1:, :]
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def quat_multiply(quat1, quat2):
    """Multiply a quaternion by another quaternion (Hamilton product)."""
    coeffs = quat1.new_tensor(_QUAT_MULTIPLY)
    # Broadcast the [4, 4, 4] coefficient tensor over the batch dims.
    coeffs = coeffs.view((1,) * len(quat1.shape[:-1]) + coeffs.shape)
    weighted = (
        coeffs
        * quat1[..., :, None, None]
        * quat2[..., None, :, None]
    )
    return torch.sum(weighted, dim=(-3, -2))
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
def quat_multiply_by_vec(quat, vec):
    """Multiply a quaternion by a pure-vector quaternion (vec is [*, 3])."""
    coeffs = quat.new_tensor(_QUAT_MULTIPLY_BY_VEC)
    # Broadcast the [4, 3, 4] coefficient tensor over the batch dims.
    coeffs = coeffs.view((1,) * len(quat.shape[:-1]) + coeffs.shape)
    weighted = (
        coeffs
        * quat[..., :, None, None]
        * vec[..., None, :, None]
    )
    return torch.sum(weighted, dim=(-3, -2))
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def invert_rot_mat(rot_mat: torch.Tensor):
    """Invert rotation matrices; for orthonormal rotations this is the
    transpose of the last two dimensions."""
    return torch.transpose(rot_mat, -1, -2)
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def invert_quat(quat: torch.Tensor):
    """Invert quaternions: conjugate divided by the squared norm."""
    conj = quat.clone()
    conj[..., 1:] = -conj[..., 1:]
    return conj / torch.sum(quat ** 2, dim=-1, keepdim=True)
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
class Rotation:
|
| 270 |
+
"""
|
| 271 |
+
A 3D rotation. Depending on how the object is initialized, the
|
| 272 |
+
rotation is represented by either a rotation matrix or a
|
| 273 |
+
quaternion, though both formats are made available by helper functions.
|
| 274 |
+
To simplify gradient computation, the underlying format of the
|
| 275 |
+
rotation cannot be changed in-place. Like Rigid, the class is designed
|
| 276 |
+
to mimic the behavior of a torch Tensor, almost as if each Rotation
|
| 277 |
+
object were a tensor of rotations, in one format or another.
|
| 278 |
+
"""
|
| 279 |
+
def __init__(self,
|
| 280 |
+
rot_mats: Optional[torch.Tensor] = None,
|
| 281 |
+
quats: Optional[torch.Tensor] = None,
|
| 282 |
+
normalize_quats: bool = True,
|
| 283 |
+
):
|
| 284 |
+
"""
|
| 285 |
+
Args:
|
| 286 |
+
rot_mats:
|
| 287 |
+
A [*, 3, 3] rotation matrix tensor. Mutually exclusive with
|
| 288 |
+
quats
|
| 289 |
+
quats:
|
| 290 |
+
A [*, 4] quaternion. Mutually exclusive with rot_mats. If
|
| 291 |
+
normalize_quats is not True, must be a unit quaternion
|
| 292 |
+
normalize_quats:
|
| 293 |
+
If quats is specified, whether to normalize quats
|
| 294 |
+
"""
|
| 295 |
+
if((rot_mats is None and quats is None) or
|
| 296 |
+
(rot_mats is not None and quats is not None)):
|
| 297 |
+
raise ValueError("Exactly one input argument must be specified")
|
| 298 |
+
|
| 299 |
+
if((rot_mats is not None and rot_mats.shape[-2:] != (3, 3)) or
|
| 300 |
+
(quats is not None and quats.shape[-1] != 4)):
|
| 301 |
+
raise ValueError(
|
| 302 |
+
"Incorrectly shaped rotation matrix or quaternion"
|
| 303 |
+
)
|
| 304 |
+
|
| 305 |
+
# Force full-precision
|
| 306 |
+
if(quats is not None):
|
| 307 |
+
quats = quats.to(dtype=torch.float32)
|
| 308 |
+
if(rot_mats is not None):
|
| 309 |
+
rot_mats = rot_mats.to(dtype=torch.float32)
|
| 310 |
+
|
| 311 |
+
if(quats is not None and normalize_quats):
|
| 312 |
+
quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True)
|
| 313 |
+
|
| 314 |
+
self._rot_mats = rot_mats
|
| 315 |
+
self._quats = quats
|
| 316 |
+
|
| 317 |
+
@staticmethod
|
| 318 |
+
def identity(
|
| 319 |
+
shape,
|
| 320 |
+
dtype: Optional[torch.dtype] = None,
|
| 321 |
+
device: Optional[torch.device] = None,
|
| 322 |
+
requires_grad: bool = True,
|
| 323 |
+
fmt: str = "quat",
|
| 324 |
+
) -> Rotation:
|
| 325 |
+
"""
|
| 326 |
+
Returns an identity Rotation.
|
| 327 |
+
|
| 328 |
+
Args:
|
| 329 |
+
shape:
|
| 330 |
+
The "shape" of the resulting Rotation object. See documentation
|
| 331 |
+
for the shape property
|
| 332 |
+
dtype:
|
| 333 |
+
The torch dtype for the rotation
|
| 334 |
+
device:
|
| 335 |
+
The torch device for the new rotation
|
| 336 |
+
requires_grad:
|
| 337 |
+
Whether the underlying tensors in the new rotation object
|
| 338 |
+
should require gradient computation
|
| 339 |
+
fmt:
|
| 340 |
+
One of "quat" or "rot_mat". Determines the underlying format
|
| 341 |
+
of the new object's rotation
|
| 342 |
+
Returns:
|
| 343 |
+
A new identity rotation
|
| 344 |
+
"""
|
| 345 |
+
if(fmt == "rot_mat"):
|
| 346 |
+
rot_mats = identity_rot_mats(
|
| 347 |
+
shape, dtype, device, requires_grad,
|
| 348 |
+
)
|
| 349 |
+
return Rotation(rot_mats=rot_mats, quats=None)
|
| 350 |
+
elif(fmt == "quat"):
|
| 351 |
+
quats = identity_quats(shape, dtype, device, requires_grad)
|
| 352 |
+
return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
|
| 353 |
+
else:
|
| 354 |
+
raise ValueError(f"Invalid format: f{fmt}")
|
| 355 |
+
|
| 356 |
+
# Magic methods
|
| 357 |
+
|
| 358 |
+
def __getitem__(self, index: Any) -> Rotation:
|
| 359 |
+
"""
|
| 360 |
+
Allows torch-style indexing over the virtual shape of the rotation
|
| 361 |
+
object. See documentation for the shape property.
|
| 362 |
+
|
| 363 |
+
Args:
|
| 364 |
+
index:
|
| 365 |
+
A torch index. E.g. (1, 3, 2), or (slice(None,))
|
| 366 |
+
Returns:
|
| 367 |
+
The indexed rotation
|
| 368 |
+
"""
|
| 369 |
+
if type(index) != tuple:
|
| 370 |
+
index = (index,)
|
| 371 |
+
|
| 372 |
+
if(self._rot_mats is not None):
|
| 373 |
+
rot_mats = self._rot_mats[index + (slice(None), slice(None))]
|
| 374 |
+
return Rotation(rot_mats=rot_mats)
|
| 375 |
+
elif(self._quats is not None):
|
| 376 |
+
quats = self._quats[index + (slice(None),)]
|
| 377 |
+
return Rotation(quats=quats, normalize_quats=False)
|
| 378 |
+
else:
|
| 379 |
+
raise ValueError("Both rotations are None")
|
| 380 |
+
|
| 381 |
+
def __mul__(self,
|
| 382 |
+
right: torch.Tensor,
|
| 383 |
+
) -> Rotation:
|
| 384 |
+
"""
|
| 385 |
+
Pointwise left multiplication of the rotation with a tensor. Can be
|
| 386 |
+
used to e.g. mask the Rotation.
|
| 387 |
+
|
| 388 |
+
Args:
|
| 389 |
+
right:
|
| 390 |
+
The tensor multiplicand
|
| 391 |
+
Returns:
|
| 392 |
+
The product
|
| 393 |
+
"""
|
| 394 |
+
if not(isinstance(right, torch.Tensor)):
|
| 395 |
+
raise TypeError("The other multiplicand must be a Tensor")
|
| 396 |
+
|
| 397 |
+
if(self._rot_mats is not None):
|
| 398 |
+
rot_mats = self._rot_mats * right[..., None, None]
|
| 399 |
+
return Rotation(rot_mats=rot_mats, quats=None)
|
| 400 |
+
elif(self._quats is not None):
|
| 401 |
+
quats = self._quats * right[..., None]
|
| 402 |
+
return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
|
| 403 |
+
else:
|
| 404 |
+
raise ValueError("Both rotations are None")
|
| 405 |
+
|
| 406 |
+
def __rmul__(self,
|
| 407 |
+
left: torch.Tensor,
|
| 408 |
+
) -> Rotation:
|
| 409 |
+
"""
|
| 410 |
+
Reverse pointwise multiplication of the rotation with a tensor.
|
| 411 |
+
|
| 412 |
+
Args:
|
| 413 |
+
left:
|
| 414 |
+
The left multiplicand
|
| 415 |
+
Returns:
|
| 416 |
+
The product
|
| 417 |
+
"""
|
| 418 |
+
return self.__mul__(left)
|
| 419 |
+
|
| 420 |
+
# Properties
|
| 421 |
+
|
| 422 |
+
@property
|
| 423 |
+
def shape(self) -> torch.Size:
|
| 424 |
+
"""
|
| 425 |
+
Returns the virtual shape of the rotation object. This shape is
|
| 426 |
+
defined as the batch dimensions of the underlying rotation matrix
|
| 427 |
+
or quaternion. If the Rotation was initialized with a [10, 3, 3]
|
| 428 |
+
rotation matrix tensor, for example, the resulting shape would be
|
| 429 |
+
[10].
|
| 430 |
+
|
| 431 |
+
Returns:
|
| 432 |
+
The virtual shape of the rotation object
|
| 433 |
+
"""
|
| 434 |
+
s = None
|
| 435 |
+
if(self._quats is not None):
|
| 436 |
+
s = self._quats.shape[:-1]
|
| 437 |
+
else:
|
| 438 |
+
s = self._rot_mats.shape[:-2]
|
| 439 |
+
|
| 440 |
+
return s
|
| 441 |
+
|
| 442 |
+
@property
|
| 443 |
+
def dtype(self) -> torch.dtype:
|
| 444 |
+
"""
|
| 445 |
+
Returns the dtype of the underlying rotation.
|
| 446 |
+
|
| 447 |
+
Returns:
|
| 448 |
+
The dtype of the underlying rotation
|
| 449 |
+
"""
|
| 450 |
+
if(self._rot_mats is not None):
|
| 451 |
+
return self._rot_mats.dtype
|
| 452 |
+
elif(self._quats is not None):
|
| 453 |
+
return self._quats.dtype
|
| 454 |
+
else:
|
| 455 |
+
raise ValueError("Both rotations are None")
|
| 456 |
+
|
| 457 |
+
@property
|
| 458 |
+
def device(self) -> torch.device:
|
| 459 |
+
"""
|
| 460 |
+
The device of the underlying rotation
|
| 461 |
+
|
| 462 |
+
Returns:
|
| 463 |
+
The device of the underlying rotation
|
| 464 |
+
"""
|
| 465 |
+
if(self._rot_mats is not None):
|
| 466 |
+
return self._rot_mats.device
|
| 467 |
+
elif(self._quats is not None):
|
| 468 |
+
return self._quats.device
|
| 469 |
+
else:
|
| 470 |
+
raise ValueError("Both rotations are None")
|
| 471 |
+
|
| 472 |
+
@property
|
| 473 |
+
def requires_grad(self) -> bool:
|
| 474 |
+
"""
|
| 475 |
+
Returns the requires_grad property of the underlying rotation
|
| 476 |
+
|
| 477 |
+
Returns:
|
| 478 |
+
The requires_grad property of the underlying tensor
|
| 479 |
+
"""
|
| 480 |
+
if(self._rot_mats is not None):
|
| 481 |
+
return self._rot_mats.requires_grad
|
| 482 |
+
elif(self._quats is not None):
|
| 483 |
+
return self._quats.requires_grad
|
| 484 |
+
else:
|
| 485 |
+
raise ValueError("Both rotations are None")
|
| 486 |
+
|
| 487 |
+
def get_rot_mats(self) -> torch.Tensor:
|
| 488 |
+
"""
|
| 489 |
+
Returns the underlying rotation as a rotation matrix tensor.
|
| 490 |
+
|
| 491 |
+
Returns:
|
| 492 |
+
The rotation as a rotation matrix tensor
|
| 493 |
+
"""
|
| 494 |
+
rot_mats = self._rot_mats
|
| 495 |
+
if(rot_mats is None):
|
| 496 |
+
if(self._quats is None):
|
| 497 |
+
raise ValueError("Both rotations are None")
|
| 498 |
+
else:
|
| 499 |
+
rot_mats = quat_to_rot(self._quats)
|
| 500 |
+
|
| 501 |
+
return rot_mats
|
| 502 |
+
|
| 503 |
+
def get_quats(self) -> torch.Tensor:
|
| 504 |
+
"""
|
| 505 |
+
Returns the underlying rotation as a quaternion tensor.
|
| 506 |
+
|
| 507 |
+
Depending on whether the Rotation was initialized with a
|
| 508 |
+
quaternion, this function may call torch.linalg.eigh.
|
| 509 |
+
|
| 510 |
+
Returns:
|
| 511 |
+
The rotation as a quaternion tensor.
|
| 512 |
+
"""
|
| 513 |
+
quats = self._quats
|
| 514 |
+
if(quats is None):
|
| 515 |
+
if(self._rot_mats is None):
|
| 516 |
+
raise ValueError("Both rotations are None")
|
| 517 |
+
else:
|
| 518 |
+
quats = rot_to_quat(self._rot_mats)
|
| 519 |
+
|
| 520 |
+
return quats
|
| 521 |
+
|
| 522 |
+
def get_cur_rot(self) -> torch.Tensor:
|
| 523 |
+
"""
|
| 524 |
+
Return the underlying rotation in its current form
|
| 525 |
+
|
| 526 |
+
Returns:
|
| 527 |
+
The stored rotation
|
| 528 |
+
"""
|
| 529 |
+
if(self._rot_mats is not None):
|
| 530 |
+
return self._rot_mats
|
| 531 |
+
elif(self._quats is not None):
|
| 532 |
+
return self._quats
|
| 533 |
+
else:
|
| 534 |
+
raise ValueError("Both rotations are None")
|
| 535 |
+
|
| 536 |
+
# Rotation functions
|
| 537 |
+
|
| 538 |
+
def compose_q_update_vec(self,
|
| 539 |
+
q_update_vec: torch.Tensor,
|
| 540 |
+
normalize_quats: bool = True
|
| 541 |
+
) -> Rotation:
|
| 542 |
+
"""
|
| 543 |
+
Returns a new quaternion Rotation after updating the current
|
| 544 |
+
object's underlying rotation with a quaternion update, formatted
|
| 545 |
+
as a [*, 3] tensor whose final three columns represent x, y, z such
|
| 546 |
+
that (1, x, y, z) is the desired (not necessarily unit) quaternion
|
| 547 |
+
update.
|
| 548 |
+
|
| 549 |
+
Args:
|
| 550 |
+
q_update_vec:
|
| 551 |
+
A [*, 3] quaternion update tensor
|
| 552 |
+
normalize_quats:
|
| 553 |
+
Whether to normalize the output quaternion
|
| 554 |
+
Returns:
|
| 555 |
+
An updated Rotation
|
| 556 |
+
"""
|
| 557 |
+
quats = self.get_quats()
|
| 558 |
+
new_quats = quats + quat_multiply_by_vec(quats, q_update_vec)
|
| 559 |
+
return Rotation(
|
| 560 |
+
rot_mats=None,
|
| 561 |
+
quats=new_quats,
|
| 562 |
+
normalize_quats=normalize_quats,
|
| 563 |
+
)
|
| 564 |
+
|
| 565 |
+
def compose_r(self, r: Rotation) -> Rotation:
|
| 566 |
+
"""
|
| 567 |
+
Compose the rotation matrices of the current Rotation object with
|
| 568 |
+
those of another.
|
| 569 |
+
|
| 570 |
+
Args:
|
| 571 |
+
r:
|
| 572 |
+
An update rotation object
|
| 573 |
+
Returns:
|
| 574 |
+
An updated rotation object
|
| 575 |
+
"""
|
| 576 |
+
r1 = self.get_rot_mats()
|
| 577 |
+
r2 = r.get_rot_mats()
|
| 578 |
+
new_rot_mats = rot_matmul(r1, r2)
|
| 579 |
+
return Rotation(rot_mats=new_rot_mats, quats=None)
|
| 580 |
+
|
| 581 |
+
def compose_q(self, r: Rotation, normalize_quats: bool = True) -> Rotation:
|
| 582 |
+
"""
|
| 583 |
+
Compose the quaternions of the current Rotation object with those
|
| 584 |
+
of another.
|
| 585 |
+
|
| 586 |
+
Depending on whether either Rotation was initialized with
|
| 587 |
+
quaternions, this function may call torch.linalg.eigh.
|
| 588 |
+
|
| 589 |
+
Args:
|
| 590 |
+
r:
|
| 591 |
+
An update rotation object
|
| 592 |
+
Returns:
|
| 593 |
+
An updated rotation object
|
| 594 |
+
"""
|
| 595 |
+
q1 = self.get_quats()
|
| 596 |
+
q2 = r.get_quats()
|
| 597 |
+
new_quats = quat_multiply(q1, q2)
|
| 598 |
+
return Rotation(
|
| 599 |
+
rot_mats=None, quats=new_quats, normalize_quats=normalize_quats
|
| 600 |
+
)
|
| 601 |
+
|
| 602 |
+
def apply(self, pts: torch.Tensor) -> torch.Tensor:
|
| 603 |
+
"""
|
| 604 |
+
Apply the current Rotation as a rotation matrix to a set of 3D
|
| 605 |
+
coordinates.
|
| 606 |
+
|
| 607 |
+
Args:
|
| 608 |
+
pts:
|
| 609 |
+
A [*, 3] set of points
|
| 610 |
+
Returns:
|
| 611 |
+
[*, 3] rotated points
|
| 612 |
+
"""
|
| 613 |
+
rot_mats = self.get_rot_mats()
|
| 614 |
+
return rot_vec_mul(rot_mats, pts)
|
| 615 |
+
|
| 616 |
+
def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:
|
| 617 |
+
"""
|
| 618 |
+
The inverse of the apply() method.
|
| 619 |
+
|
| 620 |
+
Args:
|
| 621 |
+
pts:
|
| 622 |
+
A [*, 3] set of points
|
| 623 |
+
Returns:
|
| 624 |
+
[*, 3] inverse-rotated points
|
| 625 |
+
"""
|
| 626 |
+
rot_mats = self.get_rot_mats()
|
| 627 |
+
inv_rot_mats = invert_rot_mat(rot_mats)
|
| 628 |
+
return rot_vec_mul(inv_rot_mats, pts)
|
| 629 |
+
|
| 630 |
+
def invert(self) -> Rotation:
|
| 631 |
+
"""
|
| 632 |
+
Returns the inverse of the current Rotation.
|
| 633 |
+
|
| 634 |
+
Returns:
|
| 635 |
+
The inverse of the current Rotation
|
| 636 |
+
"""
|
| 637 |
+
if(self._rot_mats is not None):
|
| 638 |
+
return Rotation(
|
| 639 |
+
rot_mats=invert_rot_mat(self._rot_mats),
|
| 640 |
+
quats=None
|
| 641 |
+
)
|
| 642 |
+
elif(self._quats is not None):
|
| 643 |
+
return Rotation(
|
| 644 |
+
rot_mats=None,
|
| 645 |
+
quats=invert_quat(self._quats),
|
| 646 |
+
normalize_quats=False,
|
| 647 |
+
)
|
| 648 |
+
else:
|
| 649 |
+
raise ValueError("Both rotations are None")
|
| 650 |
+
|
| 651 |
+
# "Tensor" stuff
|
| 652 |
+
|
| 653 |
+
def unsqueeze(self,
|
| 654 |
+
dim: int,
|
| 655 |
+
) -> Rigid:
|
| 656 |
+
"""
|
| 657 |
+
Analogous to torch.unsqueeze. The dimension is relative to the
|
| 658 |
+
shape of the Rotation object.
|
| 659 |
+
|
| 660 |
+
Args:
|
| 661 |
+
dim: A positive or negative dimension index.
|
| 662 |
+
Returns:
|
| 663 |
+
The unsqueezed Rotation.
|
| 664 |
+
"""
|
| 665 |
+
if dim >= len(self.shape):
|
| 666 |
+
raise ValueError("Invalid dimension")
|
| 667 |
+
|
| 668 |
+
if(self._rot_mats is not None):
|
| 669 |
+
rot_mats = self._rot_mats.unsqueeze(dim if dim >= 0 else dim - 2)
|
| 670 |
+
return Rotation(rot_mats=rot_mats, quats=None)
|
| 671 |
+
elif(self._quats is not None):
|
| 672 |
+
quats = self._quats.unsqueeze(dim if dim >= 0 else dim - 1)
|
| 673 |
+
return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
|
| 674 |
+
else:
|
| 675 |
+
raise ValueError("Both rotations are None")
|
| 676 |
+
|
| 677 |
+
@staticmethod
|
| 678 |
+
def cat(
|
| 679 |
+
rs: Sequence[Rotation],
|
| 680 |
+
dim: int,
|
| 681 |
+
) -> Rigid:
|
| 682 |
+
"""
|
| 683 |
+
Concatenates rotations along one of the batch dimensions. Analogous
|
| 684 |
+
to torch.cat().
|
| 685 |
+
|
| 686 |
+
Note that the output of this operation is always a rotation matrix,
|
| 687 |
+
regardless of the format of input rotations.
|
| 688 |
+
|
| 689 |
+
Args:
|
| 690 |
+
rs:
|
| 691 |
+
A list of rotation objects
|
| 692 |
+
dim:
|
| 693 |
+
The dimension along which the rotations should be
|
| 694 |
+
concatenated
|
| 695 |
+
Returns:
|
| 696 |
+
A concatenated Rotation object in rotation matrix format
|
| 697 |
+
"""
|
| 698 |
+
rot_mats = [r.get_rot_mats() for r in rs]
|
| 699 |
+
rot_mats = torch.cat(rot_mats, dim=dim if dim >= 0 else dim - 2)
|
| 700 |
+
|
| 701 |
+
return Rotation(rot_mats=rot_mats, quats=None)
|
| 702 |
+
|
| 703 |
+
def map_tensor_fn(self,
|
| 704 |
+
fn: Callable[torch.Tensor, torch.Tensor]
|
| 705 |
+
) -> Rotation:
|
| 706 |
+
"""
|
| 707 |
+
Apply a Tensor -> Tensor function to underlying rotation tensors,
|
| 708 |
+
mapping over the rotation dimension(s). Can be used e.g. to sum out
|
| 709 |
+
a one-hot batch dimension.
|
| 710 |
+
|
| 711 |
+
Args:
|
| 712 |
+
fn:
|
| 713 |
+
A Tensor -> Tensor function to be mapped over the Rotation
|
| 714 |
+
Returns:
|
| 715 |
+
The transformed Rotation object
|
| 716 |
+
"""
|
| 717 |
+
if(self._rot_mats is not None):
|
| 718 |
+
rot_mats = self._rot_mats.view(self._rot_mats.shape[:-2] + (9,))
|
| 719 |
+
rot_mats = torch.stack(
|
| 720 |
+
list(map(fn, torch.unbind(rot_mats, dim=-1))), dim=-1
|
| 721 |
+
)
|
| 722 |
+
rot_mats = rot_mats.view(rot_mats.shape[:-1] + (3, 3))
|
| 723 |
+
return Rotation(rot_mats=rot_mats, quats=None)
|
| 724 |
+
elif(self._quats is not None):
|
| 725 |
+
quats = torch.stack(
|
| 726 |
+
list(map(fn, torch.unbind(self._quats, dim=-1))), dim=-1
|
| 727 |
+
)
|
| 728 |
+
return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
|
| 729 |
+
else:
|
| 730 |
+
raise ValueError("Both rotations are None")
|
| 731 |
+
|
| 732 |
+
def cuda(self) -> Rotation:
|
| 733 |
+
"""
|
| 734 |
+
Analogous to the cuda() method of torch Tensors
|
| 735 |
+
|
| 736 |
+
Returns:
|
| 737 |
+
A copy of the Rotation in CUDA memory
|
| 738 |
+
"""
|
| 739 |
+
if(self._rot_mats is not None):
|
| 740 |
+
return Rotation(rot_mats=self._rot_mats.cuda(), quats=None)
|
| 741 |
+
elif(self._quats is not None):
|
| 742 |
+
return Rotation(
|
| 743 |
+
rot_mats=None,
|
| 744 |
+
quats=self._quats.cuda(),
|
| 745 |
+
normalize_quats=False
|
| 746 |
+
)
|
| 747 |
+
else:
|
| 748 |
+
raise ValueError("Both rotations are None")
|
| 749 |
+
|
| 750 |
+
def to(self,
|
| 751 |
+
device: Optional[torch.device],
|
| 752 |
+
dtype: Optional[torch.dtype]
|
| 753 |
+
) -> Rotation:
|
| 754 |
+
"""
|
| 755 |
+
Analogous to the to() method of torch Tensors
|
| 756 |
+
|
| 757 |
+
Args:
|
| 758 |
+
device:
|
| 759 |
+
A torch device
|
| 760 |
+
dtype:
|
| 761 |
+
A torch dtype
|
| 762 |
+
Returns:
|
| 763 |
+
A copy of the Rotation using the new device and dtype
|
| 764 |
+
"""
|
| 765 |
+
if(self._rot_mats is not None):
|
| 766 |
+
return Rotation(
|
| 767 |
+
rot_mats=self._rot_mats.to(device=device, dtype=dtype),
|
| 768 |
+
quats=None,
|
| 769 |
+
)
|
| 770 |
+
elif(self._quats is not None):
|
| 771 |
+
return Rotation(
|
| 772 |
+
rot_mats=None,
|
| 773 |
+
quats=self._quats.to(device=device, dtype=dtype),
|
| 774 |
+
normalize_quats=False,
|
| 775 |
+
)
|
| 776 |
+
else:
|
| 777 |
+
raise ValueError("Both rotations are None")
|
| 778 |
+
|
| 779 |
+
def detach(self) -> Rotation:
|
| 780 |
+
"""
|
| 781 |
+
Returns a copy of the Rotation whose underlying Tensor has been
|
| 782 |
+
detached from its torch graph.
|
| 783 |
+
|
| 784 |
+
Returns:
|
| 785 |
+
A copy of the Rotation whose underlying Tensor has been detached
|
| 786 |
+
from its torch graph
|
| 787 |
+
"""
|
| 788 |
+
if(self._rot_mats is not None):
|
| 789 |
+
return Rotation(rot_mats=self._rot_mats.detach(), quats=None)
|
| 790 |
+
elif(self._quats is not None):
|
| 791 |
+
return Rotation(
|
| 792 |
+
rot_mats=None,
|
| 793 |
+
quats=self._quats.detach(),
|
| 794 |
+
normalize_quats=False,
|
| 795 |
+
)
|
| 796 |
+
else:
|
| 797 |
+
raise ValueError("Both rotations are None")
|
| 798 |
+
|
| 799 |
+
|
| 800 |
+
class Rigid:
    """
    A class representing a rigid transformation. Little more than a wrapper
    around two objects: a Rotation object and a [*, 3] translation
    Designed to behave approximately like a single torch tensor with the
    shape of the shared batch dimensions of its component parts.
    """
    def __init__(self,
        rots: Optional[Rotation],
        trans: Optional[torch.Tensor],
    ):
        """
        Args:
            rots: A [*, 3, 3] rotation tensor
            trans: A corresponding [*, 3] translation tensor
        """
        # (we need device, dtype, etc. from at least one input)

        batch_dims, dtype, device, requires_grad = None, None, None, None
        if(trans is not None):
            batch_dims = trans.shape[:-1]
            dtype = trans.dtype
            device = trans.device
            requires_grad = trans.requires_grad
        elif(rots is not None):
            batch_dims = rots.shape
            dtype = rots.dtype
            device = rots.device
            requires_grad = rots.requires_grad
        else:
            raise ValueError("At least one input argument must be specified")

        # Whichever component was omitted is replaced by an identity of the
        # matching batch shape/device/dtype.
        if(rots is None):
            rots = Rotation.identity(
                batch_dims, dtype, device, requires_grad,
            )
        elif(trans is None):
            trans = identity_trans(
                batch_dims, dtype, device, requires_grad,
            )

        if((rots.shape != trans.shape[:-1]) or
           (rots.device != trans.device)):
            raise ValueError("Rots and trans incompatible")

        # Force full precision. Happens to the rotations automatically.
        trans = trans.to(dtype=torch.float32)

        self._rots = rots
        self._trans = trans

    @staticmethod
    def identity(
        shape: Tuple[int],
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
        requires_grad: bool = True,
        fmt: str = "quat",
    ) -> Rigid:
        """
        Constructs an identity transformation.

        Args:
            shape:
                The desired shape
            dtype:
                The dtype of both internal tensors
            device:
                The device of both internal tensors
            requires_grad:
                Whether grad should be enabled for the internal tensors
            fmt:
                Internal rotation format, forwarded to Rotation.identity
        Returns:
            The identity transformation
        """
        return Rigid(
            Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),
            identity_trans(shape, dtype, device, requires_grad),
        )

    def __getitem__(self,
        index: Any,
    ) -> Rigid:
        """
        Indexes the affine transformation with PyTorch-style indices.
        The index is applied to the shared dimensions of both the rotation
        and the translation.

        E.g.::

            r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)
            t = Rigid(r, torch.rand(10, 10, 3))
            indexed = t[3, 4:6]
            assert(indexed.shape == (2,))
            assert(indexed.get_rots().shape == (2,))
            assert(indexed.get_trans().shape == (2, 3))

        Args:
            index: A standard torch tensor index. E.g. 8, (10, None, 3),
                or (3, slice(0, 1, None))
        Returns:
            The indexed tensor
        """
        if type(index) != tuple:
            index = (index,)

        # The trailing slice preserves the [3] coordinate dim of the
        # translation; the Rotation handles its own trailing dims.
        return Rigid(
            self._rots[index],
            self._trans[index + (slice(None),)],
        )

    def __mul__(self,
        right: torch.Tensor,
    ) -> Rigid:
        """
        Pointwise left multiplication of the transformation with a tensor.
        Can be used to e.g. mask the Rigid.

        Args:
            right:
                The tensor multiplicand
        Returns:
            The product
        """
        if not(isinstance(right, torch.Tensor)):
            raise TypeError("The other multiplicand must be a Tensor")

        new_rots = self._rots * right
        new_trans = self._trans * right[..., None]

        return Rigid(new_rots, new_trans)

    def __rmul__(self,
        left: torch.Tensor,
    ) -> Rigid:
        """
        Reverse pointwise multiplication of the transformation with a
        tensor.

        Args:
            left:
                The left multiplicand
        Returns:
            The product
        """
        return self.__mul__(left)

    @property
    def shape(self) -> torch.Size:
        """
        Returns the shape of the shared dimensions of the rotation and
        the translation.

        Returns:
            The shape of the transformation
        """
        s = self._trans.shape[:-1]
        return s

    @property
    def device(self) -> torch.device:
        """
        Returns the device on which the Rigid's tensors are located.

        Returns:
            The device on which the Rigid's tensors are located
        """
        return self._trans.device

    def get_rots(self) -> Rotation:
        """
        Getter for the rotation.

        Returns:
            The rotation object
        """
        return self._rots

    def get_trans(self) -> torch.Tensor:
        """
        Getter for the translation.

        Returns:
            The stored translation
        """
        return self._trans

    def compose_q_update_vec(self,
        q_update_vec: torch.Tensor,
    ) -> Rigid:
        """
        Composes the transformation with a quaternion update vector of
        shape [*, 6], where the final 6 columns represent the x, y, and
        z values of a quaternion of form (1, x, y, z) followed by a 3D
        translation.

        Args:
            q_vec: The quaternion update vector.
        Returns:
            The composed transformation.
        """
        q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]
        new_rots = self._rots.compose_q_update_vec(q_vec)

        # The translation update is expressed in the local frame, so it is
        # rotated by the *current* rotation before being added.
        trans_update = self._rots.apply(t_vec)
        new_translation = self._trans + trans_update

        return Rigid(new_rots, new_translation)

    def compose(self,
        r: Rigid,
    ) -> Rigid:
        """
        Composes the current rigid object with another.

        Args:
            r:
                Another Rigid object
        Returns:
            The composition of the two transformations
        """
        new_rot = self._rots.compose_r(r._rots)
        new_trans = self._rots.apply(r._trans) + self._trans
        return Rigid(new_rot, new_trans)

    def apply(self,
        pts: torch.Tensor,
    ) -> torch.Tensor:
        """
        Applies the transformation to a coordinate tensor.

        Args:
            pts: A [*, 3] coordinate tensor.
        Returns:
            The transformed points.
        """
        rotated = self._rots.apply(pts)
        return rotated + self._trans

    def invert_apply(self,
        pts: torch.Tensor
    ) -> torch.Tensor:
        """
        Applies the inverse of the transformation to a coordinate tensor.

        Args:
            pts: A [*, 3] coordinate tensor
        Returns:
            The transformed points.
        """
        # Inverse of (R x + t) is R^-1 (x - t): subtract first, then rotate
        pts = pts - self._trans
        return self._rots.invert_apply(pts)

    def invert(self) -> Rigid:
        """
        Inverts the transformation.

        Returns:
            The inverse transformation.
        """
        rot_inv = self._rots.invert()
        trn_inv = rot_inv.apply(self._trans)

        return Rigid(rot_inv, -1 * trn_inv)

    def map_tensor_fn(self,
        fn: Callable[[torch.Tensor], torch.Tensor]
    ) -> Rigid:
        """
        Apply a Tensor -> Tensor function to underlying translation and
        rotation tensors, mapping over the translation/rotation dimensions
        respectively.

        Args:
            fn:
                A Tensor -> Tensor function to be mapped over the Rigid
        Returns:
            The transformed Rigid object
        """
        new_rots = self._rots.map_tensor_fn(fn)
        new_trans = torch.stack(
            list(map(fn, torch.unbind(self._trans, dim=-1))),
            dim=-1
        )

        return Rigid(new_rots, new_trans)

    def to_tensor_4x4(self) -> torch.Tensor:
        """
        Converts a transformation to a homogenous transformation tensor.

        Returns:
            A [*, 4, 4] homogenous transformation tensor
        """
        tensor = self._trans.new_zeros((*self.shape, 4, 4))
        tensor[..., :3, :3] = self._rots.get_rot_mats()
        tensor[..., :3, 3] = self._trans
        tensor[..., 3, 3] = 1
        return tensor

    @staticmethod
    def from_tensor_4x4(
        t: torch.Tensor
    ) -> Rigid:
        """
        Constructs a transformation from a homogenous transformation
        tensor.

        Args:
            t: [*, 4, 4] homogenous transformation tensor
        Returns:
            T object with shape [*]
        """
        if(t.shape[-2:] != (4, 4)):
            raise ValueError("Incorrectly shaped input tensor")

        rots = Rotation(rot_mats=t[..., :3, :3], quats=None)
        trans = t[..., :3, 3]

        return Rigid(rots, trans)

    def to_tensor_7(self) -> torch.Tensor:
        """
        Converts a transformation to a tensor with 7 final columns, four
        for the quaternion followed by three for the translation.

        Returns:
            A [*, 7] tensor representation of the transformation
        """
        tensor = self._trans.new_zeros((*self.shape, 7))
        tensor[..., :4] = self._rots.get_quats()
        tensor[..., 4:] = self._trans

        return tensor

    @staticmethod
    def from_tensor_7(
        t: torch.Tensor,
        normalize_quats: bool = False,
    ) -> Rigid:
        """
        Constructs a transformation from a [*, 7] tensor whose final
        columns hold four quaternion values followed by a 3D translation
        (the inverse of to_tensor_7).

        Args:
            t: A [*, 7] quaternion/translation tensor
            normalize_quats: Whether to normalize the read quaternion
        Returns:
            A transformation object of shape [*]
        """
        if(t.shape[-1] != 7):
            raise ValueError("Incorrectly shaped input tensor")

        quats, trans = t[..., :4], t[..., 4:]

        rots = Rotation(
            rot_mats=None,
            quats=quats,
            normalize_quats=normalize_quats
        )

        return Rigid(rots, trans)

    @staticmethod
    def from_3_points(
        p_neg_x_axis: torch.Tensor,
        origin: torch.Tensor,
        p_xy_plane: torch.Tensor,
        eps: float = 1e-8
    ) -> Rigid:
        """
        Implements algorithm 21. Constructs transformations from sets of 3
        points using the Gram-Schmidt algorithm.

        Args:
            p_neg_x_axis: [*, 3] coordinates
            origin: [*, 3] coordinates used as frame origins
            p_xy_plane: [*, 3] coordinates
            eps: Small epsilon value
        Returns:
            A transformation object of shape [*]
        """
        # Work on per-coordinate lists so each step is a cheap elementwise op
        p_neg_x_axis = torch.unbind(p_neg_x_axis, dim=-1)
        origin = torch.unbind(origin, dim=-1)
        p_xy_plane = torch.unbind(p_xy_plane, dim=-1)

        e0 = [c1 - c2 for c1, c2 in zip(origin, p_neg_x_axis)]
        e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane, origin)]

        # Gram-Schmidt: normalize e0, orthogonalize e1 against it, then
        # normalize e1; e2 is their cross product
        denom = torch.sqrt(sum((c * c for c in e0)) + eps)
        e0 = [c / denom for c in e0]
        dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))
        e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]
        denom = torch.sqrt(sum((c * c for c in e1)) + eps)
        e1 = [c / denom for c in e1]
        e2 = [
            e0[1] * e1[2] - e0[2] * e1[1],
            e0[2] * e1[0] - e0[0] * e1[2],
            e0[0] * e1[1] - e0[1] * e1[0],
        ]

        rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)
        rots = rots.reshape(rots.shape[:-1] + (3, 3))

        rot_obj = Rotation(rot_mats=rots, quats=None)

        return Rigid(rot_obj, torch.stack(origin, dim=-1))

    def unsqueeze(self,
        dim: int,
    ) -> Rigid:
        """
        Analogous to torch.unsqueeze. The dimension is relative to the
        shared dimensions of the rotation/translation.

        Args:
            dim: A positive or negative dimension index.
        Returns:
            The unsqueezed transformation.
        """
        if dim >= len(self.shape):
            raise ValueError("Invalid dimension")
        # Negative indices must skip the translation's trailing [3] dim
        rots = self._rots.unsqueeze(dim)
        trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)

        return Rigid(rots, trans)

    @staticmethod
    def cat(
        ts: Sequence[Rigid],
        dim: int,
    ) -> Rigid:
        """
        Concatenates transformations along a new dimension.

        Args:
            ts:
                A list of T objects
            dim:
                The dimension along which the transformations should be
                concatenated
        Returns:
            A concatenated transformation object
        """
        rots = Rotation.cat([t._rots for t in ts], dim)
        trans = torch.cat(
            [t._trans for t in ts], dim=dim if dim >= 0 else dim - 1
        )

        return Rigid(rots, trans)

    def apply_rot_fn(self, fn: Callable[[Rotation], Rotation]) -> Rigid:
        """
        Applies a Rotation -> Rotation function to the stored rotation
        object.

        Args:
            fn: A function of type Rotation -> Rotation
        Returns:
            A transformation object with a transformed rotation.
        """
        return Rigid(fn(self._rots), self._trans)

    def apply_trans_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid:
        """
        Applies a Tensor -> Tensor function to the stored translation.

        Args:
            fn:
                A function of type Tensor -> Tensor to be applied to the
                translation
        Returns:
            A transformation object with a transformed translation.
        """
        return Rigid(self._rots, fn(self._trans))

    def scale_translation(self, trans_scale_factor: float) -> Rigid:
        """
        Scales the translation by a constant factor.

        Args:
            trans_scale_factor:
                The constant factor
        Returns:
            A transformation object with a scaled translation.
        """
        fn = lambda t: t * trans_scale_factor
        return self.apply_trans_fn(fn)

    def stop_rot_gradient(self) -> Rigid:
        """
        Detaches the underlying rotation object

        Returns:
            A transformation object with detached rotations
        """
        fn = lambda r: r.detach()
        return self.apply_rot_fn(fn)

    @staticmethod
    def make_transform_from_reference(
        n_xyz: torch.Tensor,
        ca_xyz: torch.Tensor,
        c_xyz: torch.Tensor,
        eps: float = 1e-20
    ) -> Rigid:
        """
        Returns a transformation object from reference coordinates.

        Note that this method does not take care of symmetries. If you
        provide the atom positions in the non-standard way, the N atom will
        end up not at [-0.527250, 1.359329, 0.0] but instead at
        [-0.527250, -1.359329, 0.0]. You need to take care of such cases in
        your code.

        Args:
            n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.
            ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.
            c_xyz: A [*, 3] tensor of carbon xyz coordinates.
        Returns:
            A transformation object. After applying the translation and
            rotation to the reference backbone, the coordinates will
            approximately equal to the input coordinates.
        """
        # Recenter so CA sits at the origin
        translation = -1 * ca_xyz
        n_xyz = n_xyz + translation
        c_xyz = c_xyz + translation

        # First rotation (about z) brings C into the xz half-plane
        c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]
        norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2)
        sin_c1 = -c_y / norm
        cos_c1 = c_x / norm
        zeros = sin_c1.new_zeros(sin_c1.shape)
        ones = sin_c1.new_ones(sin_c1.shape)

        c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))
        c1_rots[..., 0, 0] = cos_c1
        c1_rots[..., 0, 1] = -1 * sin_c1
        c1_rots[..., 1, 0] = sin_c1
        c1_rots[..., 1, 1] = cos_c1
        c1_rots[..., 2, 2] = 1

        # Second rotation (about y) brings C onto the x axis
        norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2)
        sin_c2 = c_z / norm
        cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm

        c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))
        c2_rots[..., 0, 0] = cos_c2
        c2_rots[..., 0, 2] = sin_c2
        c2_rots[..., 1, 1] = 1
        c2_rots[..., 2, 0] = -1 * sin_c2
        c2_rots[..., 2, 2] = cos_c2

        c_rots = rot_matmul(c2_rots, c1_rots)
        n_xyz = rot_vec_mul(c_rots, n_xyz)

        # Third rotation (about x) brings N into the xy plane
        _, n_y, n_z = [n_xyz[..., i] for i in range(3)]
        norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2)
        sin_n = -n_z / norm
        cos_n = n_y / norm

        n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))
        n_rots[..., 0, 0] = 1
        n_rots[..., 1, 1] = cos_n
        n_rots[..., 1, 2] = -1 * sin_n
        n_rots[..., 2, 1] = sin_n
        n_rots[..., 2, 2] = cos_n

        rots = rot_matmul(n_rots, c_rots)

        # Invert: the returned transform maps the reference frame onto the
        # input coordinates, not the other way around
        rots = rots.transpose(-1, -2)
        translation = -1 * translation

        rot_obj = Rotation(rot_mats=rots, quats=None)

        return Rigid(rot_obj, translation)

    def cuda(self) -> Rigid:
        """
        Moves the transformation object to GPU memory

        Returns:
            A version of the transformation on GPU
        """
        return Rigid(self._rots.cuda(), self._trans.cuda())
openfold/tensor_utils.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 AlQuraishi Laboratory
|
| 2 |
+
# Copyright 2021 DeepMind Technologies Limited
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
from functools import partial
|
| 17 |
+
import logging
|
| 18 |
+
from typing import Tuple, List, Callable, Any, Dict, Sequence, Optional
|
| 19 |
+
|
| 20 |
+
import torch
|
| 21 |
+
import torch.nn as nn
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def add(m1, m2, inplace):
    """
    Adds m2 to m1, either out-of-place or in-place.

    The first operation in a checkpoint can't be in-place, but in-place
    addition is nice to have during inference — hence the switch.

    Args:
        m1: The accumuland tensor
        m2: The tensor to add
        inplace: If True, accumulate directly into m1's storage
    Returns:
        The sum (m1 itself when inplace is True)
    """
    if inplace:
        m1 += m2
    else:
        m1 = m1 + m2
    return m1
+
def permute_final_dims(tensor: torch.Tensor, inds: List[int]):
    """
    Permutes the final len(inds) dimensions of a tensor, leaving any
    leading (batch) dimensions untouched.

    Args:
        tensor: The tensor to permute
        inds: The desired order of the final dimensions, given relative
            to the first of those dimensions (e.g. [1, 0] swaps the last
            two)
    Returns:
        The permuted tensor
    """
    n_final = len(inds)
    batch_rank = len(tensor.shape[:-n_final])
    leading = list(range(batch_rank))
    # Negative permute indices count back from the end of the shape
    trailing = [-n_final + i for i in inds]
    return tensor.permute(leading + trailing)
def flatten_final_dims(t: torch.Tensor, no_dims: int):
    """
    Collapses the final no_dims dimensions of a tensor into a single
    dimension.

    Args:
        t: The input tensor
        no_dims: The number of trailing dimensions to merge
    Returns:
        The flattened tensor
    """
    leading_shape = t.shape[:-no_dims]
    return t.reshape(leading_shape + (-1,))
def masked_mean(mask, value, dim, eps=1e-4):
    """Mean of ``value`` over ``dim``, weighted by ``mask``.

    ``eps`` guards the denominator against division by zero when the
    mask is all zero along ``dim``.
    """
    mask = mask.expand(*value.shape)
    weighted_sum = torch.sum(value * mask, dim=dim)
    normalizer = torch.sum(mask, dim=dim) + eps
    return weighted_sum / normalizer
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def pts_to_distogram(pts, min_bin=2.3125, max_bin=21.6875, no_bins=64):
    """Bucketize pairwise point distances into distogram bin indices.

    Returns, for every pair of points, the index of its distance bin;
    the ``no_bins`` bins are separated by ``no_bins - 1`` boundaries
    spread linearly over ``[min_bin, max_bin]``.
    """
    boundaries = torch.linspace(
        min_bin, max_bin, no_bins - 1, device=pts.device
    )
    diffs = pts.unsqueeze(-2) - pts.unsqueeze(-3)
    dists = torch.sqrt((diffs ** 2).sum(dim=-1))
    return torch.bucketize(dists, boundaries)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def dict_multimap(fn, dicts):
    """Apply ``fn`` across a list of structurally identical dicts.

    For every key of the first dict, the corresponding values from all
    dicts are gathered into a list; nested dicts are recursed into, and
    ``fn`` is called on each list of leaf values.
    """
    template = dicts[0]
    out = {}
    for key, value in template.items():
        gathered = [d[key] for d in dicts]
        # match only plain dicts, exactly as the original semantics require
        if type(value) is dict:
            out[key] = dict_multimap(fn, gathered)
        else:
            out[key] = fn(gathered)
    return out
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def one_hot(x, v_bins):
    """One-hot encode ``x`` against the nearest value in ``v_bins``.

    Each element of ``x`` is assigned to the bin whose value is closest
    in absolute difference; the result has one extra trailing dimension
    of size ``len(v_bins)``.
    """
    reshaped_bins = v_bins.view(((1,) * x.dim()) + (len(v_bins),))
    nearest = torch.argmin((x[..., None] - reshaped_bins).abs(), dim=-1)
    return nn.functional.one_hot(nearest, num_classes=len(v_bins)).float()
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def batched_gather(data, inds, dim=0, no_batch_dims=0):
    """Gather along ``dim`` with per-batch-element indices.

    The first ``no_batch_dims`` dimensions of ``data`` are treated as
    batch dimensions; ``inds`` selects entries along dimension ``dim``
    independently for each batch element.

    :param data: tensor to gather from
    :param inds: index tensor (its leading dims align with the batch dims)
    :param dim: absolute dimension of ``data`` being indexed
    :param no_batch_dims: number of leading batch dimensions
    :return: the gathered tensor
    """
    ranges = []
    for i, s in enumerate(data.shape[:no_batch_dims]):
        r = torch.arange(s)
        # reshape so the batch index broadcasts against ``inds``
        r = r.view(*(*((1,) * i), -1, *((1,) * (len(inds.shape) - i - 1))))
        ranges.append(r)

    remaining_dims = [
        slice(None) for _ in range(len(data.shape) - no_batch_dims)
    ]
    remaining_dims[dim - no_batch_dims if dim >= 0 else dim] = inds
    ranges.extend(remaining_dims)
    # Index with a tuple: advanced indexing with a plain Python list that
    # mixes tensors and slices is deprecated and raises in newer PyTorch.
    return data[tuple(ranges)]
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
# With tree_map, a poor man's JAX tree_map
def dict_map(fn, dic, leaf_type):
    """Recursively apply ``fn`` to every ``leaf_type`` leaf of a nested dict.

    Plain-dict values are recursed into directly; everything else is
    delegated to :func:`tree_map`, which also handles lists and tuples.
    """
    new_dict = {}
    for k, v in dic.items():
        if type(v) is dict:
            new_dict[k] = dict_map(fn, v, leaf_type)
        else:
            new_dict[k] = tree_map(fn, v, leaf_type)

    return new_dict


def tree_map(fn, tree, leaf_type):
    """Apply ``fn`` to every ``leaf_type`` leaf of a nested structure.

    Supported containers are dicts, lists and tuples; any other value
    that is not an instance of ``leaf_type`` raises ``ValueError``.
    """
    if isinstance(tree, dict):
        return dict_map(fn, tree, leaf_type)
    elif isinstance(tree, list):
        return [tree_map(fn, x, leaf_type) for x in tree]
    elif isinstance(tree, tuple):
        return tuple(tree_map(fn, x, leaf_type) for x in tree)
    elif isinstance(tree, leaf_type):
        return fn(tree)
    else:
        # Put the offending type in the exception message instead of
        # printing it, so it survives in tracebacks and logs.
        raise ValueError(f"Not supported: {type(tree)}")


tensor_tree_map = partial(tree_map, leaf_type=torch.Tensor)
|
parse_complexes.py
CHANGED
|
@@ -22,6 +22,12 @@ from rdkit.Chem.Descriptors import ExactMolWt
|
|
| 22 |
import numpy as np
|
| 23 |
|
| 24 |
import os
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
|
| 26 |
# minimum molecular weight to consider sth a ligand
|
| 27 |
mol_wt_cutoff = 100
|
|
@@ -35,9 +41,6 @@ punctuation_regex = r"""(\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>>?|\*|\$|\%[0-9]{2}|
|
|
| 35 |
# tokenization regex (Schwaller)
|
| 36 |
molecule_regex = r"""(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>>?|\*|\$|\%[0-9]{2}|[0-9])"""
|
| 37 |
|
| 38 |
-
max_seq = 2046 # = 2048 - 2 (accounting for [CLS] and [SEP])
|
| 39 |
-
max_smiles = 510 # = 512 - 2
|
| 40 |
-
|
| 41 |
# filter out these common additives which occur in more than 75 complexes in the PDB
|
| 42 |
ubiquitous_ligands = ['PEG', 'ADP', 'FAD', 'NAD', 'ATP', 'MPD', 'NAP', 'GDP', 'MES',
|
| 43 |
'GTP', 'FMN', 'HEC', 'TRS', 'CIT', 'PGE', 'ANP', 'SAH', 'NDP',
|
|
@@ -60,11 +63,36 @@ ubiquitous_ligands = ['PEG', 'ADP', 'FAD', 'NAD', 'ATP', 'MPD', 'NAP', 'GDP', 'M
|
|
| 60 |
'PQQ', '9TY', 'DUR', 'PPV', 'SPM', 'SIA', 'DUP', 'GTX', '1PG',
|
| 61 |
'GUN', 'ETF', 'FDP', 'MFU', 'G2P', 'PC', 'DST', 'INI']
|
| 62 |
|
| 63 |
-
def get_protein_sequence_and_coords(receptor):
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 68 |
|
| 69 |
def tokenize_ligand(mol):
|
| 70 |
# convert to SMILES and map atoms
|
|
@@ -91,14 +119,17 @@ def tokenize_ligand(mol):
|
|
| 91 |
k = 0
|
| 92 |
conf_2d = AllChem.Compute2DCoords(mol)
|
| 93 |
token_pos_2d = []
|
|
|
|
| 94 |
for i,token in enumerate(masked_tokens):
|
| 95 |
if token != '':
|
| 96 |
token_pos_2d.append(tuple(mol.GetConformer(conf_2d).GetAtomPosition(atom_order[k])))
|
|
|
|
| 97 |
k += 1
|
| 98 |
else:
|
| 99 |
token_pos_2d.append((0.,0.,0.))
|
|
|
|
| 100 |
|
| 101 |
-
return smi, token_pos, token_pos_2d
|
| 102 |
|
| 103 |
def read_ligand_expo():
|
| 104 |
"""
|
|
@@ -109,9 +140,10 @@ def read_ligand_expo():
|
|
| 109 |
"""
|
| 110 |
file_name = "Components-smiles-stereo-oe.smi"
|
| 111 |
try:
|
| 112 |
-
df = pd.read_csv(file_name, sep="\t",
|
| 113 |
header=None,
|
| 114 |
-
names=["SMILES", "ID", "Name"]
|
|
|
|
| 115 |
except FileNotFoundError:
|
| 116 |
url = f"http://ligand-expo.rcsb.org/dictionaries/{file_name}"
|
| 117 |
print(url)
|
|
@@ -119,9 +151,9 @@ def read_ligand_expo():
|
|
| 119 |
open('Components-smiles-stereo-oe.smi', 'wb').write(r.content)
|
| 120 |
df = pd.read_csv(file_name, sep="\t",
|
| 121 |
header=None,
|
| 122 |
-
names=["SMILES", "ID", "Name"]
|
| 123 |
-
|
| 124 |
-
return df
|
| 125 |
|
| 126 |
|
| 127 |
def get_pdb_components(pdb_id):
|
|
@@ -138,7 +170,7 @@ def get_pdb_components(pdb_id):
|
|
| 138 |
return protein, ligand
|
| 139 |
|
| 140 |
|
| 141 |
-
def process_ligand(ligand, res_name,
|
| 142 |
"""
|
| 143 |
Add bond orders to a pdb ligand
|
| 144 |
1. Select the ligand component with name "res_name"
|
|
@@ -149,10 +181,10 @@ def process_ligand(ligand, res_name, expo_dict):
|
|
| 149 |
6. Assign the bond orders from the template from step 3
|
| 150 |
:param ligand: ligand as generated by prody
|
| 151 |
:param res_name: residue name of ligand to extract
|
| 152 |
-
:param
|
| 153 |
:return: molecule with bond orders assigned
|
| 154 |
"""
|
| 155 |
-
sub_smiles =
|
| 156 |
template = AllChem.MolFromSmiles(sub_smiles)
|
| 157 |
|
| 158 |
allres = ligand.select(f"resname {res_name}")
|
|
@@ -167,12 +199,36 @@ def process_ligand(ligand, res_name, expo_dict):
|
|
| 167 |
mols.append(AllChem.AssignBondOrdersFromTemplate(template, rd_mol))
|
| 168 |
return mols, template
|
| 169 |
|
| 170 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 171 |
try:
|
| 172 |
"""
|
| 173 |
Slit pdb into protein and ligands,
|
| 174 |
parse protein sequence and ligand tokens
|
| 175 |
-
:param
|
| 176 |
:param pdb_fn: pdb entry file name
|
| 177 |
:return:
|
| 178 |
"""
|
|
@@ -182,70 +238,103 @@ def process_entry(df_dict, pdb_fn):
|
|
| 182 |
|
| 183 |
ligand_mols = []
|
| 184 |
ligand_names = []
|
| 185 |
-
|
| 186 |
if ligand is not None:
|
| 187 |
# filter ligands by molecular weight
|
| 188 |
res_name_list = list(set(ligand.getResnames()))
|
| 189 |
for res in res_name_list:
|
| 190 |
-
|
|
|
|
|
|
|
| 191 |
|
| 192 |
mol_wt = ExactMolWt(template)
|
| 193 |
natoms = template.GetNumAtoms()
|
| 194 |
|
| 195 |
-
if mol_wt >= mol_wt_cutoff and natoms >= min_atoms
|
| 196 |
# only use first copy of ligand
|
| 197 |
mols = mols[:1]
|
| 198 |
ligand_mols += mols
|
| 199 |
ligand_names += [res]*len(mols)
|
| 200 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 201 |
|
| 202 |
ligand_smiles = []
|
| 203 |
ligand_xyz = []
|
| 204 |
ligand_xyz_2d = []
|
| 205 |
-
|
| 206 |
-
for mol, name in zip(ligand_mols, ligand_names):
|
| 207 |
print('Processing {} and {}'.format(pdb_name, name))
|
| 208 |
-
smi, xyz, xyz_2d = tokenize_ligand(mol)
|
| 209 |
ligand_smiles.append(smi)
|
| 210 |
ligand_xyz.append(xyz)
|
| 211 |
ligand_xyz_2d.append(xyz_2d)
|
| 212 |
|
| 213 |
-
|
| 214 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 215 |
except Exception as e:
|
|
|
|
| 216 |
print(repr(e))
|
| 217 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 218 |
if __name__ == '__main__':
|
| 219 |
import glob
|
| 220 |
|
| 221 |
filenames = glob.glob('pdb/*/*.gz')
|
| 222 |
filenames = sorted(filenames)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 223 |
comm = MPI.COMM_WORLD
|
| 224 |
with MPICommExecutor(comm, root=0) as executor:
|
| 225 |
# with MPIPoolExecutor() as executor:
|
| 226 |
if executor is not None:
|
| 227 |
# read ligand table
|
| 228 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 229 |
|
| 230 |
-
result = executor.map(partial(process_entry,
|
| 231 |
result = list(result)
|
| 232 |
|
| 233 |
-
|
| 234 |
-
pdb_id = [r[0] for r in result if r is not None for ligand in r[3]]
|
| 235 |
-
seq = [r[1] for r in result if r is not None for ligand in r[3]]
|
| 236 |
-
receptor_xyz = [r[2] for r in result if r is not None for ligand in r[3]]
|
| 237 |
-
lig_id = [l for r in result if r is not None for l in r[3]]
|
| 238 |
-
lig_smiles = [s for r in result if r is not None for s in r[4]]
|
| 239 |
-
lig_xyz = [xyz for r in result if r is not None for xyz in r[5]]
|
| 240 |
-
lig_xyz_2d = [xyz for r in result if r is not None for xyz in r[6]]
|
| 241 |
-
|
| 242 |
-
import pandas as pd
|
| 243 |
-
df = pd.DataFrame({
|
| 244 |
-
'pdb_id': pdb_id,
|
| 245 |
-
'lig_id': lig_id,
|
| 246 |
-
'seq': seq,
|
| 247 |
-
'smiles': lig_smiles,
|
| 248 |
-
'receptor_xyz': receptor_xyz,
|
| 249 |
-
'ligand_xyz': lig_xyz,
|
| 250 |
-
'ligand_xyz_2d': lig_xyz_2d})
|
| 251 |
-
df.to_parquet('data/pdb.parquet',index=False)
|
|
|
|
| 22 |
import numpy as np
|
| 23 |
|
| 24 |
import os
|
| 25 |
+
import random
|
| 26 |
+
import traceback
|
| 27 |
+
|
| 28 |
+
from openfold import data_transforms, protein
|
| 29 |
+
from openfold.residue_constants import aatype_to_str_sequence
|
| 30 |
+
import torch
|
| 31 |
|
| 32 |
# minimum molecular weight to consider sth a ligand
|
| 33 |
mol_wt_cutoff = 100
|
|
|
|
| 41 |
# tokenization regex (Schwaller)
|
| 42 |
molecule_regex = r"""(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>>?|\*|\$|\%[0-9]{2}|[0-9])"""
|
| 43 |
|
|
|
|
|
|
|
|
|
|
| 44 |
# filter out these common additives which occur in more than 75 complexes in the PDB
|
| 45 |
ubiquitous_ligands = ['PEG', 'ADP', 'FAD', 'NAD', 'ATP', 'MPD', 'NAP', 'GDP', 'MES',
|
| 46 |
'GTP', 'FMN', 'HEC', 'TRS', 'CIT', 'PGE', 'ANP', 'SAH', 'NDP',
|
|
|
|
| 63 |
'PQQ', '9TY', 'DUR', 'PPV', 'SPM', 'SIA', 'DUP', 'GTX', '1PG',
|
| 64 |
'GUN', 'ETF', 'FDP', 'MFU', 'G2P', 'PC', 'DST', 'INI']
|
| 65 |
|
| 66 |
+
def get_protein_sequence_and_coords(receptor, pdb_str):
    """Extract the amino-acid sequence and OpenFold structure features.

    :param receptor: prody protein object; used only to enumerate chain IDs
        via its hierarchical view
    :param pdb_str: the same structure serialized as a PDB-format string,
        parsed chain-by-chain with OpenFold's ``protein.from_pdb_string``
    :return: tuple ``(seq, features)`` where ``seq`` is the one-letter
        sequence string and ``features`` is a dict of numpy arrays
        (torsion angles, rigid-group frames, atom14 masks/positions, ...)
    """
    chains = [chain.getChid() for chain in receptor.getHierView()]

    aatype = []
    atom_positions = []
    atom_mask = []
    for chain in chains:
        # parse one chain at a time with OpenFold's PDB reader
        p = protein.from_pdb_string(pdb_str, chain)
        aatype.append(p.aatype)
        atom_positions.append(p.atom_positions)
        atom_mask.append(p.atom_mask)

    # concatenate chains
    aatype = np.concatenate(aatype)
    atom_positions = np.concatenate(atom_positions)
    atom_mask = np.concatenate(atom_mask)

    # determine torsion angles
    features = {'aatype': torch.tensor(aatype),
                'all_atom_positions': torch.tensor(atom_positions),
                'all_atom_mask': torch.tensor(atom_mask)}
    features = data_transforms.atom37_to_torsion_angles()(features)
    features = data_transforms.atom37_to_frames(features)
    features = data_transforms.make_atom14_masks(features)
    features = data_transforms.make_atom14_positions(features)
    # keep only tensor-valued features, converted back to numpy for pickling
    features = {k: v.numpy() for k, v in features.items() if isinstance(v, torch.Tensor)}

    seq = aatype_to_str_sequence(aatype)

    return seq, features
|
| 96 |
|
| 97 |
def tokenize_ligand(mol):
|
| 98 |
# convert to SMILES and map atoms
|
|
|
|
| 119 |
k = 0
|
| 120 |
conf_2d = AllChem.Compute2DCoords(mol)
|
| 121 |
token_pos_2d = []
|
| 122 |
+
atom_idx = []
|
| 123 |
for i,token in enumerate(masked_tokens):
|
| 124 |
if token != '':
|
| 125 |
token_pos_2d.append(tuple(mol.GetConformer(conf_2d).GetAtomPosition(atom_order[k])))
|
| 126 |
+
atom_idx.append(atom_order[k])
|
| 127 |
k += 1
|
| 128 |
else:
|
| 129 |
token_pos_2d.append((0.,0.,0.))
|
| 130 |
+
atom_idx.append(None)
|
| 131 |
|
| 132 |
+
return smi, token_pos, token_pos_2d, atom_idx
|
| 133 |
|
| 134 |
def read_ligand_expo():
|
| 135 |
"""
|
|
|
|
| 140 |
"""
|
| 141 |
file_name = "Components-smiles-stereo-oe.smi"
|
| 142 |
try:
|
| 143 |
+
df = pd.read_csv(file_name, sep=r"[\t]+",
|
| 144 |
header=None,
|
| 145 |
+
names=["SMILES", "ID", "Name"],
|
| 146 |
+
engine='python')
|
| 147 |
except FileNotFoundError:
|
| 148 |
url = f"http://ligand-expo.rcsb.org/dictionaries/{file_name}"
|
| 149 |
print(url)
|
|
|
|
| 151 |
open('Components-smiles-stereo-oe.smi', 'wb').write(r.content)
|
| 152 |
df = pd.read_csv(file_name, sep="\t",
|
| 153 |
header=None,
|
| 154 |
+
names=["SMILES", "ID", "Name"],
|
| 155 |
+
na_filter=False)
|
| 156 |
+
return df
|
| 157 |
|
| 158 |
|
| 159 |
def get_pdb_components(pdb_id):
|
|
|
|
| 170 |
return protein, ligand
|
| 171 |
|
| 172 |
|
| 173 |
+
def process_ligand(ligand, res_name, df_expo):
|
| 174 |
"""
|
| 175 |
Add bond orders to a pdb ligand
|
| 176 |
1. Select the ligand component with name "res_name"
|
|
|
|
| 181 |
6. Assign the bond orders from the template from step 3
|
| 182 |
:param ligand: ligand as generated by prody
|
| 183 |
:param res_name: residue name of ligand to extract
|
| 184 |
+
:param df_expo: dictionary with LigandExpo
|
| 185 |
:return: molecule with bond orders assigned
|
| 186 |
"""
|
| 187 |
+
sub_smiles = df_expo[df_expo['ID'].values == res_name]['SMILES'].values[0]
|
| 188 |
template = AllChem.MolFromSmiles(sub_smiles)
|
| 189 |
|
| 190 |
allres = ligand.select(f"resname {res_name}")
|
|
|
|
| 199 |
mols.append(AllChem.AssignBondOrdersFromTemplate(template, rd_mol))
|
| 200 |
return mols, template
|
| 201 |
|
| 202 |
+
def rot_from_two_vecs(e0_unnormalized, e1_unnormalized):
    """Create rotation matrices from unnormalized vectors for the x and y-axes.

    This builds a rotation matrix from two vectors via Gram-Schmidt
    orthogonalization.

    Args:
        e0_unnormalized: vectors lying along x-axis of resulting rotation
        e1_unnormalized: vectors lying in xy-plane of resulting rotation
    Returns:
        Rotations resulting from Gram-Schmidt procedure.
    """
    # Unit vector for the x-axis.
    e0 = e0_unnormalized / np.linalg.norm(e0_unnormalized)

    # Remove the component of e1 along e0, then normalize.
    projection = np.dot(e1_unnormalized, e0)
    e1 = e1_unnormalized - projection * e0
    e1 = e1 / np.linalg.norm(e1)

    # z-axis completes the right-handed frame.
    e2 = np.cross(e0, e1)

    # Columns are the local axes expressed in the space frame.
    return np.stack([e0, e1, e2]).T
|
| 225 |
+
|
| 226 |
+
def process_entry(df, pdb_fn):
|
| 227 |
try:
|
| 228 |
"""
|
| 229 |
Split pdb into protein and ligands,
|
| 230 |
parse protein sequence and ligand tokens
|
| 231 |
+
:param df: ligand expo data
|
| 232 |
:param pdb_fn: pdb entry file name
|
| 233 |
:return:
|
| 234 |
"""
|
|
|
|
| 238 |
|
| 239 |
ligand_mols = []
|
| 240 |
ligand_names = []
|
| 241 |
+
ligand_bonds = []
|
| 242 |
if ligand is not None:
|
| 243 |
# filter ligands by molecular weight
|
| 244 |
res_name_list = list(set(ligand.getResnames()))
|
| 245 |
for res in res_name_list:
|
| 246 |
+
if res in ubiquitous_ligands:
|
| 247 |
+
continue
|
| 248 |
+
mols, template = process_ligand(ligand, res, df)
|
| 249 |
|
| 250 |
mol_wt = ExactMolWt(template)
|
| 251 |
natoms = template.GetNumAtoms()
|
| 252 |
|
| 253 |
+
if mol_wt >= mol_wt_cutoff and natoms >= min_atoms:
|
| 254 |
# only use first copy of ligand
|
| 255 |
mols = mols[:1]
|
| 256 |
ligand_mols += mols
|
| 257 |
ligand_names += [res]*len(mols)
|
| 258 |
|
| 259 |
+
bonds = []
|
| 260 |
+
for b in template.GetBonds():
|
| 261 |
+
bonds.append((b.GetBeginAtomIdx(), b.GetEndAtomIdx()))
|
| 262 |
+
ligand_bonds.append(bonds)
|
| 263 |
+
|
| 264 |
|
| 265 |
ligand_smiles = []
|
| 266 |
ligand_xyz = []
|
| 267 |
ligand_xyz_2d = []
|
| 268 |
+
ligand_token_bonds = []
|
| 269 |
+
for mol, name, bonds in zip(ligand_mols, ligand_names, ligand_bonds):
|
| 270 |
print('Processing {} and {}'.format(pdb_name, name))
|
| 271 |
+
smi, xyz, xyz_2d, atom_idx = tokenize_ligand(mol)
|
| 272 |
ligand_smiles.append(smi)
|
| 273 |
ligand_xyz.append(xyz)
|
| 274 |
ligand_xyz_2d.append(xyz_2d)
|
| 275 |
|
| 276 |
+
ligand_token_bonds.append([ (atom_idx.index(b[0]), atom_idx.index(b[1])) for b in bonds ])
|
| 277 |
+
|
| 278 |
+
pdb_str = StringIO()
|
| 279 |
+
writePDBStream(pdb_str, protein)
|
| 280 |
+
|
| 281 |
+
seq, features = get_protein_sequence_and_coords(protein, pdb_str.getvalue())
|
| 282 |
+
features = { 'rigidgroups_gt_frames': features['rigidgroups_gt_frames'],
|
| 283 |
+
'torsion_angles_sin_cos': features['torsion_angles_sin_cos']}
|
| 284 |
+
return pdb_name, seq, features, ligand_names, ligand_smiles, ligand_xyz, ligand_xyz_2d, ligand_token_bonds
|
| 285 |
except Exception as e:
|
| 286 |
+
print(traceback.format_exc())
|
| 287 |
print(repr(e))
|
| 288 |
|
| 289 |
+
def write_result(fn, data):
    """Flatten per-entry results into one row per ligand and pickle them.

    Each non-None entry of ``data`` is a tuple
    ``(pdb_id, seq, receptor_features, lig_ids, smiles, xyz, xyz_2d, bonds)``;
    per-complex fields are replicated once per ligand.
    """
    valid = [r for r in data if r is not None]

    pdb_id, seq, receptor_features = [], [], []
    lig_id, lig_smiles, lig_xyz, lig_xyz_2d, lig_bonds = [], [], [], [], []
    for r in valid:
        n_ligands = len(r[3])
        # replicate per-complex fields once per ligand
        pdb_id.extend([r[0]] * n_ligands)
        seq.extend([r[1]] * n_ligands)
        receptor_features.extend([r[2]] * n_ligands)
        lig_id.extend(r[3])
        lig_smiles.extend(r[4])
        lig_xyz.extend(r[5])
        lig_xyz_2d.extend(r[6])
        lig_bonds.extend(r[7])

    import pandas as pd
    df = pd.DataFrame({
        'pdb_id': pdb_id,
        'lig_id': lig_id,
        'seq': seq,
        'smiles': lig_smiles,
        'receptor_features': receptor_features,
        'ligand_xyz': lig_xyz,
        'ligand_xyz_2d': lig_xyz_2d,
        'ligand_bonds': lig_bonds})
    df.to_pickle(fn)
|
| 311 |
+
|
| 312 |
if __name__ == '__main__':
    import glob

    # sort for a stable ordering before the seeded shuffle below
    filenames = glob.glob('pdb/*/*.gz')
    filenames = sorted(filenames)

    # deterministic 90/10 train/test split over shuffled PDB entries
    random.seed(42)
    random.shuffle(filenames)

    split_idx = int(0.9*len(filenames))
    train = filenames[:split_idx]
    test = filenames[split_idx:]

    comm = MPI.COMM_WORLD
    with MPICommExecutor(comm, root=0) as executor:
    # with MPIPoolExecutor() as executor:
        # only the root rank gets a non-None executor
        if executor is not None:
            # read ligand table
            df = read_ligand_expo()

            # process entries in parallel across MPI ranks
            result = executor.map(partial(process_entry, df), train, chunksize=128)
            result = list(result)

            write_result('data/pdb_train.p', result)

            result = executor.map(partial(process_entry, df), test, chunksize=128)
            result = list(result)

            write_result('data/pdb_test.p', result)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
pdb.slurm
CHANGED
|
@@ -3,7 +3,7 @@
|
|
| 3 |
#SBATCH -p batch
|
| 4 |
#SBATCH -A STF006
|
| 5 |
#SBATCH -t 3:00:00
|
| 6 |
-
#SBATCH -N
|
| 7 |
#SBATCH --ntasks-per-node=16
|
| 8 |
|
| 9 |
export PYTHONUNBUFFERED=1
|
|
|
|
| 3 |
#SBATCH -p batch
|
| 4 |
#SBATCH -A STF006
|
| 5 |
#SBATCH -t 3:00:00
|
| 6 |
+
#SBATCH -N 128
|
| 7 |
#SBATCH --ntasks-per-node=16
|
| 8 |
|
| 9 |
export PYTHONUNBUFFERED=1
|
pdb_protein_ligand_complexes.py
DELETED
|
@@ -1,131 +0,0 @@
|
|
| 1 |
-
# coding=utf-8
|
| 2 |
-
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
| 3 |
-
#
|
| 4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
-
# you may not use this file except in compliance with the License.
|
| 6 |
-
# You may obtain a copy of the License at
|
| 7 |
-
#
|
| 8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
-
#
|
| 10 |
-
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
-
# See the License for the specific language governing permissions and
|
| 14 |
-
# limitations under the License.
|
| 15 |
-
"""TODO: A dataset of protein sequences, ligand SMILES, and complex coordinates."""
|
| 16 |
-
|
| 17 |
-
import huggingface_hub
|
| 18 |
-
import os
|
| 19 |
-
import pyarrow.parquet as pq
|
| 20 |
-
import datasets
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
# TODO: Add BibTeX citation
|
| 24 |
-
# Find for instance the citation on arxiv or on the dataset repo/website
|
| 25 |
-
_CITATION = """\
|
| 26 |
-
@InProceedings{huggingface:dataset,
|
| 27 |
-
title = {jglaser/pdb_protein_ligand_complexes},
|
| 28 |
-
author={Jens Glaser, ORNL
|
| 29 |
-
},
|
| 30 |
-
year={2022}
|
| 31 |
-
}
|
| 32 |
-
"""
|
| 33 |
-
|
| 34 |
-
# TODO: Add description of the dataset here
|
| 35 |
-
# You can copy an official description
|
| 36 |
-
_DESCRIPTION = """\
|
| 37 |
-
A dataset to fine-tune language models on protein-ligand complex structures
|
| 38 |
-
"""
|
| 39 |
-
|
| 40 |
-
# TODO: Add a link to an official homepage for the dataset here
|
| 41 |
-
_HOMEPAGE = ""
|
| 42 |
-
|
| 43 |
-
# TODO: Add the licence for the dataset here if you can find it
|
| 44 |
-
_LICENSE = "BSD two-clause"
|
| 45 |
-
|
| 46 |
-
# TODO: Add link to the official dataset URLs here
|
| 47 |
-
# The HuggingFace dataset library don't host the datasets but only point to the original files
|
| 48 |
-
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
|
| 49 |
-
_URL = "https://huggingface.co/datasets/jglaser/pdb_protein_ligand_complexes/resolve/main/"
|
| 50 |
-
_data_dir = "data/"
|
| 51 |
-
_file_names = {'default': _data_dir+'pdb.parquet'}
|
| 52 |
-
|
| 53 |
-
_URLs = {name: _URL+_file_names[name] for name in _file_names}
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
# TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
|
| 57 |
-
class PDBProteinLigandComplexes(datasets.ArrowBasedBuilder):
|
| 58 |
-
"""List of protein sequences, ligand SMILES, and complex coordinates."""
|
| 59 |
-
|
| 60 |
-
VERSION = datasets.Version("1.3.0")
|
| 61 |
-
|
| 62 |
-
def _info(self):
|
| 63 |
-
# TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
|
| 64 |
-
#if self.config.name == "first_domain": # This is the name of the configuration selected in BUILDER_CONFIGS above
|
| 65 |
-
# features = datasets.Features(
|
| 66 |
-
# {
|
| 67 |
-
# "sentence": datasets.Value("string"),
|
| 68 |
-
# "option1": datasets.Value("string"),
|
| 69 |
-
# "answer": datasets.Value("string")
|
| 70 |
-
# # These are the features of your dataset like images, labels ...
|
| 71 |
-
# }
|
| 72 |
-
# )
|
| 73 |
-
#else: # This is an example to show how to have different features for "first_domain" and "second_domain"
|
| 74 |
-
features = datasets.Features(
|
| 75 |
-
{
|
| 76 |
-
"pdb_id": datasets.Value("string"),
|
| 77 |
-
"lig_id": datasets.Value("string"),
|
| 78 |
-
"seq": datasets.Value("string"),
|
| 79 |
-
"smiles": datasets.Value("string"),
|
| 80 |
-
"ligand_xyz": datasets.Sequence(datasets.Sequence(datasets.Value('float32'))),
|
| 81 |
-
"ligand_xyz_2d": datasets.Sequence(datasets.Sequence(datasets.Value('float32'))),
|
| 82 |
-
"receptor_xyz": datasets.Sequence(datasets.Sequence(datasets.Value('float32'))),
|
| 83 |
-
# These are the features of your dataset like images, labels ...
|
| 84 |
-
}
|
| 85 |
-
)
|
| 86 |
-
return datasets.DatasetInfo(
|
| 87 |
-
# This is the description that will appear on the datasets page.
|
| 88 |
-
description=_DESCRIPTION,
|
| 89 |
-
# This defines the different columns of the dataset and their types
|
| 90 |
-
features=features, # Here we define them above because they are different between the two configurations
|
| 91 |
-
# If there's a common (input, target) tuple from the features,
|
| 92 |
-
# specify them here. They'll be used if as_supervised=True in
|
| 93 |
-
# builder.as_dataset.
|
| 94 |
-
supervised_keys=None,
|
| 95 |
-
# Homepage of the dataset for documentation
|
| 96 |
-
homepage=_HOMEPAGE,
|
| 97 |
-
# License for the dataset if available
|
| 98 |
-
license=_LICENSE,
|
| 99 |
-
# Citation for the dataset
|
| 100 |
-
citation=_CITATION,
|
| 101 |
-
)
|
| 102 |
-
|
| 103 |
-
def _split_generators(self, dl_manager):
|
| 104 |
-
"""Returns SplitGenerators."""
|
| 105 |
-
# TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
|
| 106 |
-
# If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
|
| 107 |
-
|
| 108 |
-
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
|
| 109 |
-
# It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
|
| 110 |
-
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
|
| 111 |
-
files = dl_manager.download_and_extract(_URLs)
|
| 112 |
-
|
| 113 |
-
return [
|
| 114 |
-
datasets.SplitGenerator(
|
| 115 |
-
# These kwargs will be passed to _generate_examples
|
| 116 |
-
name=datasets.Split.TRAIN,
|
| 117 |
-
gen_kwargs={
|
| 118 |
-
'filepath': files["default"],
|
| 119 |
-
},
|
| 120 |
-
),
|
| 121 |
-
|
| 122 |
-
]
|
| 123 |
-
|
| 124 |
-
def _generate_tables(
|
| 125 |
-
self, filepath
|
| 126 |
-
):
|
| 127 |
-
from pyarrow import fs
|
| 128 |
-
local = fs.LocalFileSystem()
|
| 129 |
-
|
| 130 |
-
for i, f in enumerate([filepath]):
|
| 131 |
-
yield i, pq.read_table(f,filesystem=local)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|