Spaces:
Sleeping
Sleeping
Nazarshia2889
committed on
Commit
•
2190be4
1
Parent(s):
45a895d
first push
Browse files- app.py +62 -0
- bcell/config.json +36 -0
- bcell/tf_model.h5 +3 -0
- classifier/config.json +36 -0
- classifier/tf_model.h5 +3 -0
- classifier2/config.json +36 -0
- classifier2/tf_model.h5 +3 -0
- requirements.txt +2 -0
app.py
ADDED
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from transformers import TFAutoModelForSequenceClassification
|
3 |
+
from transformers import AutoTokenizer
|
4 |
+
import pandas as pd
|
5 |
+
|
6 |
+
# title
|
7 |
+
st.title('Raven AI')
|
8 |
+
|
9 |
+
# text input with label
|
10 |
+
sequence = st.text_input('Enter Amino Acid Sequence')
|
11 |
+
|
12 |
+
model_type = st.radio(
|
13 |
+
"Choose Linear Epitope Classifier",
|
14 |
+
('Linear T-Cells (MHC Class I Restriction)', 'Linear T-Cells (MHC Class II Restriction)', 'Linear B-Cell'))
|
15 |
+
|
16 |
+
# windows length slider
|
17 |
+
length = st.slider('Window Length', 1, 20, 10)
|
18 |
+
threshold = st.slider('Probability Threshold', 0.0, 1.0, 0.5)
|
19 |
+
|
20 |
+
model_checkpoint = "facebook/esm2_t6_8M_UR50D"
|
21 |
+
|
22 |
+
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
|
23 |
+
|
24 |
+
if model_type == 'Linear T-Cells (MHC Class I Restriction)':
|
25 |
+
model = TFAutoModelForSequenceClassification.from_pretrained('classifier')
|
26 |
+
elif model_type == 'Linear T-Cells (MHC Class II Restriction)':
|
27 |
+
model = TFAutoModelForSequenceClassification.from_pretrained('classifier2')
|
28 |
+
elif model_type == 'Linear B-Cell':
|
29 |
+
model = TFAutoModelForSequenceClassification.from_pretrained('bcell')
|
30 |
+
# submit button
|
31 |
+
if st.button('Submit'):
|
32 |
+
# run model
|
33 |
+
locations = []
|
34 |
+
for i in range(len(sequence) - length):
|
35 |
+
peptide_name = sequence[i:i+length]
|
36 |
+
peptide = tokenizer(peptide_name, return_tensors="tf")
|
37 |
+
output = model(peptide)
|
38 |
+
locations.append([peptide_name, output.logits.numpy()[0][0]])
|
39 |
+
|
40 |
+
locations = pd.DataFrame(locations, columns = ['Peptide', 'Probability'])
|
41 |
+
|
42 |
+
# display table with sequence and probability as the headers
|
43 |
+
def color_survived(x: float): # x between 0 and 1
|
44 |
+
# red to green scale based on x
|
45 |
+
# 0 -> red
|
46 |
+
# 0.5 -> clear
|
47 |
+
# 1 -> green
|
48 |
+
|
49 |
+
# red
|
50 |
+
if x < threshold:
|
51 |
+
r = 179
|
52 |
+
g = 40
|
53 |
+
b = 2
|
54 |
+
# green
|
55 |
+
else:
|
56 |
+
r = 18
|
57 |
+
g = 150
|
58 |
+
b = 6
|
59 |
+
|
60 |
+
return f'background-color: rgb({r}, {g}, {b})'
|
61 |
+
|
62 |
+
st.table(locations.style.applymap(color_survived, subset=['Probability']))
|
bcell/config.json
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_name_or_path": "facebook/esm2_t6_8M_UR50D",
|
3 |
+
"architectures": [
|
4 |
+
"EsmForSequenceClassification"
|
5 |
+
],
|
6 |
+
"attention_probs_dropout_prob": 0.0,
|
7 |
+
"classifier_dropout": null,
|
8 |
+
"emb_layer_norm_before": false,
|
9 |
+
"esmfold_config": null,
|
10 |
+
"hidden_act": "gelu",
|
11 |
+
"hidden_dropout_prob": 0.0,
|
12 |
+
"hidden_size": 320,
|
13 |
+
"id2label": {
|
14 |
+
"0": "LABEL_0"
|
15 |
+
},
|
16 |
+
"initializer_range": 0.02,
|
17 |
+
"intermediate_size": 1280,
|
18 |
+
"is_folding_model": false,
|
19 |
+
"label2id": {
|
20 |
+
"LABEL_0": 0
|
21 |
+
},
|
22 |
+
"layer_norm_eps": 1e-05,
|
23 |
+
"mask_token_id": 32,
|
24 |
+
"max_position_embeddings": 1026,
|
25 |
+
"model_type": "esm",
|
26 |
+
"num_attention_heads": 20,
|
27 |
+
"num_hidden_layers": 6,
|
28 |
+
"pad_token_id": 1,
|
29 |
+
"position_embedding_type": "rotary",
|
30 |
+
"token_dropout": true,
|
31 |
+
"torch_dtype": "float32",
|
32 |
+
"transformers_version": "4.29.2",
|
33 |
+
"use_cache": true,
|
34 |
+
"vocab_list": null,
|
35 |
+
"vocab_size": 33
|
36 |
+
}
|
bcell/tf_model.h5
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:fc764936c96b97e3e5678e26e08f8e96eb7a1effaabc4b8cc1173471f0c3eb5d
|
3 |
+
size 30211508
|
classifier/config.json
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_name_or_path": "facebook/esm2_t6_8M_UR50D",
|
3 |
+
"architectures": [
|
4 |
+
"EsmForSequenceClassification"
|
5 |
+
],
|
6 |
+
"attention_probs_dropout_prob": 0.0,
|
7 |
+
"classifier_dropout": null,
|
8 |
+
"emb_layer_norm_before": false,
|
9 |
+
"esmfold_config": null,
|
10 |
+
"hidden_act": "gelu",
|
11 |
+
"hidden_dropout_prob": 0.0,
|
12 |
+
"hidden_size": 320,
|
13 |
+
"id2label": {
|
14 |
+
"0": "LABEL_0"
|
15 |
+
},
|
16 |
+
"initializer_range": 0.02,
|
17 |
+
"intermediate_size": 1280,
|
18 |
+
"is_folding_model": false,
|
19 |
+
"label2id": {
|
20 |
+
"LABEL_0": 0
|
21 |
+
},
|
22 |
+
"layer_norm_eps": 1e-05,
|
23 |
+
"mask_token_id": 32,
|
24 |
+
"max_position_embeddings": 1026,
|
25 |
+
"model_type": "esm",
|
26 |
+
"num_attention_heads": 20,
|
27 |
+
"num_hidden_layers": 6,
|
28 |
+
"pad_token_id": 1,
|
29 |
+
"position_embedding_type": "rotary",
|
30 |
+
"token_dropout": true,
|
31 |
+
"torch_dtype": "float32",
|
32 |
+
"transformers_version": "4.29.2",
|
33 |
+
"use_cache": true,
|
34 |
+
"vocab_list": null,
|
35 |
+
"vocab_size": 33
|
36 |
+
}
|
classifier/tf_model.h5
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:7283cc436b2737289b6174da635b372aaac170458a5772f3d31a851aa076b25c
|
3 |
+
size 30211508
|
classifier2/config.json
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_name_or_path": "facebook/esm2_t6_8M_UR50D",
|
3 |
+
"architectures": [
|
4 |
+
"EsmForSequenceClassification"
|
5 |
+
],
|
6 |
+
"attention_probs_dropout_prob": 0.0,
|
7 |
+
"classifier_dropout": null,
|
8 |
+
"emb_layer_norm_before": false,
|
9 |
+
"esmfold_config": null,
|
10 |
+
"hidden_act": "gelu",
|
11 |
+
"hidden_dropout_prob": 0.0,
|
12 |
+
"hidden_size": 320,
|
13 |
+
"id2label": {
|
14 |
+
"0": "LABEL_0"
|
15 |
+
},
|
16 |
+
"initializer_range": 0.02,
|
17 |
+
"intermediate_size": 1280,
|
18 |
+
"is_folding_model": false,
|
19 |
+
"label2id": {
|
20 |
+
"LABEL_0": 0
|
21 |
+
},
|
22 |
+
"layer_norm_eps": 1e-05,
|
23 |
+
"mask_token_id": 32,
|
24 |
+
"max_position_embeddings": 1026,
|
25 |
+
"model_type": "esm",
|
26 |
+
"num_attention_heads": 20,
|
27 |
+
"num_hidden_layers": 6,
|
28 |
+
"pad_token_id": 1,
|
29 |
+
"position_embedding_type": "rotary",
|
30 |
+
"token_dropout": true,
|
31 |
+
"torch_dtype": "float32",
|
32 |
+
"transformers_version": "4.29.2",
|
33 |
+
"use_cache": true,
|
34 |
+
"vocab_list": null,
|
35 |
+
"vocab_size": 33
|
36 |
+
}
|
classifier2/tf_model.h5
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:c373b0000c019df443691ee21b3429901181d425c74b89f866bf135cacbcb39d
|
3 |
+
size 30211508
|
requirements.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
pandas
streamlit
tensorflow
|
2 |
+
transformers
|