wenkai committed
Commit cdf31f1
1 Parent(s): 77b966b

Update app.py

Files changed (1):
  1. app.py +10 -9
app.py CHANGED

@@ -9,7 +9,7 @@ import spaces
 import gradio as gr
 from esm_scripts.extract import run_demo
 from esm import pretrained, FastaBatchedDataset
-from transformers import EsmTokenizer, EsmModel
+# from transformers import EsmTokenizer, EsmModel
 
 
 # Load the model
@@ -17,13 +17,13 @@ model = Blip2ProteinMistral(config=FAPMConfig(), esm_size='3b')
 model.load_checkpoint("model/checkpoint_mf2.pth")
 model.to('cuda')
 
-# model_esm, alphabet = pretrained.load_model_and_alphabet('esm2_t36_3B_UR50D')
-# model_esm.to('cuda')
-# model_esm.eval()
-tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t36_3B_UR50D")
-model_esm = EsmModel.from_pretrained("facebook/esm2_t36_3B_UR50D")
+model_esm, alphabet = pretrained.load_model_and_alphabet('esm2_t36_3B_UR50D')
 model_esm.to('cuda')
 model_esm.eval()
+# tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t36_3B_UR50D")
+# model_esm = EsmModel.from_pretrained("facebook/esm2_t36_3B_UR50D")
+# model_esm.to('cuda')
+# model_esm.eval()
 
 @spaces.GPU
 def generate_caption(protein, prompt):
@@ -35,8 +35,8 @@ def generate_caption(protein, prompt):
     # esm_emb = run_demo(protein_name='protein_name', protein_seq=protein,
     #                    model=model_esm, alphabet=alphabet,
     #                    include='per_tok', repr_layers=[36], truncation_seq_length=1024)
-    '''
-    protein_name='protein_name'
+
+    protein_name='protein_name'
     protein_seq=protein
     include='per_tok'
     repr_layers=[36]
@@ -98,7 +98,7 @@ def generate_caption(protein, prompt):
     with torch.no_grad():
         outputs = model_esm(**inputs)
         esm_emb = outputs.last_hidden_state.detach()[0]
-
+    '''
    print("esm embedding generated")
    esm_emb = F.pad(esm_emb.t(), (0, 1024 - len(esm_emb))).t().to('cuda')
    print("esm embedding processed")
@@ -117,6 +117,7 @@ description = """Quick demonstration of the FAPM model for protein function pred
 
 The model used in this app is available at [Hugging Face Model Hub](https://huggingface.co/wenkai/FAPM) and the source code can be found on [GitHub](https://github.com/xiangwenkai/FAPM/tree/main)."""
 
+
 iface = gr.Interface(
     fn=generate_caption,
     inputs=[gr.Textbox(type="text", label="Upload sequence"), gr.Textbox(type="text", label="Prompt")],
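For context, this commit switches the embedding backend from the `transformers` `EsmModel` path back to fair-esm's `pretrained.load_model_and_alphabet('esm2_t36_3B_UR50D')`. Below is a minimal sketch of how per-token ESM-2 embeddings are typically obtained with the fair-esm API; it uses the standard `alphabet.get_batch_converter()` workflow and an illustrative sequence, and is not the exact code in `esm_scripts/extract.py`:

```python
import torch
from esm import pretrained

# Load ESM-2 3B and its alphabet via fair-esm (as in the updated app.py).
model_esm, alphabet = pretrained.load_model_and_alphabet('esm2_t36_3B_UR50D')
model_esm.eval()

# Convert one (name, sequence) pair to token ids; the sequence here is illustrative.
batch_converter = alphabet.get_batch_converter()
labels, strs, tokens = batch_converter([("protein_name", "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ")])

# Per-token representations from layer 36 (the final layer of the 3B model),
# matching repr_layers=[36] in the app code.
with torch.no_grad():
    out = model_esm(tokens, repr_layers=[36], return_contacts=False)
esm_emb = out["representations"][36][0]  # shape: (tokens incl. specials, hidden_dim)
```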
 
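The unchanged post-processing line, `esm_emb = F.pad(esm_emb.t(), (0, 1024 - len(esm_emb))).t().to('cuda')`, right-pads every per-token embedding to a fixed length of 1024 before it is passed to the FAPM model. A small self-contained sketch of that step with illustrative shapes (2560 is the ESM-2 3B hidden size; the sequence length is arbitrary):

```python
import torch
import torch.nn.functional as F

# Illustrative per-token embedding: 300 tokens, hidden size 2560.
esm_emb = torch.randn(300, 2560)

# F.pad pads the last dimension, so the code transposes to (hidden, seq_len),
# zero-pads the sequence axis up to 1024, then transposes back.
padded = F.pad(esm_emb.t(), (0, 1024 - len(esm_emb))).t()
print(padded.shape)  # torch.Size([1024, 2560])
```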