Spaces:
Runtime error
Runtime error
Commit
·
05564bf
1
Parent(s):
9c4ac48
Update app.py
Browse files
app.py
CHANGED
@@ -81,8 +81,10 @@ def predict_structure(prefix, feature_dict, model_runners, random_seed=0):
|
|
81 |
f.write(protein.to_pdb(unrelaxed_protein))
|
82 |
return plddts
|
83 |
|
84 |
-
|
85 |
def run_protgpt2(startsequence, length, repetitionPenalty, top_k_poolsize, max_seqs):
|
|
|
|
|
86 |
protgpt2 = pl("text-generation", model="nferruz/ProtGPT2")
|
87 |
sequences = protgpt2(
|
88 |
startsequence,
|
@@ -94,14 +96,14 @@ def run_protgpt2(startsequence, length, repetitionPenalty, top_k_poolsize, max_s
|
|
94 |
eos_token_id=0,
|
95 |
)
|
96 |
print("Cleaning up after protGPT2")
|
97 |
-
print(gpu_usage())
|
98 |
#torch.cuda.empty_cache()
|
99 |
-
device = cuda.get_current_device()
|
100 |
-
device.reset()
|
101 |
-
print(gpu_usage())
|
102 |
return sequences
|
103 |
|
104 |
-
|
105 |
def run_alphafold(startsequence):
|
106 |
print(gpu_usage())
|
107 |
model_runners = {}
|
@@ -124,8 +126,7 @@ def run_alphafold(startsequence):
|
|
124 |
**mk_mock_template(query_sequence),
|
125 |
}
|
126 |
plddts = predict_structure("test", feature_dict, model_runners)
|
127 |
-
print("
|
128 |
-
print(gpu_usage())
|
129 |
#backend = jax.lib.xla_bridge.get_backend()
|
130 |
#for buf in backend.live_buffers(): buf.delete()
|
131 |
#device = cuda.get_current_device()
|
@@ -137,7 +138,7 @@ def run_alphafold(startsequence):
|
|
137 |
def update_protGPT2(inp, length,repetitionPenalty, top_k_poolsize, max_seqs):
|
138 |
startsequence = inp
|
139 |
seqlen = length
|
140 |
-
generated_seqs = run_protgpt2(startsequence, seqlen, repetitionPenalty, top_k_poolsize, max_seqs)
|
141 |
gen_seqs = [x["generated_text"] for x in generated_seqs]
|
142 |
print(gen_seqs)
|
143 |
sequencestxt = ""
|
@@ -151,7 +152,8 @@ def update_protGPT2(inp, length,repetitionPenalty, top_k_poolsize, max_seqs):
|
|
151 |
def update(inp):
|
152 |
print("Running AF on", inp)
|
153 |
startsequence = inp
|
154 |
-
|
|
|
155 |
print(plddts)
|
156 |
x = np.arange(10)
|
157 |
#plt.style.use(["seaborn-ticks", "seaborn-talk"])
|
|
|
81 |
f.write(protein.to_pdb(unrelaxed_protein))
|
82 |
return plddts
|
83 |
|
84 |
+
@ray.remote
|
85 |
def run_protgpt2(startsequence, length, repetitionPenalty, top_k_poolsize, max_seqs):
|
86 |
+
print("running protgpt2")
|
87 |
+
print(gpu_usage())
|
88 |
protgpt2 = pl("text-generation", model="nferruz/ProtGPT2")
|
89 |
sequences = protgpt2(
|
90 |
startsequence,
|
|
|
96 |
eos_token_id=0,
|
97 |
)
|
98 |
print("Cleaning up after protGPT2")
|
99 |
+
#print(gpu_usage())
|
100 |
#torch.cuda.empty_cache()
|
101 |
+
#device = cuda.get_current_device()
|
102 |
+
#device.reset()
|
103 |
+
#print(gpu_usage())
|
104 |
return sequences
|
105 |
|
106 |
+
@ray.remote
|
107 |
def run_alphafold(startsequence):
|
108 |
print(gpu_usage())
|
109 |
model_runners = {}
|
|
|
126 |
**mk_mock_template(query_sequence),
|
127 |
}
|
128 |
plddts = predict_structure("test", feature_dict, model_runners)
|
129 |
+
print("AF2 done")
|
|
|
130 |
#backend = jax.lib.xla_bridge.get_backend()
|
131 |
#for buf in backend.live_buffers(): buf.delete()
|
132 |
#device = cuda.get_current_device()
|
|
|
138 |
def update_protGPT2(inp, length,repetitionPenalty, top_k_poolsize, max_seqs):
|
139 |
startsequence = inp
|
140 |
seqlen = length
|
141 |
+
generated_seqs = ray.get(run_protgpt2.remote(startsequence, seqlen, repetitionPenalty, top_k_poolsize, max_seqs))
|
142 |
gen_seqs = [x["generated_text"] for x in generated_seqs]
|
143 |
print(gen_seqs)
|
144 |
sequencestxt = ""
|
|
|
152 |
def update(inp):
|
153 |
print("Running AF on", inp)
|
154 |
startsequence = inp
|
155 |
+
# run alphafold using ray
|
156 |
+
plddts = ray.get(run_alphafold.remote(startsequence))
|
157 |
print(plddts)
|
158 |
x = np.arange(10)
|
159 |
#plt.style.use(["seaborn-ticks", "seaborn-talk"])
|