simonduerr committed on
Commit
4ea051a
1 Parent(s): 4404556

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -1
app.py CHANGED
@@ -9,6 +9,7 @@ import matplotlib
9
  matplotlib.use("Agg")
10
  import matplotlib.pyplot as plt
11
  from transformers import pipeline as pl
 
12
 
13
  import pandas as pd
14
  import numpy as np
@@ -16,7 +17,7 @@ import matplotlib.pyplot as plt
16
  import sys
17
  import plotly.graph_objects as go
18
  import torch
19
-
20
  print('GPU available',torch.cuda.is_available())
21
  print('__CUDA Device Name:',torch.cuda.get_device_name(0))
22
  print(os.getcwd())
@@ -90,12 +91,16 @@ def run_protgpt2(startsequence, length, repetitionPenalty, top_k_poolsize, max_s
90
  num_return_sequences=max_seqs,
91
  eos_token_id=0,
92
  )
 
93
  del protgpt2
94
  torch.cuda.empty_cache()
 
 
95
  return sequences
96
 
97
 
98
  def run_alphafold(startsequence):
 
99
  model_runners = {}
100
  models = ["model_1"] # ,"model_2","model_3","model_4","model_5"]
101
  for model_name in models:
@@ -116,6 +121,9 @@ def run_alphafold(startsequence):
116
  **mk_mock_template(query_sequence),
117
  }
118
  plddts = predict_structure("test", feature_dict, model_runners)
 
 
 
119
  return plddts["model_1"]
120
 
121
 
 
9
  matplotlib.use("Agg")
10
  import matplotlib.pyplot as plt
11
  from transformers import pipeline as pl
12
+ from GPUtil import showUtilization as gpu_usage
13
 
14
  import pandas as pd
15
  import numpy as np
 
17
  import sys
18
  import plotly.graph_objects as go
19
  import torch
20
+ import gc
21
  print('GPU available',torch.cuda.is_available())
22
  print('__CUDA Device Name:',torch.cuda.get_device_name(0))
23
  print(os.getcwd())
 
91
  num_return_sequences=max_seqs,
92
  eos_token_id=0,
93
  )
94
+ print(gpu_usage())
95
  del protgpt2
96
  torch.cuda.empty_cache()
97
+ gc.collect()
98
+ print(gpu_usage())
99
  return sequences
100
 
101
 
102
  def run_alphafold(startsequence):
103
+ print(gpu_usage())
104
  model_runners = {}
105
  models = ["model_1"] # ,"model_2","model_3","model_4","model_5"]
106
  for model_name in models:
 
121
  **mk_mock_template(query_sequence),
122
  }
123
  plddts = predict_structure("test", feature_dict, model_runners)
124
+ del model_runners
125
+ gc.collect()
126
+ print(gpu_usage())
127
  return plddts["model_1"]
128
 
129