feiyang-cai committed
Commit 7f3de76 · Parent: dd57ba6
Files changed (3)
  1. app.py +1 -1
  2. llama_customized_models.py +0 -2
  3. utils.py +2 -4
app.py CHANGED
@@ -28,7 +28,7 @@ import spaces
 #property_names = list(candidate_models.keys())
 model = MolecularGenerationModel()

-@spaces.GPU(duration=60)
+@spaces.GPU(duration=120)
 def predict_single_label(logp, tpas, sas, qed, logp_choose, tpsa_choose, sas_choose, qed_choose):
     input_dict = dict()
     if logp_choose:
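The only functional change in app.py raises the ZeroGPU time budget: on Hugging Face Spaces, the spaces.GPU decorator requests a GPU allocation for the wrapped call, and duration is the requested window in seconds, so the commit doubles it from 60 to 120. A minimal sketch of the pattern, with a hypothetical function body standing in for the app's predict_single_label:

    import spaces
    import torch

    @spaces.GPU(duration=120)  # request up to 120 seconds of GPU time per call
    def generate(prompt: str) -> str:
        # hypothetical stand-in; the real app runs MolecularGenerationModel here
        device = "cuda" if torch.cuda.is_available() else "cpu"
        return f"{prompt} (ran on {device})"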
llama_customized_models.py CHANGED
@@ -130,9 +130,7 @@ class LlamaForCausalLMWithNumericalEmbedding(LlamaForCausalLM):
         assert len(properties) == b, "The number of properties should be equal to the batch size."
         assert len(properties_index) == b, "The number of properties_index should be equal to the batch size."

-        print(input_ids, "input_ids")
         embeddings = self.model.embed_tokens(input_ids)
-        print(embeddings, "embeddings")

         for i, (props, props_index, embeds) in enumerate(zip(properties, properties_index, embeddings)):
             assert len(props) == len(props_index), "The number of properties should be equal to the number of properties_index."
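This hunk only removes two debug prints around the embedding lookup. For context, the loop that follows appears to splice each sample's numerical property values into the token-embedding sequence at the positions listed in properties_index, which is how the property conditions reach the model. A sketch of that pattern under stated assumptions (embed_property is a hypothetical scalar-to-vector projection, and overwriting placeholder positions is assumed; the real method body is not shown in this diff):

    import torch
    import torch.nn as nn

    def splice_property_embeddings(
        embeddings: torch.Tensor,           # (batch, seq_len, hidden) from embed_tokens
        properties: list[list[float]],      # per-sample numerical property values
        properties_index: list[list[int]],  # per-sample target positions in the sequence
        embed_property: nn.Module,          # hypothetical: maps a (1,) scalar to (hidden,)
    ) -> torch.Tensor:
        for i, (props, props_index) in enumerate(zip(properties, properties_index)):
            assert len(props) == len(props_index)
            for value, pos in zip(props, props_index):
                scalar = torch.tensor([value], dtype=embeddings.dtype,
                                      device=embeddings.device)
                embeddings[i, pos] = embed_property(scalar)  # overwrite placeholder token
        return embeddings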
utils.py CHANGED
@@ -287,9 +287,7 @@ class MolecularGenerationModel():
         with torch.set_grad_enabled(False):
             early_stop_flags = torch.zeros(num_generations, dtype=torch.bool).to(self.model.device)
             for k in range(steps):
-                print("batch", batch)
                 logits = self.model(**batch)['logits']
-                print("logits", logits)
                 logits = logits[:, -1, :] / temperature
                 probs = F.softmax(logits, dim=-1)
                 ix = torch.multinomial(probs, num_samples=num_generations)
@@ -354,8 +352,8 @@ class MolecularGenerationModel():
         # delete the condition columns
         new_df = new_df.drop(columns=[col for col in new_df.columns if "condition" in col])

-        # drop the empty smiles rows
-        new_df = new_df.dropna(subset=['SMILES'])
+        # drop the rows
+        df = df[df["SMILES"] != ""]

         # convert the measured to 2 decimal places
         new_df = new_df.round(2)
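The first utils.py hunk likewise just drops debug prints from the generation loop, which is plain temperature sampling: take the logits at the last position, divide by temperature, softmax into a distribution, and draw with torch.multinomial. A self-contained sketch of that step (names are illustrative, not the class's API):

    import torch
    import torch.nn.functional as F

    def sample_next_token(logits: torch.Tensor, temperature: float = 1.0,
                          num_samples: int = 1) -> torch.Tensor:
        # logits: (batch, seq_len, vocab); keep only the last position
        scaled = logits[:, -1, :] / temperature  # <1 sharpens, >1 flattens
        probs = F.softmax(scaled, dim=-1)
        return torch.multinomial(probs, num_samples=num_samples)  # (batch, num_samples)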
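The second hunk changes how invalid generations are filtered: dropna(subset=['SMILES']) removes rows whose SMILES is NaN/None but keeps empty strings, while the new comparison removes empty strings but keeps NaN. Note the added line assigns to df even though every surrounding line operates on new_df, which looks like an oversight. A version that filters new_df and covers both cases would be (a sketch, not the committed code):

    # drop rows whose SMILES is missing (NaN/None) or an empty string
    new_df = new_df.dropna(subset=["SMILES"])
    new_df = new_df[new_df["SMILES"] != ""]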