saicharan2804 committed on
Commit d2d6755
1 Parent(s): 816dc36

Code change

Files changed (1)
  1. my_metric.py +36 -36
my_metric.py CHANGED
@@ -94,51 +94,51 @@ class my_metric(evaluate.Metric):
         evaluator = Evaluator(name = 'Validity')
         Validity = evaluator(generated_smiles)
 
-        oracle = Oracle(name = 'QED')
-        QED = oracle(generated_smiles)
+        # oracle = Oracle(name = 'QED')
+        # QED = oracle(generated_smiles)
 
-        oracle = Oracle(name = 'SA')
-        SA = oracle(generated_smiles)
+        # oracle = Oracle(name = 'SA')
+        # SA = oracle(generated_smiles)
 
-        oracle = Oracle(name = 'MPO')
-        MPO = oracle(generated_smiles)
-        MPO = {key: sum(values)/len(values) for key, values in MPO.items()}
+        # oracle = Oracle(name = 'MPO')
+        # MPO = oracle(generated_smiles)
+        # MPO = {key: sum(values)/len(values) for key, values in MPO.items()}
 
         oracle_list = [
             'QED', 'SA', 'MPO', '3pbl_docking', 'GSK3B', 'JNK3',
             'DRD2', 'LogP', 'Rediscovery', 'Similarity',
             'Median', 'Isomers', 'Valsartan_SMARTS', 'Hop'
         ]
 
         # Iterate through each oracle and compute its score
         for oracle_name in oracle_list:
             oracle = Oracle(name=oracle_name)
             if oracle_name in ['Rediscovery', 'MPO', 'Similarity', 'Median', 'Isomers', 'Hop']:
                 # Assuming these oracles return a dictionary where values are lists of scores
                 score = oracle(generated_smiles)
                 if isinstance(score, dict):
                     # Convert lists of scores to average score for these specific metrics
                     score = {key: sum(values)/len(values) for key, values in score.items()}
             else:
                 # Assuming other oracles return a list of scores
                 score = oracle(generated_smiles)
                 if isinstance(score, list):
                     # Convert list of scores to average score
                     score = sum(score) / len(score)
 
             Results.update({f"PyTDC_{oracle_name}": score})
 
 
-        Results.update({
-            "PyTDC_Diversity": Diversity,
-            "PyTDC_KL_Divergence": KL_Divergence,
-            "PyTDC_FCD_Distance": FCD_Distance,
-            "PyTDC_Novelty": Novelty,
-            "PyTDC_Validity": Validity,
-
-            "PyTDC_QED": sum(QED)/len(QED),
-            "PyTDC_SA": sum(SA)/len(SA),
-            "PyTDC_MPO": MPO
-        })
+        # Results.update({
+        #     "PyTDC_Diversity": Diversity,
+        #     "PyTDC_KL_Divergence": KL_Divergence,
+        #     "PyTDC_FCD_Distance": FCD_Distance,
+        #     "PyTDC_Novelty": Novelty,
+        #     "PyTDC_Validity": Validity,
+
+        #     "PyTDC_QED": sum(QED)/len(QED),
+        #     "PyTDC_SA": sum(SA)/len(SA),
+        #     "PyTDC_MPO": MPO
+        # })
 
         return {"results": Results}
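For context, the loop kept in the new version averages whatever each PyTDC oracle returns: single-property oracles such as QED or SA return a list of per-molecule scores, while the grouped oracles are assumed to return a dict mapping sub-tasks to score lists. Below is a minimal standalone sketch of that averaging pattern, assuming PyTDC is installed (pip install PyTDC) and using toy SMILES strings in place of generated_smiles; it is illustrative only, not the metric implementation itself.

    # Sketch of the dict-vs-list averaging used in my_metric.py (toy example, not the metric)
    from tdc import Evaluator, Oracle

    generated_smiles = ["CCO", "c1ccccc1", "CC(=O)Oc1ccccc1C(=O)O"]  # toy molecules

    # Validity evaluator returns a single float (fraction of valid SMILES)
    validity = Evaluator(name="Validity")(generated_smiles)

    results = {}
    for oracle_name in ["QED", "SA"]:  # these oracles return a list of per-molecule floats
        score = Oracle(name=oracle_name)(generated_smiles)
        if isinstance(score, dict):
            # Grouped oracles are assumed to return {sub_task: [scores]}; average each list
            score = {key: sum(values) / len(values) for key, values in score.items()}
        elif isinstance(score, list):
            # Collapse per-molecule scores to a single mean
            score = sum(score) / len(score)
        results[f"PyTDC_{oracle_name}"] = score

    results["PyTDC_Validity"] = validity
    print(results)

The same dict-vs-list branching is what lets the metric report one float per single-property oracle while keeping per-sub-task averages for the grouped oracles.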