# my_metric / my_metric.py
# (Hugging Face Hub page metadata, commented out so the file is valid Python:
#  author: saicharan2804 — "Code change", commit 8325032, raw/history/blame, 3.42 kB)
import evaluate
import datasets
import moses
from moses import metrics
import pandas as pd
from tdc import Evaluator
# Short human-readable summary surfaced by `evaluate` for this metric.
_DESCRIPTION = """
Moses and PyTDC metrics
"""
# Argument/return documentation; injected into the metric class docstring
# via the `add_start_docstrings` decorator below.
_KWARGS_DESCRIPTION = """
Args:
list_of_generated_smiles (`list` of `string`): Predicted labels.
list_of_test_smiles (`list` of `string`): test.
Returns:
All moses metrics
"""
# BibTeX entry for the MOSES benchmarking-platform paper (arXiv:1811.12823),
# reported as the metric's citation in `MetricInfo`.
_CITATION = """
@article{DBLP:journals/corr/abs-1811-12823,
author = {Daniil Polykovskiy and
Alexander Zhebrak and
Benjam{\'{\i}}n S{\'{a}}nchez{-}Lengeling and
Sergey Golovanov and
Oktai Tatanov and
Stanislav Belyaev and
Rauf Kurbanov and
Aleksey Artamonov and
Vladimir Aladinskiy and
Mark Veselov and
Artur Kadurin and
Sergey I. Nikolenko and
Al{\'{a}}n Aspuru{-}Guzik and
Alex Zhavoronkov},
title = {Molecular Sets {(MOSES):} {A} Benchmarking Platform for Molecular
Generation Models},
journal = {CoRR},
volume = {abs/1811.12823},
year = {2018},
url = {http://arxiv.org/abs/1811.12823},
eprinttype = {arXiv},
eprint = {1811.12823},
timestamp = {Fri, 26 Nov 2021 15:34:30 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-1811-12823.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class my_metric(evaluate.Metric):
    """Metric combining the full MOSES metric suite with selected PyTDC evaluators."""

    def _info(self):
        """Declare metric metadata and the input feature schema.

        The feature names declared here ("list_of_generated_smiles",
        "list_of_test_smiles") are the keyword names under which the
        `evaluate` framework forwards inputs to `_compute`, so the two
        must stay in sync.
        """
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "list_of_generated_smiles": datasets.Sequence(datasets.Value("string")),
                    "list_of_test_smiles": datasets.Sequence(datasets.Value("string")),
                }
                if self.config_name == "multilabel"
                else {
                    "list_of_generated_smiles": datasets.Value("string"),
                    "list_of_test_smiles": datasets.Value("string"),
                }
            ),
            reference_urls=["https://github.com/molecularsets/moses"],
        )

    def _compute(self, list_of_generated_smiles, list_of_test_smiles):
        """Compute all MOSES metrics plus five PyTDC evaluator scores.

        Args:
            list_of_generated_smiles: SMILES strings produced by the model.
                (Parameter names match the features declared in `_info`,
                which is how `evaluate` passes them in.)
            list_of_test_smiles: reference SMILES strings to compare against.

        Returns:
            dict: ``{"results": <dict of metric name -> score>}``.
        """
        # MOSES computes its whole metric battery from the generated set.
        # Fixed: the original referenced the undefined name
        # `list_of_generated_smiles` while its parameter was `generated_smiles`.
        results = metrics.get_all_metrics(list_of_generated_smiles)

        # Single-input PyTDC evaluators (generated set only).
        diversity = Evaluator(name="Diversity")(list_of_generated_smiles)
        validity = Evaluator(name="Validity")(list_of_generated_smiles)

        # Two-input PyTDC evaluators (generated set vs. reference set).
        kl_divergence = Evaluator(name="KL_Divergence")(
            list_of_generated_smiles, list_of_test_smiles
        )
        fcd_distance = Evaluator(name="FCD_Distance")(
            list_of_generated_smiles, list_of_test_smiles
        )
        novelty = Evaluator(name="Novelty")(
            list_of_generated_smiles, list_of_test_smiles
        )

        results.update(
            {
                "PyTDC_Diversity": diversity,
                "PyTDC_KL_Divergence": kl_divergence,
                "PyTDC_FCD_Distance": fcd_distance,
                "PyTDC_Novelty": novelty,
                # Fixed: the Validity result was previously assigned to the
                # `Novelty` variable, clobbering the novelty score and leaving
                # `Validity` undefined (NameError) at this line.
                "PyTDC_Validity": validity,
            }
        )
        return {"results": results}