# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| """This metric module is designed to solve the task of creating a world model and corresponding keys, | |
| given a formula and whether that formula needs to be satiesfied. Please read my M.Sc. thesis for details.""" | |

import datasets
import evaluate
import nltk
from nltk.sem.evaluate import Valuation, Model
from nltk.sem.logic import LogicParser
from nltk.sem.logic import *

from Parser import parse_LLM_output

# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:module,
title = {Teaching LLMs predicate Logic (M.Sc. Thesis)},
authors={Simon Döbele},
year={upcoming}
}
"""

# TODO: Add description of the module here
_DESCRIPTION = """\
This metric module is designed to solve the task of creating a world model and corresponding keys,
given a formula and whether that formula needs to be satisfied. Please read my M.Sc. thesis for details.
In summary, my compute function behaves like it usually does, but in order to be able to
compare references and predictions, I need to pass the predictions to a parser first.
That is why I rewrote just the compute function.
"""
# TODO: Add description of the arguments of the module here
_KWARGS_DESCRIPTION = """
Calculates how good the predictions are, given the references, by checking whether the
world model described by each prediction satisfies the corresponding formula.
Args:
    predictions: list of predictions to score. Each prediction should be the raw LLM
        output describing a world model, in the format expected by parse_LLM_output.
    references: list of references, one for each prediction. Each reference should be
        one of the labels "satisfied", "unsatisfied" or "undefined".
    formulas: list of formulas (as strings), one for each prediction; each formula is
        evaluated against the world model parsed from the corresponding prediction.
Returns:
    accuracy: fraction of examples for which the label derived from the parsed
        prediction matches the reference.
Examples:
    >>> my_new_module = evaluate.load("my_new_module")
    >>> results = my_new_module.compute(predictions=predictions, references=references, formulas=formulas)
    >>> print(results)
    {'accuracy': 1.0}
"""

# TODO: Define external resources urls if needed (once thesis is published)
# BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"

# def convert_valuation_back(valuation):
#     # this is necessary, as jsonl could not serialize sets, but nltk expects sets for predicates.
#     return [(v[0], set(v[1])) if v[0].isupper() else v for v in valuation]
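
# Illustrative example (an assumption about the serialization format, not taken from the
# code): if unary predicates are stored with fully upper-case symbols and their extensions
# as lists of entity names, a JSONL round-trip turns [("john", "b1"), ("WALK", {"b1"})]
# into [["john", "b1"], ["WALK", ["b1"]]], and convert_valuation_back restores the
# extension to a set, which is what nltk.sem.Valuation expects for predicates.
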
# def eval_task1(dataset):
#     results = []
#     for generated_output, target, valuation in zip(dataset["Predictions"], dataset["Target-sat"], dataset["Valuation"]):
#         try:
#             generated_formula = Expression.fromstring(generated_output)
#             #print("Parsed output:" + generated_formula)
#             valuation = convert_valuation_back(valuation)
#             #print("Valuation:" + valuation)
#             val = Valuation(valuation)
#             dom = val.domain
#             m = nltk.sem.evaluate.Model(dom, val)
#             g = nltk.sem.Assignment(dom)
#             sat = m.evaluate(generated_formula, g)
#             if sat == True:
#                 prediction = "satisfied"
#             elif sat == False:
#                 prediction = "unsatisfied"
#         except:
#             prediction = "undefined"
#         #print("Output:" + prediction + "-----Target:" + target)
#         results.append(prediction==target)
#     accuracy = sum(results)/len(results)
#     return accuracy
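
# Note on the nltk semantics used above and in _compute below: Model.evaluate(expr, g)
# parses the formula (given as a string) and returns True, False, or the string
# "Undefined" when the expression cannot be evaluated against the model (e.g. because of
# unknown constants), hence the explicit "undefined" label besides the True/False cases.
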

class AutoLogicCreateWorld(evaluate.Metric):
    """NOTE: compared to a standard evaluate.Metric, only the compute function changes,
    as well as the datasets.Value types (we are working with strings)."""

    def _info(self):
        # TODO: Specifies the evaluate.EvaluationModuleInfo object
        return evaluate.MetricInfo(
            # This is the description that will appear on the modules page.
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # This defines the format of each prediction and reference
            features=datasets.Features({
                'predictions': datasets.Value('string'),
                'references': datasets.Value('string'),
                'formulas': datasets.Value('string')
            }),
            # Homepage of the module for documentation
            homepage="http://module.homepage",  # TODO: change, once thesis is published
            # Additional links to the codebase or references
            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],  # TODO: change, once thesis is published
            reference_urls=["http://path.to.reference.url/new_module"]  # TODO: change, once thesis is published
        )

    def _download_and_prepare(self, dl_manager):
        """Optional: download external resources useful to compute the scores"""
        # TODO: Download external resources if needed
        pass

    def _compute(self, predictions, references, formulas):
        """Returns the accuracy, given the parsed output."""
        results = []
        for generated_output, target, formula in zip(predictions, references, formulas):
            # Parse the raw LLM output into a valuation (a list of symbol/value pairs).
            v = parse_LLM_output(generated_output)
            if len(v) == 0:
                # Nothing could be parsed, so no world model can be built.
                prediction = "undefined"
            else:
                # Build an nltk model from the parsed valuation and evaluate the formula in it.
                val = Valuation(v)
                dom = val.domain
                m = nltk.sem.evaluate.Model(dom, val)
                g = nltk.sem.Assignment(dom)
                sat = m.evaluate(formula, g)
                if sat is True:
                    prediction = "satisfied"
                elif sat is False:
                    prediction = "unsatisfied"
                else:
                    prediction = "undefined"
            results.append(prediction == target)
        accuracy = sum(results) / len(results)
        return {
            "accuracy": accuracy,
        }
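

if __name__ == "__main__":
    # Minimal smoke test of the nltk machinery used in _compute, on a hand-built
    # valuation; this bypasses parse_LLM_output, whose input format is defined by the
    # accompanying Parser module / thesis and is not assumed here.
    example_val = Valuation([("john", "b1"), ("mary", "b2"), ("walk", {("b1",)})])
    example_dom = example_val.domain
    example_model = Model(example_dom, example_val)
    example_g = nltk.sem.Assignment(example_dom)
    print(example_model.evaluate("walk(john)", example_g))  # True: "b1" is in the extension of "walk"
    print(example_model.evaluate("walk(mary)", example_g))  # False: "b2" is not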