Commit 87283e2
Parent(s): 0e559c4

Fix dataset viewer (#3)

- Delete dataset_info from README (013210da4ac22fadbd20070a676d8aa345e87f01)
- Delete Python files (58fdf71183e6e8049a132bf53c38b34d1fef8bd7)
- Delete generated_facts.json (610949f573b4f8648417cd8373618252b08a5732)

Co-authored-by: Albert Villanova <albertvillanova@users.noreply.huggingface.co>

Files changed:
- Fin-Fact.py +0 -79
- README.md +0 -14
- anli.py +0 -90
- bart_eval.py +0 -95
- bart_explaination_gen.py +0 -61
- bert_gen.py +0 -83
- data_crawler.py +0 -76
- data_generator.py +0 -171
- generated_facts.json +0 -17
- gpt2_nli.py +0 -70
- pegasus_gen.py +0 -70
- xl_sum_gen.py +0 -80
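With the loading script (Fin-Fact.py) and the stale dataset_info metadata removed, the Hub viewer reads finfact.json directly. As a minimal sketch of how the dataset could then be loaded with the datasets library (assuming the JSON file is auto-detected by the packaged JSON loader and exposed as a single "train" split; the exact columns come from finfact.json, not the deleted script):

from datasets import load_dataset

# Pull the Fin-Fact data straight from the Hub repo; with the custom script gone,
# the generic JSON loader handles finfact.json (assumption: auto-detection applies).
ds = load_dataset("amanrangapur/Fin-Fact")

print(ds)              # splits and column names detected by the loader
print(ds["train"][0])  # first record (assumed split name "train")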
Fin-Fact.py
DELETED
@@ -1,79 +0,0 @@
"""Fin-Fact dataset."""

import json
import datasets

_CITATION = """\
@misc{rangapur2023finfact,
    title={Fin-Fact: A Benchmark Dataset for Multimodal Financial Fact Checking and Explanation Generation},
    author={Aman Rangapur and Haoran Wang and Kai Shu},
    year={2023},
    eprint={2309.08793},
    archivePrefix={arXiv},
    primaryClass={cs.AI}
}
"""

_DESCRIPTION = """\
Fin-Fact is a comprehensive dataset designed specifically for financial fact-checking and explanation generation.
The dataset consists of 3121 claims spanning multiple financial sectors.
"""

_HOMEPAGE = "https://github.com/IIT-DM/Fin-Fact"
_LICENSE = "Apache 2.0"
_URL = "https://huggingface.co/datasets/amanrangapur/Fin-Fact/resolve/main/finfact.json"

class FinFact(datasets.GeneratorBasedBuilder):
    """Fin-Fact dataset for financial fact-checking and text generation."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="generation",
            version=VERSION,
            description="The Fin-Fact dataset for financial fact-checking and text generation",
        ),
    ]

    DEFAULT_CONFIG_NAME = "generation"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "url": datasets.Value("string"),
                    "claim": datasets.Value("string"),
                    "author": datasets.Value("string"),
                    "posted": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_file = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": downloaded_file,
                },
            ),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
            for id_, row in enumerate(data):
                yield id_, {
                    "url": row.get("url", ""),
                    "claim": row.get("claim", ""),
                    "author": row.get("author", ""),
                    "posted": row.get("posted", ""),
                    "label": row.get("label", ""),
                }
README.md
CHANGED
@@ -10,20 +10,6 @@ tags:
 pretty_name: FinFact
 size_categories:
 - 1K<n<10K
-dataset_info:
-- config_name: generation
-  features:
-  - name: url
-    dtype: string
-  - name: claim
-    dtype: string
-  - name: author
-    dtype: string
-  - name: posted
-    dtype: string
-  - name: label
-    dtype: string
-
 ---
 
 <h1 align="center">Fin-Fact - Financial Fact-Checking Dataset</h1>
anli.py
DELETED
@@ -1,90 +0,0 @@
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import argparse
import json
from sklearn.metrics import confusion_matrix, accuracy_score, recall_score, precision_score, classification_report, f1_score

class FactCheckerApp:
    def __init__(self, hg_model_hub_name='ynie/electra-large-discriminator-snli_mnli_fever_anli_R1_R2_R3-nli'):
        # hg_model_hub_name = "ynie/roberta-large-snli_mnli_fever_anli_R1_R2_R3-nli"
        # hg_model_hub_name = "ynie/albert-xxlarge-v2-snli_mnli_fever_anli_R1_R2_R3-nli"
        # hg_model_hub_name = "ynie/bart-large-snli_mnli_fever_anli_R1_R2_R3-nli"
        # hg_model_hub_name = "ynie/electra-large-discriminator-snli_mnli_fever_anli_R1_R2_R3-nli"
        # hg_model_hub_name = "ynie/xlnet-large-cased-snli_mnli_fever_anli_R1_R2_R3-nli"

        self.max_length = 248
        self.tokenizer = AutoTokenizer.from_pretrained(hg_model_hub_name)
        self.model = AutoModelForSequenceClassification.from_pretrained(hg_model_hub_name)
        self.sentences_list = []
        self.titles_list = []
        self.labels_list = []
        self.claim_list = []

    def load_data(self, filename):
        with open(filename, "r") as infile:
            self.data = json.load(infile)

    def preprocess_data(self):
        for entry in self.data:
            if "data" in entry:
                self.titles_list.append(entry["title"])
                _evidence = ' '.join([item["sentence"] for item in entry["data"]])
                self.sentences_list.append(_evidence)
                self.labels_list.append(entry["label"])

    def validate_claims(self, threshold=0.5):
        for title, evidence in zip(self.titles_list, self.sentences_list):
            tokenized_input_seq_pair = self.tokenizer.encode_plus(evidence, title,
                                                                  max_length=self.max_length,
                                                                  return_token_type_ids=True, truncation=True)
            input_ids = torch.Tensor(tokenized_input_seq_pair['input_ids']).long().unsqueeze(0)
            token_type_ids = torch.Tensor(tokenized_input_seq_pair['token_type_ids']).long().unsqueeze(0)
            attention_mask = torch.Tensor(tokenized_input_seq_pair['attention_mask']).long().unsqueeze(0)
            outputs = self.model(input_ids,
                                 attention_mask=attention_mask,
                                 labels=None)
            predicted_probability = torch.softmax(outputs.logits, dim=1)[0].tolist()
            entailment_prob = predicted_probability[0]
            neutral_prob = predicted_probability[1]
            contradiction_prob = predicted_probability[2]

            if entailment_prob > threshold:
                is_claim_true = "true"
            elif neutral_prob > threshold:
                is_claim_true = "neutral"
            else:
                is_claim_true = "false"

            print(is_claim_true)
            self.claim_list.append(is_claim_true)

    def calculate_metrics(self):
        precision = precision_score(self.labels_list, self.claim_list, average='macro')
        accuracy = accuracy_score(self.labels_list, self.claim_list)
        f1_scoree = f1_score(self.labels_list, self.claim_list, average='macro')
        conf_matrix = confusion_matrix(self.labels_list, self.claim_list)
        recall_metric = recall_score(self.labels_list, self.claim_list, pos_label="true", average="macro")
        cls_report = classification_report(self.labels_list, self.claim_list, labels=["true", "false", "neutral"])
        return precision, accuracy, f1_scoree, conf_matrix, recall_metric, cls_report

def parse_args():
    parser = argparse.ArgumentParser(description="Fact Checker Application")
    parser.add_argument("--model_name", default="ynie/bart-large-snli_mnli_fever_anli_R1_R2_R3-nli", help="Name of the pre-trained model to use")
    parser.add_argument("--data_file", required=True, help="Path to the JSON data file")
    parser.add_argument("--threshold", type=float, default=0.5, help="Threshold for claim validation")
    return parser.parse_args()

if __name__ == "__main__":
    args = parse_args()
    fact_checker_app = FactCheckerApp(hg_model_hub_name=args.model_name)
    fact_checker_app.load_data(args.data_file)
    fact_checker_app.preprocess_data()
    fact_checker_app.validate_claims(threshold=args.threshold)
    precision, accuracy, f1_scoree, conf_matrix, recall_metric, cls_report = fact_checker_app.calculate_metrics()
    print("Precision:", precision)
    print("Accuracy:", accuracy)
    print("F1 score:", f1_scoree)
    print("Recall: ", recall_metric)
    print("Confusion Matrix:\n", conf_matrix)
    print("Report:\n", cls_report)
bart_eval.py
DELETED
@@ -1,95 +0,0 @@
import json, itertools, pyter
from nltk.translate.bleu_score import SmoothingFunction, corpus_bleu


class NLPFactGenerator:
    def __init__(self):
        self.gen_fact_list = []
        self.evidence_list = []

    def _split_into_words(self, sentences):
        return list(itertools.chain(*[_.split(" ") for _ in sentences]))

    def _get_word_ngrams(self, n, sentences):
        assert len(sentences) > 0
        assert n > 0
        words = self._split_into_words(sentences)
        return self._get_ngrams(n, words)

    def _get_ngrams(self, n, text):
        ngram_set = set()
        text_length = len(text)
        max_index_ngram_start = text_length - n
        for i in range(max_index_ngram_start + 1):
            ngram_set.add(tuple(text[i:i + n]))
        return ngram_set

    def load_data(self, filename):
        with open(filename, "r") as infile:
            self.data = json.load(infile)

    def get_title_evidence_generated_facts(self):
        titles = []
        evidences = []
        generated_facts = []

        for entry in self.data:
            titles.append(entry["title"])
            evidences.append(entry["evidence"])
            generated_facts.append(entry["generated_fact"])

        return evidences, generated_facts

    def ter(self):
        ref, gen = self.get_title_evidence_generated_facts()
        if len(ref) == 1:
            total_score = pyter.ter(gen[0].split(), ref[0].split())
        else:
            total_score = 0
            for i in range(len(gen)):
                total_score = total_score + pyter.ter(gen[i].split(), ref[i].split())
            total_score = total_score/len(gen)
        return total_score

    def bleu(self):
        evidence_list, gen_fact_list = self.get_title_evidence_generated_facts()
        ref_bleu = []
        gen_bleu = []
        for l in evidence_list:
            gen_bleu.append(l.split())
        for i,l in enumerate(gen_fact_list):
            ref_bleu.append([l.split()])
        cc = SmoothingFunction()
        score_bleu = corpus_bleu(ref_bleu, gen_bleu, weights=(0, 1, 0, 0), smoothing_function=cc.method4)
        return score_bleu

    def rouge_one(self, n=3):
        evidence_list, gen_fact_list = self.get_title_evidence_generated_facts()
        evaluated_ngrams = self._get_word_ngrams(n, evidence_list)
        reference_ngrams = self._get_word_ngrams(n, gen_fact_list)
        reference_count = len(reference_ngrams)
        evaluated_count = len(evaluated_ngrams)
        overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
        overlapping_count = len(overlapping_ngrams)
        if evaluated_count == 0:
            precision = 0.0
        else:
            precision = overlapping_count / evaluated_count

        if reference_count == 0:
            recall = 0.0
        else:
            recall = overlapping_count / reference_count

        f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8))
        return recall


if __name__ == "__main__":
    fact_generator = NLPFactGenerator()
    fact_generator.load_data("generated_facts_xlsum.json")
    rouge_one_score = fact_generator.rouge_one()
    blue_score = fact_generator.bleu()
    print(blue_score)
    print(rouge_one_score)
bart_explaination_gen.py
DELETED
@@ -1,61 +0,0 @@
from transformers import BartTokenizer, BartForConditionalGeneration
import json

class NLPFactGenerator:
    def __init__(self, model_name="facebook/bart-large-cnn"):
        self.max_length = 1024
        self.model = BartForConditionalGeneration.from_pretrained(model_name)
        self.tokenizer = BartTokenizer.from_pretrained(model_name)
        self.sentences_list = []
        self.justification_list = []
        self.titles_list = []
        self.labels_list = []
        self.claim_list = []

    def load_data(self, filename):
        with open(filename, "r") as infile:
            self.data = json.load(infile)

    def preprocess_data(self):
        max_seq_length = 1024
        for entry in self.data:
            if "data" in entry:
                self.titles_list.append(entry["title"])
                justification = ' '.join(entry["paragraphs"])
                for evidence in self.sentences_list:
                    if len(evidence) > max_seq_length:
                        evidence = evidence[:max_seq_length]
                _evidence = ' '.join([item["sentence"] for item in entry["data"]])
                self.justification_list.append(justification)
                self.sentences_list.append(_evidence)
                self.labels_list.append(entry["label"])

    def generate_fact(self):
        max_seq_length = 1024
        generated_facts = []
        for evidence in self.justification_list:
            if len(evidence) > max_seq_length:
                evidence = evidence[:max_seq_length]
            input_ids = self.tokenizer.encode(evidence, return_tensors="pt")
            try:
                generated_ids = self.model.generate(input_ids, max_length=self.max_length, num_return_sequences=1)
                generated_text = self.tokenizer.decode(generated_ids[0], skip_special_tokens=True)
                print('Done')
                print('*'*50)
                generated_facts.append(generated_text)
            except:
                print('Input ID: ', len(input_ids))
        return generated_facts


if __name__ == "__main__":
    fact_generator = NLPFactGenerator()
    fact_generator.load_data("finfact_old.json")
    fact_generator.preprocess_data()
    generated_facts = fact_generator.generate_fact()
    generated_data = []

    for title, evi, fact in zip(fact_generator.titles_list, fact_generator.sentences_list, generated_facts):
        generated_data.append({"title": title, "evidence":evi, "generated_fact": fact})
    with open("generated_facts.json", "w") as outfile:
        json.dump(generated_data, outfile, indent=4)
bert_gen.py
DELETED
@@ -1,83 +0,0 @@
import torch
from transformers import BertTokenizerFast, EncoderDecoderModel
import json

class NLPFactGenerator:
    def __init__(self, ckpt="mrm8488/bert2bert_shared-german-finetuned-summarization"):
        self.max_length = 1024
        self.tokenizer = BertTokenizerFast.from_pretrained(ckpt)
        self.model = EncoderDecoderModel.from_pretrained(ckpt)
        self.sentences_list = []
        self.justification_list = []
        self.titles_list = []
        self.labels_list = []
        self.claim_list = []

    def load_data(self, filename):
        with open(filename, "r") as infile:
            self.data = json.load(infile)

    def preprocess_data(self):
        max_seq_length = 1024
        for entry in self.data:
            if "data" in entry:
                self.titles_list.append(entry["title"])
                justification = ' '.join(entry["paragraphs"])
                for evidence in self.sentences_list:
                    if len(evidence) > max_seq_length:
                        evidence = evidence[:max_seq_length]
                _evidence = ' '.join([item["sentence"] for item in entry["data"]])
                self.justification_list.append(justification)
                self.sentences_list.append(_evidence)
                self.labels_list.append(entry["label"])

    def generate_fact(self):
        max_seq_length = 1024
        generated_facts = []
        count = 0
        for evidence in self.justification_list:
            if len(evidence) > max_seq_length:
                evidence = evidence[:max_seq_length]
            inputs = self.tokenizer([evidence], padding="max_length", truncation=True, max_length=1024, return_tensors="pt")
            input_ids = inputs.input_ids
            attention_mask = inputs.attention_mask
            try:
                output = self.model.generate(input_ids, attention_mask=attention_mask)
                summary = self.tokenizer.decode(output[0], skip_special_tokens=True)
                count+=1
                print(count)
                generated_facts.append(summary)
            except:
                print('Input ID: ', len(input_ids))
        return generated_facts


if __name__ == "__main__":
    fact_generator = NLPFactGenerator()
    fact_generator.load_data("finfact_old.json")
    fact_generator.preprocess_data()
    generated_facts = fact_generator.generate_fact()
    generated_data = []

    for title, evi, fact in zip(fact_generator.titles_list, fact_generator.sentences_list, generated_facts):
        generated_data.append({"title": title, "evidence":evi, "generated_fact": fact})
    with open("generated_facts_bert.json", "w") as outfile:
        json.dump(generated_data, outfile, indent=4)


device = 'cuda' if torch.cuda.is_available() else 'cpu'
ckpt = 'mrm8488/bert2bert_shared-german-finetuned-summarization'
tokenizer = BertTokenizerFast.from_pretrained(ckpt)
model = EncoderDecoderModel.from_pretrained(ckpt).to(device)
def generate_summary(text):
    inputs = tokenizer([text], padding="max_length", truncation=True, max_length=512, return_tensors="pt")
    input_ids = inputs.input_ids.to(device)
    attention_mask = inputs.attention_mask.to(device)
    output = model.generate(input_ids, attention_mask=attention_mask)
    return tokenizer.decode(output[0], skip_special_tokens=True)

text = "Your text here..."

generate_summary(text)
data_crawler.py
DELETED
@@ -1,76 +0,0 @@
from bs4 import BeautifulSoup
import pandas as pd
import requests
import json

class FactCheckerScraper:
    def __init__(self):
        self.authors = []
        self.statements = []
        self.sources = []
        self.targets = []
        self.href_list = []

    def scrape_website(self, page_number, source):
        page_num = str(page_number)
        URL = 'https://www.politifact.com/factchecks/list/?category=income&page={}&source={}'.format(page_num, source)
        webpage = requests.get(URL)
        soup = BeautifulSoup(webpage.text, "html.parser")
        statement_footer = soup.find_all('footer', attrs={'class':'m-statement__footer'})
        statement_quote = soup.find_all('div', attrs={'class':'m-statement__quote'})
        statement_meta = soup.find_all('div', attrs={'class':'m-statement__meta'})
        target = soup.find_all('div', attrs={'class':'m-statement__meter'})

        for i in statement_footer:
            link1 = i.text.strip()
            name_and_date = link1.split()
            first_name = name_and_date[1]
            last_name = name_and_date[2]
            full_name = first_name+' '+last_name
            self.authors.append(full_name)

        for i in statement_quote:
            link2 = i.find_all('a')
            self.statements.append(link2[0].text.strip())

        for i in statement_meta:
            link3 = i.find_all('a')
            source_text = link3[0].text.strip()
            self.sources.append(source_text)

        for i in target:
            fact = i.find('div', attrs={'class':'c-image'}).find('img').get('alt')
            self.targets.append(fact)

        for i in statement_quote:
            href = i.find('a')['href']
            href = 'https://www.politifact.com' + href
            self.href_list.append(href)

    def scrape_multiple_pages(self, num_pages, source):
        for i in range(1, num_pages):
            self.scrape_website(i, source)

    def create_dataframe(self):
        data = pd.DataFrame(columns=['author', 'statement', 'links', 'source', 'date', 'target'])
        data['author'] = self.authors
        data['statement'] = self.statements
        data['links'] = self.href_list
        data['source'] = self.sources
        data['target'] = self.targets
        return data

    def save_to_json(self, filename):
        data_json = {
            "url": self.href_list,
            "label": self.targets
        }

        with open(filename, "w") as outfile:
            json.dump(data_json, outfile)

if __name__ == "__main__":
    scraper = FactCheckerScraper()
    scraper.scrape_multiple_pages(70, source='covid')
    data = scraper.create_dataframe()
    scraper.save_to_json("./income.json")
data_generator.py
DELETED
@@ -1,171 +0,0 @@
from bs4 import BeautifulSoup
import datetime
import requests
import nltk
import json

class WebScraper:
    def __init__(self, url):
        self.URL = url
        try:
            self.webpage = requests.get(self.URL)
        except requests.exceptions.RequestException as e:
            print(f"Error: {e}")
        if self.webpage:
            try:
                self.soup = BeautifulSoup(self.webpage.text, "html.parser")
            except:
                print("Error: Failed to create BeautifulSoup object.")

    def remove_unicode(self, string):
        return string.encode('ascii', 'ignore').decode('utf-8')

    def get_page_title(self):
        try:
            div_element = self.soup.find('div', class_='m-statement__quote')
            title = div_element.get_text(strip=True)
            cleaned_title = self.remove_unicode(title)
            cleaned_title = cleaned_title.replace('\"', '')
        except AttributeError:
            return None  # Error: Failed to get page title.
        return cleaned_title

    def get_page_author(self):
        try:
            author_element = self.soup.find('div', class_='m-author__content').find('a')
            author = author_element.get_text(strip=True)
        except AttributeError:
            return None  # Error: Failed to get page author.
        return author

    def get_page_posted_date(self):
        date_element = None
        try:
            date_element = self.soup.find('span', class_='m-author__date')
            date = date_element.get_text(strip=True)
            date_obj = datetime.datetime.strptime(date, "%B %d, %Y")
            formatted_date = date_obj.strftime("%m/%d/%Y")
        except (AttributeError, ValueError):
            return None  # Error: Failed to get page posted date.
        return formatted_date

    def get_sci_check_digest(self):
        try:
            div_element = self.soup.find('div', class_='short-on-time')
            li_tags = div_element.find_all('li') if div_element else []
            sci_digest_list = [li.get_text(strip=True) for li in li_tags]
            final_sci_digest = ", ".join(sci_digest_list)
            cleaned_sci_digest = self.remove_unicode(final_sci_digest)
            cleaned_sci_digest = cleaned_sci_digest.replace('\"', '')
            tokenised_sci_digest = nltk.sent_tokenize(cleaned_sci_digest)
        except AttributeError:
            return None  # Error: Failed to get SciCheck digest.
        return tokenised_sci_digest

    def get_paragraph_list(self):
        try:
            paragraph_list = []
            article_element = self.soup.find('article', class_='m-textblock')
            p_elements = article_element.find_all('p')
            text_list = [p.get_text(strip=True) for p in p_elements]
            for text in text_list:
                paragraph_list.append(text)
            final_paragraphs = " ".join(paragraph_list)
            cleaned_paragraphs = final_paragraphs.replace('\u00a0', ' ')
            cleaned_paragraphs = self.remove_unicode(cleaned_paragraphs)
            cleaned_paragraphs = cleaned_paragraphs.replace('\"', '')
            tokenized_paragraphs = nltk.sent_tokenize(cleaned_paragraphs)
        except AttributeError:
            return None, None  # Error: Failed to get paragraphs.
        return paragraph_list, tokenized_paragraphs

    def get_sentences_citations(self):
        try:
            p_elements = self.soup.select('article.m-textblock p')
            citation_list = []
            for p in p_elements:
                href = p.find('a')
                if href and 'href' in href.attrs:
                    href_text = href['href']
                    sentence = p.get_text(strip=True)
                    cleaned_sentence = sentence.replace('\u00a0', ' ')
                    cleaned_sentence = self.remove_unicode(cleaned_sentence)
                    cleaned_sentence = cleaned_sentence.replace('\"', '')
                    citation_list.append({"sentence": cleaned_sentence, "hrefs": href_text})
        except AttributeError:
            return None  # Error: Failed to get citation list.
        return citation_list

    def get_issue_list(self):
        issue_list = []
        try:
            ul_element = self.soup.find('ul', class_='m-list--horizontal')
            li_elements = ul_element.find_all('li', class_='m-list__item')
            for li in li_elements[:-1]:
                category = li.a['title']
                issue_list.append(category)
        except AttributeError:
            return None  # Error: Failed to get issue list.
        return issue_list

    def get_image_info(self):
        try:
            article_element = self.soup.find('article', class_='m-textblock')
            p_elements = article_element.find_all('p')
            em_elements = article_element.find_all('em')
            img_count = 0
            image_captions = []
            for p in p_elements:
                img_tag = p.find('img')
                if img_tag:
                    img_src = img_tag['src']
                    if img_src:
                        img_count += 1
                        if img_count <= len(em_elements):
                            image_caption = em_elements[img_count - 1].get_text(strip=True)
                            cleaned_captions = image_caption.replace('\u00a0', ' ')
                            cleaned_captions = self.remove_unicode(cleaned_captions)
                            cleaned_captions = cleaned_captions.replace('\"', '')
                            image_captions.append({"image_src": img_src, "image_caption": cleaned_captions})
        except:
            return None
        return image_captions

    def get_label(self):
        try:
            target = self.soup.find_all('div', attrs={'class':'m-statement__meter'})
            for i in target:
                label = i.find('div', attrs={'class':'c-image'}).find('img').get('alt')
                # if label == 'pants-fire':
                #     label = 'false'
                # elif label == 'mostly-true':
                #     label = 'true'
        except:
            return None
        return label

with open("./income.json", "r") as infile:
    data = json.load(infile)
    urls = data["url"]
    labels = data["label"]

scraped_data = []
for url, label in zip(urls,labels):
    print(url)
    scraper = WebScraper(url)
    data = {
        "url": url,
        "title": scraper.get_page_title(),
        "author": scraper.get_page_author(),
        "posted": scraper.get_page_posted_date(),
        "sci_digest": scraper.get_sci_check_digest(),
        "paragraphs": scraper.get_paragraph_list()[1],
        "issues": scraper.get_issue_list(),
        "image_data": scraper.get_image_info(),
        "data": scraper.get_sentences_citations(),
        "label": label
    }
    scraped_data.append(data)

with open("./json_new/income.json", "w") as outfile:
    json.dump(scraped_data, outfile)
generated_facts.json
DELETED
@@ -1,17 +0,0 @@
[
    {
        "title": "Video shows that George Soros is going bankrupt.",
        "evidence": "IT HAPPENED, the July 18postsays. Elon Musk L.E.A.K.E.D_ GEORGE SOROS IS GOING B.A.N.K.R.U.P.T_, JOE BIDEN S.C.A.RED! - | BREAKING FOX NEWS BIDEN - JULY 19, 2023. This post was flagged as part of Metas efforts to combat false news and misinformation on its News Feed. (Read more about ourpartnership with Meta, which owns Facebook and Instagram.) According to Forbes, Soros wasworth $6.7 billionas of July 20. We found no news reports that hes heading into bankruptcy or that Musk leaked this information. The narrator also refers torecent newsthat Soros nonprofit grant-making network, Open Society Foundations, plans to lay off 40% of its global staff as Soros hands over leadership tohis son, Alex Soros. The organization says it currently employs about 800 people around the world. In a statement, Open Society Foundations said its new operating model is meant to maximize its grantmaking dollars, The Associated Pressreported. The news outlet didnt hint that neither the nonprofit nor Soros is on the verge of bankruptcy. Soros has already given $32 billion to the foundations and has indicatedto The New York Timesthat his fortune will go to his familys philanthropic efforts. Such a gesture hasnt been highly regarded by Twitter owner Elon Musk, who recentlytweetedthat Soros wants to erode the very fabric of civilization.",
        "generated_fact": "A recent Facebook post claims that billionaire philanthropist George Soros is in the red, but it provides no evidence that this is true. According to Forbes, Soros wasworth $6.7 billionas of July 20. This post was flagged as part of Metas efforts to combat false news and misinformation on its News Feed."
    },
    {
        "title": "Because of Obamacare, Medicare is going broke.",
        "evidence": "As U.S. House SpeakerPaul Ryandiscussed the repeal and replacement of the Affordable Care Act, a top priority of President-electDonald Trump, he made a claim about the financial health ofMedicare. What people dont realize is, because of Obamacare, Medicare is going broke, the Wisconsin RepublicantoldFox News hostBret Baieron Nov. 10, 2016. The Affordable Care Act certainly has its detractors. When Ryan claimed the same month that Obamacare is not a popular law, our rating wasMostly True. The polls generally showed most Americans had an unfavorable opinion of it. https://www.sharethefacts.co/share/7de6b429-1d9a-42de-ad08-c7998eaa6ca5 Heres howMarc Goldwein, senior policy director at the Committee for a Responsible Federal Budget, lays it out: Two health care policy experts further told us that the combination of spending curbs and revenue increases forMedicare Part A-- which covers hospital, nursing home and home care services, and is financed mainly through payroll taxes -- has extended the solvency of Medicare. Paul Ginsburg, a professor of the practice of health policy and management at the University of Southern California, pointed us to anOctober 2016 reportfrom the nonpartisan Congressional Research Service. The report noted that in 2010, the Medicare trustees projected that Obamacares spending curbs and revenue increases would mean MedicaresHospital Insurance Trust Fund, which finances Part A, would remain solvent until 2029. (The projection has since been revisedto 2028.) Michael Sparer, a professor health policy and management at Columbia University in New York, put it this way, financially speaking: Finally, its worth noting that bothFactcheck.organd the Washington PostFact Checkeralso concluded that Ryans claim is wrong. They both also noted that, even if the Part A trust fund did become depleted in 2028, the program wouldnt be broke, in that it is still set up to cover an estimated 87 percent of expenses.",
        "generated_fact": "U.S. House Speaker Paul Ryan made a claim about the financial health of Medicare. Ryan: Because of Obamacare, Medicare is going broke. Ryan's claim flies in the face of evidence that it actually helps shore up Medicare. On one side of the ledger, Obamacare slowed the growth of Medicare spending."
    },
    {
        "title": "Back in the Great Recession, when millions of jobs across America hung in the balance, Donald Trump said rescuing the auto industry didnt really matter very much. He said, and I quote again, Let it go. ",
        "evidence": "Trumps position on an auto bailout was inconsistent leading up to Dec. 19, 2008, when Bush laid out plans to go forward with the bailout. Trump said the government should help the auto companies, but he also regularly suggested the they could save themselves if they filed forChapter 11 bankruptcy,with some government support. Mid 2009, months into his first year as president,ObamaforcedChryslerandGeneral Motorsto go through bankruptcy. Dec. 9, 2008, Fox:Trump told host Greta Van Susteren that I think you have to try and save the companies, and I think you can easily save the companies. He added, referring to bankruptcy, that auto companies don't like the B word. I think they probably should use it. Dec. 10, 2008, CNBC:You have to save the car industry in this country, Trump said. General Motors can be great again, Ford can be great again, and Chrysler could be great. Dec. 17, 2008, Fox:Cavuto asked Trump if he thought the country could do without one of the Big Three auto makers. Trump responded, No. I think you should have the Big Three. I think the government should stand behind them 100 percent. You cannot lose the auto companies. They are great. They make wonderful products. Maybe they are making too much. Maybe they are not making too much. I just bought a Dodge Ram truck from Arrigo Dodge, who is a member of one of my clubs and a great guy. Dec. 18, 2008, CNN:Host Wolf Blitzer asked Trump if Bush should bail out the auto industry. Trump replied, Well, I hope he does it, but you have to make a much better deal with the unions. The fact is you have to make the companies competitive. And if you're not going to make a deal with the union, and a great and fair deal and a competitive deal, then they shouldn't do it. But absolutely, they should try and save the companies. You just can't lose Chrysler, you can't lose Ford, and you can't lose General Motors. You could have let it go, and rebuilt itself, through the free enterprise system, Trump said in August 2015, according to theWashington Post. You could have let it go bankrupt, frankly, and rebuilt itself, and a lot of people felt it should happen. Or you could have done it the way it went. I could have done it either way. Either way would have been acceptable. I think you would have wound up in the same place.",
        "generated_fact": "Hillary Clinton said at a rally in Michigan that she supported the 2008 government bailout for the American auto industry. But Trump, she said, doesnt support American workers, much less those in MichigansAuto industry. Trump's public comments about the auto industry's demise were the opposite of what Clinton said."
    }
]
gpt2_nli.py
DELETED
@@ -1,70 +0,0 @@
from transformers import (
    GPT2LMHeadModel,
    GPT2Tokenizer,
)
import argparse
import warnings
warnings.filterwarnings("ignore")

from fact_checking import FactChecker
import json
from sklearn.metrics import confusion_matrix, classification_report

class FactCheckerApp:
    def __init__(self, model_name='fractalego/fact-checking'):
        self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        self.fact_checking_model = GPT2LMHeadModel.from_pretrained(model_name)
        self.fact_checker = FactChecker(self.fact_checking_model, self.tokenizer)
        self.sentences_list = []
        self.titles_list = []
        self.labels_list = []
        self.claim_list = []

    def load_data(self, filename):
        with open(filename, "r") as infile:
            self.data = json.load(infile)

    def preprocess_data(self):
        for entry in self.data:
            if "data" in entry:
                self.titles_list.append(entry["title"])
                _evidence = ' '.join([item["sentence"] for item in entry["data"]])
                self.sentences_list.append(_evidence)
                self.labels_list.append(entry["label"])

    def validate_claims(self):
        max_seq_length = 1024
        for title, evidence in zip(self.titles_list, self.sentences_list):
            try:
                if len(title) > max_seq_length:
                    title = title[:max_seq_length]
                if len(evidence) > max_seq_length:
                    evidence = evidence[:max_seq_length]
                print(len(evidence))
                is_claim_true = self.fact_checker.validate(evidence, title)
                print(is_claim_true)
                self.claim_list.append(is_claim_true)
            except IndexError:
                self.claim_list.append(None)

    def calculate_metrics(self):
        conf_matrix = confusion_matrix(self.labels_list, [str(is_claim).lower() for is_claim in self.claim_list])
        cls_report = classification_report(self.labels_list, [str(is_claim).lower() for is_claim in self.claim_list], labels=["true", "false", "neutral"])

        return conf_matrix, cls_report

def parse_args():
    parser = argparse.ArgumentParser(description="Fact Checker Application")
    parser.add_argument("--model_name", default="fractalego/fact-checking", help="Name of the fact-checking model to use")
    parser.add_argument("--data_file", required=True, help="Path to the JSON data file")
    return parser.parse_args()

if __name__ == "__main__":
    args = parse_args()
    fact_checker_app = FactCheckerApp(model_name=args.model_name)
    fact_checker_app.load_data(args.data_file)
    fact_checker_app.preprocess_data()
    fact_checker_app.validate_claims()
    conf_matrix, cls_report = fact_checker_app.calculate_metrics()
    print("Confusion Matrix:\n", conf_matrix)
    print("Report:\n", cls_report)
pegasus_gen.py
DELETED
@@ -1,70 +0,0 @@
from transformers import PegasusTokenizer, PegasusForConditionalGeneration
import json

class NLPFactGenerator:
    def __init__(self, model_name="human-centered-summarization/financial-summarization-pegasus"):
        self.max_length = 1024
        self.tokenizer = PegasusTokenizer.from_pretrained(model_name)
        self.model = PegasusForConditionalGeneration.from_pretrained(model_name)
        self.sentences_list = []
        self.justification_list = []
        self.titles_list = []
        self.labels_list = []
        self.claim_list = []

    def load_data(self, filename):
        with open(filename, "r") as infile:
            self.data = json.load(infile)

    def preprocess_data(self):
        max_seq_length = 1024
        for entry in self.data:
            if "data" in entry:
                self.titles_list.append(entry["title"])
                justification = ' '.join(entry["paragraphs"])
                for evidence in self.sentences_list:
                    if len(evidence) > max_seq_length:
                        evidence = evidence[:max_seq_length]
                _evidence = ' '.join([item["sentence"] for item in entry["data"]])
                self.justification_list.append(justification)
                self.sentences_list.append(_evidence)
                self.labels_list.append(entry["label"])

    def generate_fact(self):
        max_seq_length = 1024
        generated_facts = []
        count = 0
        for evidence in self.justification_list:
            if len(evidence) > max_seq_length:
                evidence = evidence[:max_seq_length]
            input_ids = self.tokenizer(evidence, return_tensors="pt").input_ids
            try:
                output = self.model.generate(
                    input_ids,
                    max_length=64,
                    num_beams=5,
                    early_stopping=True
                )
                summary = self.tokenizer.decode(output[0], skip_special_tokens=True)
                count+=1
                print(count)

                generated_facts.append(summary)
            except:
                print('Input ID: ', len(input_ids))
        return generated_facts


if __name__ == "__main__":
    fact_generator = NLPFactGenerator()
    fact_generator.load_data("finfact_old.json")
    fact_generator.preprocess_data()
    generated_facts = fact_generator.generate_fact()
    generated_data = []

    for title, evi, fact in zip(fact_generator.titles_list, fact_generator.sentences_list, generated_facts):
        generated_data.append({"title": title, "evidence":evi, "generated_fact": fact})
    with open("generated_facts_pegasus.json", "w") as outfile:
        json.dump(generated_data, outfile, indent=4)
xl_sum_gen.py
DELETED
@@ -1,80 +0,0 @@
import re
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import json

class NLPFactGenerator:
    def __init__(self, model_name="csebuetnlp/mT5_multilingual_XLSum"):
        self.max_length = 1024
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
        self.WHITESPACE_HANDLER = lambda k: re.sub('\s+', ' ', re.sub('\n+', ' ', k.strip()))
        self.sentences_list = []
        self.justification_list = []
        self.titles_list = []
        self.labels_list = []
        self.claim_list = []

    def load_data(self, filename):
        with open(filename, "r") as infile:
            self.data = json.load(infile)

    def preprocess_data(self):
        max_seq_length = 1024
        for entry in self.data:
            if "data" in entry:
                self.titles_list.append(entry["title"])
                justification = ' '.join(entry["paragraphs"])
                for evidence in self.sentences_list:
                    if len(evidence) > max_seq_length:
                        evidence = evidence[:max_seq_length]
                _evidence = ' '.join([item["sentence"] for item in entry["data"]])
                self.justification_list.append(justification)
                self.sentences_list.append(_evidence)
                self.labels_list.append(entry["label"])

    def generate_fact(self):
        max_seq_length = 1024
        generated_facts = []
        count = 0
        for evidence in self.justification_list:
            if len(evidence) > max_seq_length:
                evidence = evidence[:max_seq_length]
            input_ids = self.tokenizer(
                [self.WHITESPACE_HANDLER(evidence)],
                return_tensors="pt",
                padding="max_length",
                truncation=True,
                max_length=1024)["input_ids"]
            try:
                output_ids = self.model.generate(
                    input_ids=input_ids,
                    max_length=128,
                    no_repeat_ngram_size=2,
                    num_beams=4)[0]
                summary = self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=False)
                count+=1
                print(count)
                generated_facts.append(summary)
            except:
                print('Input ID: ', len(input_ids))
        return generated_facts


if __name__ == "__main__":
    fact_generator = NLPFactGenerator()
    fact_generator.load_data("finfact_old.json")
    fact_generator.preprocess_data()
    generated_facts = fact_generator.generate_fact()
    generated_data = []

    for title, evi, fact in zip(fact_generator.titles_list, fact_generator.sentences_list, generated_facts):
        generated_data.append({"title": title, "evidence":evi, "generated_fact": fact})
    with open("generated_facts_xlsum.json", "w") as outfile:
        json.dump(generated_data, outfile, indent=4)