taln-archives / prmu.py
# -*- coding: utf-8 -*-
import sys
import json

import spacy
from nltk.stem.snowball import SnowballStemmer as Stemmer

# https://spacy.io/usage/linguistic-features#native-tokenizer-additions
from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS
from spacy.util import compile_infix_regex

nlp = spacy.load("fr_core_news_sm")
# Modify tokenizer infix patterns so that words are not split on intra-word hyphens
infixes = (
    LIST_ELLIPSES
    + LIST_ICONS
    + [
        r"(?<=[0-9])[+\-\*^](?=[0-9-])",
        r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
            al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
        ),
        r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
        # ✅ Commented out regex that splits on hyphens between letters:
        # r"(?<=[{a}])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
        r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
    ]
)
infix_re = compile_infix_regex(infixes)
nlp.tokenizer.infix_finditer = infix_re.finditer
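
# Illustrative check (not part of the original script): with the hyphen rule
# commented out above, hyphenated compounds are kept as single tokens, e.g.
#
#   >>> [t.text for t in nlp("extraction de mots-clés")]
#   ['extraction', 'de', 'mots-clés']
#
# whereas the default French tokenizer would split "mots-clés" on the hyphen.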


def contains(subseq, inseq):
    """Return True if subseq occurs as a contiguous subsequence of inseq."""
    return any(inseq[pos:pos + len(subseq)] == subseq
               for pos in range(0, len(inseq) - len(subseq) + 1))
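
# Example (illustrative): the match must be contiguous, so
#   contains(['a', 'b'], ['x', 'a', 'b', 'y'])  -> True
#   contains(['a', 'y'], ['x', 'a', 'b', 'y'])  -> False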


def find_prmu(tok_title, tok_text, tok_kp):
    """Find the PRMU category of a given keyphrase: Present, Reordered,
    Mixed or Unseen, depending on how its words occur in the document."""

    # if the keyphrase is present verbatim in the title or the text
    if contains(tok_kp, tok_title) or contains(tok_kp, tok_text):
        return "P"

    # if the keyphrase is considered as absent
    else:
        # find present and absent words
        present_words = [w for w in tok_kp if w in tok_title or w in tok_text]

        # if "all" words are present -> Reordered
        if len(present_words) == len(tok_kp):
            return "R"
        # if "some" words are present -> Mixed
        elif len(present_words) > 0:
            return "M"
        # if "no" words are present -> Unseen
        else:
            return "U"


if __name__ == '__main__':

    data = []

    # a single French stemmer instance is reused for all tokens
    stemmer = Stemmer('french')

    # read the dataset
    with open(sys.argv[1], 'r') as f:
        # loop through the documents
        for line in f:
            doc = json.loads(line.strip())
            print(doc['id'])

            title_spacy = nlp(doc['title'])
            abstract_spacy = nlp(doc['abstract'])

            title_tokens = [token.text for token in title_spacy]
            abstract_tokens = [token.text for token in abstract_spacy]

            title_stems = [stemmer.stem(w.lower()) for w in title_tokens]
            abstract_stems = [stemmer.stem(w.lower()) for w in abstract_tokens]

            keyphrases_stems = []
            for keyphrase in doc['keyphrases']:
                keyphrase_spacy = nlp(keyphrase)
                keyphrase_tokens = [token.text for token in keyphrase_spacy]
                keyphrase_stems = [stemmer.stem(w.lower()) for w in keyphrase_tokens]
                keyphrases_stems.append(keyphrase_stems)

            # compute the PRMU category of each keyphrase
            prmu = [find_prmu(title_stems, abstract_stems, kp)
                    for kp in keyphrases_stems]

            # warn if the recomputed categories differ from those in the file
            if doc['prmu'] != prmu:
                print("PRMU categories are not identical!")

            doc['prmu'] = prmu
            data.append(json.dumps(doc))

    # write the updated documents back, one JSON object per line
    with open(sys.argv[2], 'w') as o:
        o.write("\n".join(data))
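
# Usage sketch (file names are illustrative): each input line is a JSON
# document with 'id', 'title', 'abstract', 'keyphrases' and 'prmu' fields,
# as read above.
#
#   python prmu.py taln-archives.jsonl taln-archives.prmu.jsonl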