Multilinguality: multilingual
Size Categories: 1K<n<10K
Language Creators: unknown
Annotations Creators: unknown
# -*- coding: utf-8 -*-

"""Recompute the PRMU (Present, Reordered, Mixed, Unseen) category of each
document's keyphrases and check it against the category already stored in
the dataset."""

import sys
import json
import spacy

from nltk.stem.snowball import SnowballStemmer as Stemmer

nlp = spacy.load("fr_core_news_sm")

# a single French stemmer instance, reused for every token below
stemmer = Stemmer('french')

# https://spacy.io/usage/linguistic-features#native-tokenizer-additions

from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS
from spacy.util import compile_infix_regex

# Modify tokenizer infix patterns
infixes = (
    LIST_ELLIPSES
    + LIST_ICONS
    + [
        r"(?<=[0-9])[+\-\*^](?=[0-9-])",
        r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
            al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
        ),
        r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
        # ✅ Commented out regex that splits on hyphens between letters:
        # r"(?<=[{a}])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
        r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
    ]
)

infix_re = compile_infix_regex(infixes)
nlp.tokenizer.infix_finditer = infix_re.finditer
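
# Illustrative check (not part of the original pipeline): with the hyphen
# rule removed above, hyphenated French compounds are kept as one token,
# which lets them match hyphenated keyphrases downstream, e.g.:
#   [t.text for t in nlp("Le porte-avions accoste.")]
#   -> ['Le', 'porte-avions', 'accoste', '.']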


def contains(subseq, inseq):
    """Return True if `subseq` occurs as a contiguous subsequence of `inseq`."""
    return any(inseq[pos:pos + len(subseq)] == subseq
               for pos in range(0, len(inseq) - len(subseq) + 1))
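# Example (illustrative): contains(['a', 'b'], ['x', 'a', 'b', 'y']) is True,
# while contains(['b', 'a'], ['x', 'a', 'b', 'y']) is False: order and
# adjacency both matter.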


def find_prmu(tok_title, tok_text, tok_kp):
    """Find the PRMU category of a given keyphrase.

    P: the keyphrase occurs verbatim (contiguously) in the title or text.
    R: all of its words occur, but not contiguously (Reordered).
    M: only some of its words occur (Mixed).
    U: none of its words occur (Unseen).
    """

    # if kp is present
    if contains(tok_kp, tok_title) or contains(tok_kp, tok_text):
        return "P"

    # otherwise the keyphrase is absent; refine into R, M or U
    else:

        # find present and absent words
        present_words = [w for w in tok_kp if w in tok_title or w in tok_text]

        # if "all" words are present
        if len(present_words) == len(tok_kp):
            return "R"
        # if "some" words are present
        elif len(present_words) > 0:
            return "M"
        # if "no" words are present
        else:
            return "U"


if __name__ == '__main__':

    data = []

    # read the dataset (one JSON document per line)
    with open(sys.argv[1], 'r', encoding='utf-8') as f:
        # loop through the documents
        for line in f:
            doc = json.loads(line.strip())

            # progress marker
            print(doc['id'])

            title_spacy = nlp(doc['title'])
            abstract_spacy = nlp(doc['abstract'])

            title_tokens = [token.text for token in title_spacy]
            abstract_tokens = [token.text for token in abstract_spacy]

            title_stems = [stemmer.stem(w.lower()) for w in title_tokens]
            abstract_stems = [stemmer.stem(w.lower()) for w in abstract_tokens]

            keyphrases_stems = []
            for keyphrase in doc['keyphrases']:
                keyphrase_spacy = nlp(keyphrase)
                keyphrase_tokens = [token.text for token in keyphrase_spacy]
                keyphrase_stems = [stemmer.stem(w.lower()) for w in keyphrase_tokens]
                keyphrases_stems.append(keyphrase_stems)

            prmu = [find_prmu(title_stems, abstract_stems, kp) for kp in keyphrases_stems]

            if doc['prmu'] != prmu:
                print("PRMU categories are not identical!")

            doc['prmu'] = prmu
            data.append(json.dumps(doc))

    # write the json
    with open(sys.argv[2], 'w', encoding='utf-8') as o:
        o.write("\n".join(data))
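
# Usage sketch (file names are hypothetical, for illustration only):
#   python prmu.py input.jsonl output.jsonl
# Each input line is a JSON object with at least the fields "id", "title",
# "abstract", "keyphrases" and "prmu"; the "prmu" field is recomputed and
# the updated documents are written back out, one per line.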