#!/usr/bin/env python
# coding: utf-8

# In[1]:

from datasets import load_dataset
import spacy
import re
# from spacy.lang.en import English
from spacy.tokenizer import _get_regex_pattern
from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS
from spacy.util import compile_infix_regex
from nltk.stem.snowball import SnowballStemmer as Stemmer
import sys

# In[2]:

print("LOADING DATASET")
dataset = load_dataset("json", data_files={"test": "data.jsonl"})

# In[3]:

nlp = spacy.load("en_core_web_sm")

# Keep hyphenated compounds (e.g. "state-of-the-art") as single tokens.
re_token_match = _get_regex_pattern(nlp.Defaults.token_match)
re_token_match = rf"({re_token_match}|\w+-\w+)"
nlp.tokenizer.token_match = re.compile(re_token_match).match

# Modify tokenizer infix patterns
infixes = (
    LIST_ELLIPSES
    + LIST_ICONS
    + [
        r"(?<=[0-9])[+\-\*^](?=[0-9-])",
        r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
            al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
        ),
        r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
        # ✅ Commented out regex that splits on hyphens between letters:
        # r"(?<=[{a}])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
        r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
    ]
)

infix_re = compile_infix_regex(infixes)
nlp.tokenizer.infix_finditer = infix_re.finditer

# In[5]:

# Single stemmer instance, reused everywhere instead of re-creating one per word.
stemmer = Stemmer("porter")


def contains(subseq, inseq):
    """Return True if `subseq` occurs as a contiguous subsequence of `inseq`."""
    return any(inseq[pos:pos + len(subseq)] == subseq
               for pos in range(0, len(inseq) - len(subseq) + 1))


def find_prmu(tok_title, tok_text, tok_kp):
    """Find the PRMU category of a given keyphrase."""
    # if kp is present as a contiguous sequence of tokens
    if contains(tok_kp, tok_title) or contains(tok_kp, tok_text):
        return "P"
    # if kp is considered as absent
    else:
        # find present and absent words
        present_words = [w for w in tok_kp if w in tok_title or w in tok_text]

        # if "all" words are present
        if len(present_words) == len(tok_kp):
            return "R"
        # if "some" words are present
        elif len(present_words) > 0:
            return "M"
        # if "no" words are present
        else:
            return "U"


def tokenize(dataset):
    """Tokenize and stem the keyphrases of a document."""
    keyphrases_stems = []
    for keyphrase in dataset["keyphrases"]:
        keyphrase_spacy = nlp(keyphrase)
        keyphrase_tokens = [token.text for token in keyphrase_spacy]
        keyphrase_stems = [stemmer.stem(w.lower()) for w in keyphrase_tokens]
        keyphrases_stems.append(" ".join(keyphrase_stems))
    dataset["tokenized_keyphrases"] = keyphrases_stems
    return dataset


def prmu_dataset(dataset):
    """Tokenize and stem the title and text of a document, then run the PRMU algorithm."""
    title_spacy = nlp(dataset["title"])
    abstract_spacy = nlp(dataset["text"])

    title_tokens = [token.text for token in title_spacy]
    abstract_tokens = [token.text for token in abstract_spacy]

    title_stems = [stemmer.stem(w.lower()) for w in title_tokens]
    abstract_stems = [stemmer.stem(w.lower()) for w in abstract_tokens]

    # Keyphrases are stored as space-joined stems; split them back into token lists.
    prmu = [find_prmu(title_stems, abstract_stems, kp.split())
            for kp in dataset["tokenized_keyphrases"]]

    dataset["prmu"] = prmu
    return dataset


# In[6]:

print("TOKENIZATION")
dataset = dataset.map(tokenize, num_proc=int(sys.argv[1]))

print("GETTING PRMU")
dataset = dataset.map(prmu_dataset, num_proc=int(sys.argv[1]))

dataset["test"].to_json("data.jsonl")
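

# Illustrative sanity check (not part of the original pipeline): a minimal sketch
# showing the four PRMU labels (Present, Reordered, Mixed, Unseen) returned by
# `find_prmu`, reusing the `stemmer` defined above. The toy title, text and
# keyphrases below are made-up examples; the expected labels assume the Porter
# stemming configured in this script.

toy_title = [stemmer.stem(w) for w in "neural keyphrase generation".split()]
toy_text = [stemmer.stem(w) for w in
            "we study generation of keyphrases with neural models".split()]

for toy_kp in [
    "keyphrase generation",  # "P": contiguous match in the title
    "generation of neural",  # "R": all words present, but not contiguously
    "neural clustering",     # "M": only some words present
    "topic clustering",      # "U": no word present
]:
    toy_stems = [stemmer.stem(w) for w in toy_kp.split()]
    print(toy_kp, "->", find_prmu(toy_title, toy_text, toy_stems))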