import json
import logging
import re
from typing import Dict, List, Optional, Pattern, Tuple

import lemminflect
import nltk
import numpy as np
from nltk.corpus import stopwords

from . import sentence_pattern

# NOTE: nltk.word_tokenize, nltk.pos_tag, and stopwords.words() below rely on
# the NLTK data packages "punkt", "averaged_perceptron_tagger", and
# "stopwords"; download them with nltk.download(...) before first use.

logger = logging.getLogger("rewriting")
|
def match_target(target: str, phrase: str) -> bool:
    """
    Checks whether `phrase` is a valid filler for the slot of `target`.

    Args:
        target: str as the target pattern, possible targets:
            + v.
            + v.ing
            + who
            + categ
            + kw
            + article
        phrase: str as the candidate

    Returns:
        bool
    """

    # A well-formed target has the shape "<pos>...", e.g. "<v.>...".
    if not (target.startswith("<") and target.endswith(">...")):
        return False
    pos = target[1:-4]
    if pos=="v." or pos=="v.ing":
        return judge_verb(pos, phrase)
    return True
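
# Illustrative behaviour (assumed inputs; the verb cases depend on NLTK's
# part-of-speech tagger):
#   match_target("<kw>...", '"Some Title"')     # True: non-verb targets always pass
#   match_target("kw", '"Some Title"')          # False: malformed target
#   match_target("<v.>...", "search the site")  # True iff "search" is tagged VB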
|
def transform_target(target: str, phrase: str, **kargs) -> str:
    """
    Transforms `phrase` so that it fits into the slot of `target`.

    Args:
        target: str as the target pattern, possible targets:
            + v.
            + v.ing
            + who
            + categ
            + kw
            + article
        phrase: str as the candidate
        kargs: dict like {str: something}, forwarded to the transformers

    Returns:
        str as the transformed phrase
    """

    if not (target.startswith("<") and target.endswith(">...")):
        return phrase
    pos = target[1:-4]
    if pos=="v.":
        return to_infinitive(phrase)
    if pos=="v.ing":
        return to_vbg(phrase)
    if pos=="kw":
        return extract_keywords(phrase, **kargs)
    if pos=="who":
        return drop_titles(phrase, **kargs)
    return phrase
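
# Illustrative behaviour (assumed inputs; the keyword and title cases vary
# with the rng, so only the deterministic verb inflections are shown):
#   transform_target("<v.ing>...", "search the site")  # -> "searching the site"
#   transform_target("<v.>...", "searching the site")  # -> "search the site"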
|
def judge_verb(pos: str, phrase: str) -> bool:
    """
    Judges if the first word of `phrase` is a verb infinitive or a present
    participle.

    Args:
        pos: str, "v." or "v.ing"
        phrase: str

    Returns:
        bool
    """

    tokens = nltk.word_tokenize(phrase)
    if not tokens:
        return False
    pos_tags = nltk.pos_tag(tokens)
    return (pos=="v." and pos_tags[0][1]=="VB")\
        or (pos=="v.ing" and pos_tags[0][1]=="VBG")
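
# Illustrative behaviour (results depend on NLTK's tagger model):
#   judge_verb("v.", "search for an article")        # likely True ("search" -> VB)
#   judge_verb("v.ing", "searching for an article")  # likely True ("searching" -> VBG)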
|
|
def to_infinitive(phrase: str) -> str:
    # Inflect only the first token; `partition` also handles one-word
    # phrases, on which split(maxsplit=1) would fail to unpack.
    head, _, tail = phrase.partition(" ")
    lemmas = lemminflect.getLemma(head, upos="VERB")
    head = lemmas[0] if lemmas else head
    return head + " " + tail if tail else head

def to_vbg(phrase: str) -> str:
    head, _, tail = phrase.partition(" ")
    inflections = lemminflect.getInflection(head, tag="VBG")
    head = inflections[0] if inflections else head
    return head + " " + tail if tail else head
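
# Illustrative examples (only the first token is inflected):
#   to_infinitive("running the tests")  # -> "run the tests"
#   to_vbg("run the tests")             # -> "running the tests"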
|
def extract_keywords( phrase: str
                    , rng: np.random.Generator = np.random.default_rng()
                    ) -> str:
    """
    Extracts a random subset of quoted keywords from a quoted phrase,
    preferring nouns and adjectives over verbs.
    """

    # NOTE: the default rng is created once at import time and is shared
    # across all calls that do not pass their own generator.

    # `phrase` is expected to carry surrounding quotes; strip them before
    # tokenising.
    tokens = nltk.word_tokenize(phrase[1:-1])
    pos_tags = nltk.pos_tag(tokens)

    stopword_set = set(stopwords.words())
    keywords = [ (word.lower(), tag)
                 for word, tag in pos_tags
                 if tag.startswith(("NN", "JJ", "VB"))
                 and word.lower() not in stopword_set
               ]
    # Keep only nouns and adjectives unless verbs are all that survived.
    noun_keywords = [kwd for kwd in keywords if not kwd[1].startswith("VB")]
    if noun_keywords:
        keywords = noun_keywords
    if not keywords:
        return ""
    keywords = ['"{}"'.format(word) for word, _ in keywords]

    # Keep each keyword independently with probability 0.3, but guarantee at
    # least one keyword in the output.
    sampled_keywords = [kwd for kwd in keywords if rng.random() < 0.3]
    if not sampled_keywords:
        sampled_keywords = [keywords[rng.integers(len(keywords))]]
    return ", ".join(sampled_keywords)
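
# Illustrative call (assumed input; the sampled subset varies with the rng
# and the tagger, and the phrase must carry its surrounding quotes):
#   extract_keywords('"How to Grow Tomatoes"', rng=np.random.default_rng(0))
#   # -> e.g. '"tomatoes"'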
|
|
def drop_titles( phrase: str
               , rng: np.random.Generator = np.random.default_rng()
               ) -> str:
    """
    Keeps the first comma-separated title and each remaining title
    independently with probability 0.3.
    """

    titles = phrase.split(", ")
    sampled_titles = [ttl for ttl in titles[1:] if rng.random() < 0.3]
    return ", ".join([titles[0]] + sampled_titles)
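
# Illustrative call (assumed input; the kept titles vary with the rng):
#   drop_titles("Jane Doe, PhD, RN", rng=np.random.default_rng(0))
#   # -> e.g. "Jane Doe" or "Jane Doe, PhD"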
|
|
def parse_file( file_name: str
              , with_regex: bool = False
              ) -> Tuple[ List[sentence_pattern.Item]
                        , Optional[List[Pattern[str]]]
                        ]:
    """
    Parses a pattern file into one template item per line. When `with_regex`
    is set, also compiles the matching regex of every template.
    """

    with open(file_name) as f:
        item_list = [ sentence_pattern.parse_pattern(line)
                      for line in f.read().splitlines()
                    ]
    regex_list = [re.compile(itm.regex) for itm in item_list] if with_regex else None
    return item_list, regex_list
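
# Assumed pattern-file layout (the actual grammar is defined by
# sentence_pattern.parse_pattern): one template per line, each containing a
# slot such as "<v.>...", "<kw>...", or "<article>...".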
|
class TransformerSet:
    """
    Rewrites the fixed sentence forms into randomly chosen paraphrase
    templates. Sentences requiring transformation:
        1. search, in command and instruction
        2. categ, in command and instruction
        3. author, in command and instruction
        4. article, in command and instruction (these two environments use
           different template libraries)
        5. other sentences, looked up in the doccano annotations
    """
|
    def __init__( self
                , search_pattern_file: str
                , article_pattern_file: str
                , article_command_pattern_file: str
                , categ_pattern_file: str
                , author_pattern_file: str
                , question_pattern_file: str
                , doccano_file: str
                , rng: np.random.Generator
                ):
        """
        Args:
            search_pattern_file (str): pattern file for search instructions
            article_pattern_file (str): pattern file for article instructions
            article_command_pattern_file (str): pattern file for article
                instructions in the "command" field
            categ_pattern_file (str): pattern file for category instructions
            author_pattern_file (str): pattern file for author instructions
            question_pattern_file (str): pattern file for WikiHowNFQA questions
            doccano_file (str): path to the JSON file exported by doccano
            rng (np.random.Generator): used to generate random indices
        """

        self._search_template_lib: List[sentence_pattern.Item]
        self._article_template_lib: List[sentence_pattern.Item]
        self._article_command_template_lib: List[sentence_pattern.Item]
        self._categ_template_lib: List[sentence_pattern.Item]
        self._author_template_lib: List[sentence_pattern.Item]
        self._question_template_lib: List[sentence_pattern.Item]

        self._search_template_regex: List[Pattern[str]]

        # Only the search library needs its regexes: incoming search
        # sentences are matched against them in _transform_search before a
        # new template is drawn.
        self._search_template_lib, self._search_template_regex =\
                parse_file(search_pattern_file, with_regex=True)
        self._article_template_lib, _ = parse_file(article_pattern_file)
        self._article_command_template_lib, _ = parse_file(article_command_pattern_file)
        self._categ_template_lib, _ = parse_file(categ_pattern_file)
        self._author_template_lib, _ = parse_file(author_pattern_file)
        self._question_template_lib, _ = parse_file(question_pattern_file)

        # Maps each source sentence to [itself, *annotated paraphrases].
        self._annotation_dict: Dict[str, List[str]] = {}
        with open(doccano_file) as f:
            doccano_dict = json.load(f)
        for anntt in doccano_dict:
            self._annotation_dict[anntt["text"]] = [anntt["text"]] + anntt["label"]

        self._rng: np.random.Generator = rng
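
    # Hypothetical construction (the file names below are placeholders, not
    # the project's actual paths):
    #   transformer_set = TransformerSet( "patterns/search.txt"
    #                                   , "patterns/article.txt"
    #                                   , "patterns/article_command.txt"
    #                                   , "patterns/categ.txt"
    #                                   , "patterns/author.txt"
    #                                   , "patterns/question.txt"
    #                                   , "annotations/doccano.json"
    #                                   , np.random.default_rng(42)
    #                                   )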
|
|
    def transform(self, sentence: str, environment: str = "instruction") -> str:
        """
        Args:
            sentence: str
            environment: str, "instruction" or "command"

        Returns:
            str
        """

        logger.debug("Starting transform: %s", sentence)

        # Strip a leading "Then, " so the prefix checks below see the
        # sentence in canonical form; it is restored at the end.
        has_leading_then = sentence.startswith("Then, ")
        if has_leading_then:
            sentence = sentence[6].upper() + sentence[7:]

        if sentence.startswith("Search an article"):
            transformed = self._transform_search(sentence)
        elif sentence.startswith("Access the article"):
            transformed = self._transform_article(sentence, environment)
        elif sentence.startswith("Access the page"):
            transformed = self._transform_categ(sentence)
        elif sentence.startswith("Check the author"):
            transformed = self._transform_author(sentence)
        elif sentence.startswith("My question is"):
            transformed = self._transform_question(sentence)
        elif sentence in self._annotation_dict:
            # No template library applies; choose uniformly among the
            # original sentence and its annotated paraphrases.
            candidates = self._annotation_dict[sentence]
            random_index = self._rng.integers(len(candidates))
            transformed = candidates[random_index]
        else:
            transformed = sentence

        if has_leading_then:
            transformed = "Then, " + transformed[0].lower() + transformed[1:]

        logger.debug("Transformation result: %s", transformed)

        return transformed
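
    # Illustrative call (the chosen paraphrase varies with the rng and the
    # loaded template libraries):
    #   transformer_set.transform("Then, check the author page of Jane Doe.")
    #   # -> e.g. "Then, look up the author page of Jane Doe." under some template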
|
|
    def _transform_search(self, sentence: str) -> str:
        # Optionally swap in an annotated paraphrase first, giving each
        # paraphrase twice the weight of the original sentence.
        if sentence in self._annotation_dict:
            nb_candidates = len(self._annotation_dict[sentence])
            weights = np.full((nb_candidates,), 2)
            weights[0] = 1
            weights = weights/np.sum(weights)
            random_index = self._rng.choice(nb_candidates, p=weights)
            sentence = self._annotation_dict[sentence][random_index]

        # Locate the template matching the (possibly paraphrased) sentence
        # and pull the target string out of its first capture group.
        template = None
        target_str = None
        for tpl, rgx in zip(self._search_template_lib, self._search_template_regex):
            match_ = rgx.match(sentence)
            if match_ is not None:
                template = tpl
                target_str = match_.group(1)
                break
        if template is None:
            return sentence

        # Bail out if the extracted target cannot fill the matched slot.
        target_pattern = template.get_targets()
        if not match_target(target_pattern[0], target_str):
            return sentence

        return self._apply_new_template(target_str, self._search_template_lib)
|
|
    def _transform_article(self, sentence: str, environment: str) -> str:
        """
        Args:
            sentence: str
            environment: str, "instruction" or "command"; selects the
                template library to draw from

        Returns:
            str
        """

        assert sentence.startswith("Access the article \"")\
           and sentence.endswith("\"")
        # Keep the opening quote: downstream transformers such as
        # extract_keywords expect the title to carry its surrounding quotes.
        target_str = sentence[19:]

        target_template_library = self._article_template_lib\
                if environment=="instruction"\
                else self._article_command_template_lib

        return self._apply_new_template(target_str, target_template_library)
|
|
    def _transform_categ(self, sentence: str) -> str:
        assert sentence.startswith("Access the page of category ")
        target_str = sentence[28:]
        return self._apply_new_template(target_str, self._categ_template_lib)

    def _transform_author(self, sentence: str) -> str:
        assert sentence.startswith("Check the author page of ")\
           and sentence.endswith(".")
        target_str = sentence[25:-1]
        return self._apply_new_template(target_str, self._author_template_lib)

    def _transform_question(self, sentence: str) -> str:
        assert sentence.startswith("My question is: ")\
           and sentence.endswith("?")
        target_str = sentence[16:-1]
        return self._apply_new_template(target_str, self._question_template_lib)
|
    def _apply_new_template( self
                           , target_str: str
                           , template_library: List[sentence_pattern.Item]
                           ) -> str:
        # Draw a replacement template uniformly at random.
        new_template_index = self._rng.integers(len(template_library))
        new_template = template_library[new_template_index]

        # Adapt the target string to the slot of the new template (e.g.
        # re-inflect the verb or sample keywords), then instantiate it.
        target_pattern = new_template.get_targets()
        target_str = transform_target(target_pattern[0], target_str, rng=self._rng)
        new_template.implement(iter([target_str]))
        return new_template.instantiate()
|