| {"repo": "IntelLabs/nlp-architect", "pull_number": 108, "instance_id": "IntelLabs__nlp-architect-108", "issue_numbers": "", "base_commit": "acacb498fadfa51ee1ddb8e2441af6a699621ae2", "patch": "diff --git a/examples/cross_doc_coref/cross_doc_coref_sieves.py b/examples/cross_doc_coref/cross_doc_coref_sieves.py\n--- a/examples/cross_doc_coref/cross_doc_coref_sieves.py\n+++ b/examples/cross_doc_coref/cross_doc_coref_sieves.py\n@@ -125,7 +125,6 @@ def run_cdc_pipeline():\n \n \n if __name__ == '__main__':\n- logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n \n run_cdc_pipeline()\ndiff --git a/examples/cross_doc_coref/relation_extraction_example.py b/examples/cross_doc_coref/relation_extraction_example.py\n--- a/examples/cross_doc_coref/relation_extraction_example.py\n+++ b/examples/cross_doc_coref/relation_extraction_example.py\n@@ -90,6 +90,5 @@ def run_example():\n \n \n if __name__ == '__main__':\n- logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n run_example()\ndiff --git a/examples/np2vec/inference.py b/examples/np2vec/inference.py\n--- a/examples/np2vec/inference.py\n+++ b/examples/np2vec/inference.py\n@@ -20,7 +20,6 @@\n from nlp_architect.models.np2vec import NP2vec\n from nlp_architect.utils.io import validate_existing_filepath, check_size\n \n-logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n logger = logging.getLogger(__name__)\n \n if __name__ == \"__main__\":\ndiff --git a/examples/np2vec/train.py b/examples/np2vec/train.py\n--- a/examples/np2vec/train.py\n+++ b/examples/np2vec/train.py\n@@ -20,7 +20,6 @@\n from nlp_architect.models.np2vec import NP2vec\n from nlp_architect.utils.io import check_size, validate_existing_filepath\n \n-logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n logger = logging.getLogger(__name__)\n \n \ndiff --git a/nlp_architect/__init__.py b/nlp_architect/__init__.py\n--- a/nlp_architect/__init__.py\n+++ b/nlp_architect/__init__.py\n@@ -15,8 +15,33 @@\n # ******************************************************************************\n from os import path\n from pathlib import Path\n+import logging\n+\n+logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)\n+logger = logging.getLogger(__name__) \n \n LIBRARY_PATH = Path(path.realpath(__file__)).parent\n LIBRARY_ROOT = LIBRARY_PATH.parent\n LIBRARY_OUT = Path(Path.home()) / 'nlp-architect' / 'cache'\n LIBRARY_DATASETS = LIBRARY_ROOT / 'datasets'\n+\n+try:\n+ # Capirca uses Google's abseil-py library, which uses a Google-specific\n+ # wrapper for logging. That wrapper will write a warning to sys.stderr if\n+ # the Google command-line flags library has not been initialized.\n+ #\n+ # https://github.com/abseil/abseil-py/blob/pypi-v0.7.1/absl/logging/__init__.py#L819-L825\n+ #\n+ # This is not right behavior for Python code that is invoked outside of a\n+ # Google-authored main program. 
Use knowledge of abseil-py to disable that\n+ # warning; ignore and continue if something goes wrong.\n+ import absl.logging\n+\n+ # https://github.com/abseil/abseil-py/issues/99\n+ logging.root.removeHandler(absl.logging._absl_handler)\n+ # https://github.com/abseil/abseil-py/issues/102\n+ absl.logging._warn_preinit_stderr = False\n+ absl.logging.set_verbosity('info')\n+ absl.logging.set_stderrthreshold('info')\n+except Exception:\n+ pass\ndiff --git a/nlp_architect/cli/__init__.py b/nlp_architect/cli/__init__.py\n--- a/nlp_architect/cli/__init__.py\n+++ b/nlp_architect/cli/__init__.py\n@@ -23,10 +23,7 @@\n cli_train_cmd)\n from nlp_architect.version import NLP_ARCHITECT_VERSION\n \n-# Setup logging\n-logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n- datefmt='%m/%d/%Y %H:%M:%S',\n- level=logging.INFO)\n+logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n \n \n def run_cli():\ndiff --git a/nlp_architect/data/cdc_resources/gen_scripts/create_reference_dict_dump.py b/nlp_architect/data/cdc_resources/gen_scripts/create_reference_dict_dump.py\n--- a/nlp_architect/data/cdc_resources/gen_scripts/create_reference_dict_dump.py\n+++ b/nlp_architect/data/cdc_resources/gen_scripts/create_reference_dict_dump.py\n@@ -22,7 +22,6 @@\n from nlp_architect.models.cross_doc_coref.system.cdc_utils import load_mentions_vocab_from_files\n from nlp_architect.utils import io\n \n-logging.basicConfig(level=logging.DEBUG)\n logger = logging.getLogger(__name__)\n \n parser = argparse.ArgumentParser(description='Create Referent dictionary dataset only dump')\ndiff --git a/nlp_architect/data/cdc_resources/gen_scripts/create_verbocean_dump.py b/nlp_architect/data/cdc_resources/gen_scripts/create_verbocean_dump.py\n--- a/nlp_architect/data/cdc_resources/gen_scripts/create_verbocean_dump.py\n+++ b/nlp_architect/data/cdc_resources/gen_scripts/create_verbocean_dump.py\n@@ -22,7 +22,6 @@\n from nlp_architect.models.cross_doc_coref.system.cdc_utils import load_mentions_vocab_from_files\n from nlp_architect.utils import io\n \n-logging.basicConfig(level=logging.DEBUG)\n logger = logging.getLogger(__name__)\n \n parser = argparse.ArgumentParser(description='Create Verb-Ocean dataset only dump')\ndiff --git a/nlp_architect/data/cdc_resources/gen_scripts/create_wiki_dump.py b/nlp_architect/data/cdc_resources/gen_scripts/create_wiki_dump.py\n--- a/nlp_architect/data/cdc_resources/gen_scripts/create_wiki_dump.py\n+++ b/nlp_architect/data/cdc_resources/gen_scripts/create_wiki_dump.py\n@@ -24,7 +24,6 @@\n from nlp_architect.utils import io\n from nlp_architect.utils.io import json_dumper\n \n-logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n \n result_dump = {}\ndiff --git a/nlp_architect/data/cdc_resources/gen_scripts/create_word_embed_elmo_dump.py b/nlp_architect/data/cdc_resources/gen_scripts/create_word_embed_elmo_dump.py\n--- a/nlp_architect/data/cdc_resources/gen_scripts/create_word_embed_elmo_dump.py\n+++ b/nlp_architect/data/cdc_resources/gen_scripts/create_word_embed_elmo_dump.py\n@@ -23,7 +23,6 @@\n from nlp_architect.data.cdc_resources.embedding.embed_elmo import ElmoEmbedding\n from nlp_architect.utils import io\n \n-logging.basicConfig(level=logging.DEBUG)\n logger = logging.getLogger(__name__)\n \n \ndiff --git a/nlp_architect/data/cdc_resources/gen_scripts/create_word_embed_glove_dump.py b/nlp_architect/data/cdc_resources/gen_scripts/create_word_embed_glove_dump.py\n--- 
a/nlp_architect/data/cdc_resources/gen_scripts/create_word_embed_glove_dump.py\n+++ b/nlp_architect/data/cdc_resources/gen_scripts/create_word_embed_glove_dump.py\n@@ -22,7 +22,6 @@\n from nlp_architect.models.cross_doc_coref.system.cdc_utils import load_mentions_vocab_from_files\n from nlp_architect.utils import io\n \n-logging.basicConfig(level=logging.DEBUG)\n logger = logging.getLogger(__name__)\n \n parser = argparse.ArgumentParser(description='Create GloVe dataset only dump')\ndiff --git a/nlp_architect/data/cdc_resources/gen_scripts/create_wordnet_dump.py b/nlp_architect/data/cdc_resources/gen_scripts/create_wordnet_dump.py\n--- a/nlp_architect/data/cdc_resources/gen_scripts/create_wordnet_dump.py\n+++ b/nlp_architect/data/cdc_resources/gen_scripts/create_wordnet_dump.py\n@@ -22,7 +22,6 @@\n from nlp_architect.utils import io\n from nlp_architect.utils.io import json_dumper\n \n-logging.basicConfig(level=logging.DEBUG)\n logger = logging.getLogger(__name__)\n \n parser = argparse.ArgumentParser(description='Create WordNet dataset only dump')\ndiff --git a/nlp_architect/models/transformers/base_model.py b/nlp_architect/models/transformers/base_model.py\n--- a/nlp_architect/models/transformers/base_model.py\n+++ b/nlp_architect/models/transformers/base_model.py\n@@ -19,16 +19,19 @@\n from typing import List, Union\n \n import torch\n-from pytorch_transformers import XLNetConfig, XLMConfig, BertTokenizer, BertConfig, \\\n- XLNetTokenizer, XLMTokenizer, AdamW, WarmupLinearSchedule\n from torch.utils.data import DataLoader\n-from tqdm import trange, tqdm\n+from tqdm import tqdm, trange\n+from transformers import (AdamW, BertConfig, BertTokenizer, RobertaConfig,\n+ RobertaTokenizer, WarmupLinearSchedule, XLMConfig,\n+ XLMTokenizer, XLNetConfig, XLNetTokenizer)\n \n from nlp_architect.models import TrainableModel\n-from nlp_architect.models.transformers.quantized_bert import QuantizedBertConfig\n+from nlp_architect.models.transformers.quantized_bert import \\\n+ QuantizedBertConfig\n \n logger = logging.getLogger(__name__)\n \n+\n ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys())\n for conf in (BertConfig, XLNetConfig, XLMConfig)), ())\n \n@@ -48,6 +51,7 @@ class TransformerBase(TrainableModel):\n 'quant_bert': (QuantizedBertConfig, BertTokenizer),\n 'xlnet': (XLNetConfig, XLNetTokenizer),\n 'xlm': (XLMConfig, XLMTokenizer),\n+ 'roberta': (RobertaConfig, RobertaTokenizer)\n }\n \n def __init__(self, model_type: str, model_name_or_path: str, labels: List[str] = None,\ndiff --git a/nlp_architect/models/transformers/quantized_bert.py b/nlp_architect/models/transformers/quantized_bert.py\n--- a/nlp_architect/models/transformers/quantized_bert.py\n+++ b/nlp_architect/models/transformers/quantized_bert.py\n@@ -18,33 +18,25 @@\n Quantized BERT layers and model\n \"\"\"\n \n-import sys\n-import os\n import logging\n+import os\n+import sys\n \n import torch\n from torch import nn\n-from pytorch_transformers.modeling_bert import (BertEmbeddings,\n- BertLayerNorm,\n- BertSelfAttention,\n- BertSelfOutput,\n- BertAttention,\n- BertIntermediate,\n- BertOutput,\n- BertLayer,\n- BertEncoder,\n- BertPooler,\n- BertModel,\n- BertForQuestionAnswering,\n- BertForSequenceClassification,\n- BertForTokenClassification,\n- ACT2FN,\n- BertPreTrainedModel,\n- BertConfig)\n+from transformers.modeling_bert import (ACT2FN, BertAttention, BertConfig,\n+ BertEmbeddings, BertEncoder,\n+ BertForQuestionAnswering,\n+ BertForSequenceClassification,\n+ BertForTokenClassification,\n+ BertIntermediate, 
BertLayer,\n+ BertLayerNorm, BertModel, BertOutput,\n+ BertPooler, BertPreTrainedModel,\n+ BertSelfAttention, BertSelfOutput)\n \n from nlp_architect.nn.torch.quantization import (QuantizationConfig,\n- QuantizedLayer,\n QuantizedEmbedding,\n+ QuantizedLayer,\n QuantizedLinear)\n \n logger = logging.getLogger(__name__)\ndiff --git a/nlp_architect/models/transformers/sequence_classification.py b/nlp_architect/models/transformers/sequence_classification.py\n--- a/nlp_architect/models/transformers/sequence_classification.py\n+++ b/nlp_architect/models/transformers/sequence_classification.py\n@@ -20,15 +20,17 @@\n \n import numpy as np\n import torch\n-from pytorch_transformers import (BertForSequenceClassification,\n- XLMForSequenceClassification,\n- XLNetForSequenceClassification)\n from torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n+from transformers import (BertForSequenceClassification,\n+ RobertaForSequenceClassification,\n+ XLMForSequenceClassification,\n+ XLNetForSequenceClassification)\n \n from nlp_architect.data.sequence_classification import SequenceClsInputExample\n from nlp_architect.models.transformers.base_model import (InputFeatures,\n TransformerBase)\n-from nlp_architect.models.transformers.quantized_bert import QuantizedBertForSequenceClassification\n+from nlp_architect.models.transformers.quantized_bert import \\\n+ QuantizedBertForSequenceClassification\n from nlp_architect.utils.metrics import accuracy\n \n logger = logging.getLogger(__name__)\n@@ -50,7 +52,8 @@ class TransformerSequenceClassifier(TransformerBase):\n 'bert': BertForSequenceClassification,\n 'quant_bert': QuantizedBertForSequenceClassification,\n 'xlnet': XLNetForSequenceClassification,\n- 'xlm': XLMForSequenceClassification\n+ 'xlm': XLMForSequenceClassification,\n+ 'roberta': RobertaForSequenceClassification\n }\n \n def __init__(self, model_type: str, labels: List[str] = None,\n@@ -64,8 +67,12 @@ def __init__(self, model_type: str, labels: List[str] = None,\n num_labels=self.num_labels, *args,\n **kwargs)\n self.model_class = self.MODEL_CLASS[model_type]\n- self.model = self.model_class.from_pretrained(self.model_name_or_path, from_tf=bool(\n- '.ckpt' in self.model_name_or_path), config=self.config, from_8bit=load_quantized)\n+ if model_type == 'quant_bert' and load_quantized:\n+ self.model = self.model_class.from_pretrained(self.model_name_or_path, from_tf=bool(\n+ '.ckpt' in self.model_name_or_path), config=self.config, from_8bit=load_quantized)\n+ else:\n+ self.model = self.model_class.from_pretrained(self.model_name_or_path, from_tf=bool(\n+ '.ckpt' in self.model_name_or_path), config=self.config)\n self.task_type = task_type\n self.metric_fn = metric_fn\n self.to(self.device, self.n_gpus)\n@@ -127,7 +134,7 @@ def evaluate_predictions(self, logits, label_ids):\n except TypeError:\n output_eval_file = os.devnull\n with open(output_eval_file, \"w\") as writer:\n- logger.info(\"***** Eval results *****\")\n+ logger.info(\"***** Evaluation results *****\")\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n@@ -152,17 +159,11 @@ def convert_to_tensors(self,\n self.tokenizer,\n self.task_type,\n include_labels,\n- cls_token_at_end=bool(\n- self.model_type in ['xlnet']),\n- cls_token=self.tokenizer.cls_token,\n- sep_token=self.tokenizer.sep_token,\n- cls_token_segment_id=2 if\n- self.model_type in ['xlnet'] else 1,\n- # pad on the left for xlnet\n pad_on_left=bool(\n self.model_type in 
['xlnet']),\n- pad_token_segment_id=4\n- if self.model_type in ['xlnet'] else 0)\n+ pad_token=self.tokenizer.convert_tokens_to_ids(\n+ [self.tokenizer.pad_token])[0],\n+ pad_token_segment_id=4 if self.model_type in ['xlnet'] else 0)\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n@@ -206,12 +207,15 @@ def _postprocess_logits(self, logits):\n preds = np.squeeze(preds)\n return preds\n \n- def _convert_examples_to_features(self, examples, max_seq_length,\n- tokenizer, task_type, include_labels=True,\n- cls_token_at_end=False, pad_on_left=False,\n- cls_token='[CLS]', sep_token='[SEP]', pad_token=0,\n- sequence_a_segment_id=0, sequence_b_segment_id=1,\n- cls_token_segment_id=1, pad_token_segment_id=0,\n+ def _convert_examples_to_features(self,\n+ examples,\n+ max_seq_length,\n+ tokenizer,\n+ task_type,\n+ include_labels=True,\n+ pad_on_left=False,\n+ pad_token=0,\n+ pad_token_segment_id=0,\n mask_padding_with_zero=True):\n \"\"\" Loads a data file into a list of `InputBatch`s\n `cls_token_at_end` define the location of the CLS token:\n@@ -229,72 +233,31 @@ def _convert_examples_to_features(self, examples, max_seq_length,\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n \n- tokens_a = tokenizer.tokenize(example.text)\n-\n- tokens_b = None\n- if example.text_b:\n- tokens_b = tokenizer.tokenize(example.text_b)\n- # Modifies `tokens_a` and `tokens_b` in place so that the total\n- # length is less than the specified length.\n- # Account for [CLS], [SEP], [SEP] with \"- 3\"\n- _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n- else:\n- # Account for [CLS] and [SEP] with \"- 2\"\n- if len(tokens_a) > max_seq_length - 2:\n- tokens_a = tokens_a[:(max_seq_length - 2)]\n-\n- # The convention in BERT is:\n- # (a) For sequence pairs:\n- # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n- # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n- # (b) For single sequences:\n- # tokens: [CLS] the dog is hairy . [SEP]\n- # type_ids: 0 0 0 0 0 0 0\n- #\n- # Where \"type_ids\" are used to indicate whether this is the first\n- # sequence or the second sequence. The embedding vectors for `type=0` and\n- # `type=1` were learned during pre-training and are added to the wordpiece\n- # embedding vector (and position vector). This is not *strictly* necessary\n- # since the [SEP] token unambiguously separates the sequences, but it makes\n- # it easier for the model to learn the concept of sequences.\n- #\n- # For classification tasks, the first vector (corresponding to [CLS]) is\n- # used as as the \"sentence vector\". 
Note that this only makes sense because\n- # the entire model is fine-tuned.\n- tokens = tokens_a + [sep_token]\n- segment_ids = [sequence_a_segment_id] * len(tokens)\n-\n- if tokens_b:\n- tokens += tokens_b + [sep_token]\n- segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)\n+ inputs = tokenizer.encode_plus(\n+ example.text,\n+ example.text_b,\n+ add_special_tokens=True,\n+ max_length=max_seq_length,\n+ )\n+ input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n \n- if cls_token_at_end:\n- tokens = tokens + [cls_token]\n- segment_ids = segment_ids + [cls_token_segment_id]\n- else:\n- tokens = [cls_token] + tokens\n- segment_ids = [cls_token_segment_id] + segment_ids\n-\n- input_ids = tokenizer.convert_tokens_to_ids(tokens)\n-\n- # The mask has 1 for real tokens and 0 for padding tokens. Only real\n- # tokens are attended to.\n- input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n+ attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n \n- # Zero-pad up to the sequence length.\n padding_length = max_seq_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n- input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask\n- segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids\n+ attention_mask = ([0 if mask_padding_with_zero else 1]\n+ * padding_length) + attention_mask\n+ token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n- input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\n- segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)\n+ attention_mask = attention_mask + \\\n+ ([0 if mask_padding_with_zero else 1] * padding_length)\n+ token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\n \n assert len(input_ids) == max_seq_length\n- assert len(input_mask) == max_seq_length\n- assert len(segment_ids) == max_seq_length\n+ assert len(attention_mask) == max_seq_length\n+ assert len(token_type_ids) == max_seq_length\n \n if include_labels:\n if task_type == \"classification\":\n@@ -307,23 +270,7 @@ def _convert_examples_to_features(self, examples, max_seq_length,\n label_id = None\n \n features.append(InputFeatures(input_ids=input_ids,\n- input_mask=input_mask,\n- segment_ids=segment_ids,\n+ input_mask=attention_mask,\n+ segment_ids=token_type_ids,\n label_id=label_id))\n return features\n-\n-\n-def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n- \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n- # This is a simple heuristic which will always truncate the longer sequence\n- # one token at a time. 
This makes more sense than truncating an equal percent\n- # of tokens from each, since if one sequence is very short then each token\n- # that's truncated likely contains more information than a longer sequence.\n- while True:\n- total_length = len(tokens_a) + len(tokens_b)\n- if total_length <= max_length:\n- break\n- if len(tokens_a) > len(tokens_b):\n- tokens_a.pop()\n- else:\n- tokens_b.pop()\ndiff --git a/nlp_architect/models/transformers/token_classification.py b/nlp_architect/models/transformers/token_classification.py\n--- a/nlp_architect/models/transformers/token_classification.py\n+++ b/nlp_architect/models/transformers/token_classification.py\n@@ -13,21 +13,32 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n # ******************************************************************************\n-from typing import Union, List\n+import logging\n+from typing import List, Union\n \n import torch\n-from pytorch_transformers import BertForTokenClassification, XLNetPreTrainedModel, XLNetModel\n-from torch.nn import CrossEntropyLoss, functional as F\n-from torch.utils.data import DataLoader, TensorDataset, SequentialSampler\n+from torch.nn import CrossEntropyLoss, Dropout, Linear\n+from torch.nn import functional as F\n+from torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n+from transformers import (ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,\n+ BertForTokenClassification, BertPreTrainedModel,\n+ RobertaConfig, RobertaModel, XLNetModel,\n+ XLNetPreTrainedModel)\n \n from nlp_architect.data.sequential_tagging import TokenClsInputExample\n-from nlp_architect.models.transformers.base_model import TransformerBase, logger, InputFeatures\n-from nlp_architect.models.transformers.quantized_bert import QuantizedBertForTokenClassification\n+from nlp_architect.models.transformers.base_model import (InputFeatures,\n+ TransformerBase,\n+ logger)\n+from nlp_architect.models.transformers.quantized_bert import \\\n+ QuantizedBertForTokenClassification\n from nlp_architect.utils.metrics import tagging\n \n+logger = logging.getLogger()\n+logger.setLevel(logging.INFO)\n \n-def bert_for_tagging_forward(bert, input_ids, token_type_ids=None, attention_mask=None,\n- labels=None, position_ids=None, head_mask=None, valid_ids=None):\n+\n+def _bert_token_tagging_head_fw(bert, input_ids, token_type_ids=None, attention_mask=None,\n+ labels=None, position_ids=None, head_mask=None, valid_ids=None):\n outputs = bert.bert(\n input_ids,\n token_type_ids=token_type_ids,\n@@ -47,8 +58,9 @@ def bert_for_tagging_forward(bert, input_ids, token_type_ids=None, attention_mas\n return (logits,)\n \n \n-class BertForTagging(BertForTokenClassification):\n+class BertTokenClassificationHead(BertForTokenClassification):\n \"\"\"BERT token classification head with linear classifier.\n+ This head's forward ignores word piece tokens in its linear layer.\n \n The forward requires an additional 'valid_ids' map that maps the tensors\n for valid tokens (e.g., ignores additional word piece tokens generated by\n@@ -57,14 +69,15 @@ class BertForTagging(BertForTokenClassification):\n \n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,\n position_ids=None, head_mask=None, valid_ids=None):\n- return bert_for_tagging_forward(self, input_ids, token_type_ids=token_type_ids,\n- attention_mask=attention_mask, labels=labels,\n- position_ids=position_ids, head_mask=head_mask,\n- valid_ids=valid_ids)\n+ return _bert_token_tagging_head_fw(self, 
input_ids, token_type_ids=token_type_ids,\n+ attention_mask=attention_mask, labels=labels,\n+ position_ids=position_ids, head_mask=head_mask,\n+ valid_ids=valid_ids)\n \n \n-class QuantizedBertForTagging(QuantizedBertForTokenClassification):\n+class QuantizedBertForTokenClassificationHead(QuantizedBertForTokenClassification):\n \"\"\"Quantized BERT token classification head with linear classifier.\n+ This head's forward ignores word piece tokens in its linear layer.\n \n The forward requires an additional 'valid_ids' map that maps the tensors\n for valid tokens (e.g., ignores additional word piece tokens generated by\n@@ -73,14 +86,15 @@ class QuantizedBertForTagging(QuantizedBertForTokenClassification):\n \n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,\n position_ids=None, head_mask=None, valid_ids=None):\n- return bert_for_tagging_forward(self, input_ids, token_type_ids=token_type_ids,\n- attention_mask=attention_mask, labels=labels,\n- position_ids=position_ids, head_mask=head_mask,\n- valid_ids=valid_ids)\n+ return _bert_token_tagging_head_fw(self, input_ids, token_type_ids=token_type_ids,\n+ attention_mask=attention_mask, labels=labels,\n+ position_ids=position_ids, head_mask=head_mask,\n+ valid_ids=valid_ids)\n \n \n-class XLNetForTokenClassification(XLNetPreTrainedModel):\n+class XLNetTokenClassificationHead(XLNetPreTrainedModel):\n \"\"\"XLNet token classification head with linear classifier.\n+ This head's forward ignores word piece tokens in its linear layer.\n \n The forward requires an additional 'valid_ids' map that maps the tensors\n for valid tokens (e.g., ignores additional word piece tokens generated by\n@@ -88,7 +102,7 @@ class XLNetForTokenClassification(XLNetPreTrainedModel):\n \"\"\"\n \n def __init__(self, config):\n- super(XLNetForTokenClassification, self).__init__(config)\n+ super(XLNetTokenClassificationHead, self).__init__(config)\n self.num_labels = config.num_labels\n \n self.transformer = XLNetModel(config)\n@@ -100,7 +114,6 @@ def __init__(self, config):\n def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None,\n mems=None, perm_mask=None, target_mapping=None,\n labels=None, head_mask=None, valid_ids=None):\n- # raise NotImplementedError\n transformer_outputs = self.transformer(input_ids, token_type_ids=token_type_ids,\n input_mask=input_mask, attention_mask=attention_mask,\n mems=mems, perm_mask=perm_mask, target_mapping=target_mapping,\n@@ -119,6 +132,52 @@ def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mas\n return (logits,)\n \n \n+class RobertaForTokenClassificationHead(BertPreTrainedModel):\n+ \"\"\"RoBERTa token classification head with linear classifier.\n+ This head's forward ignores word piece tokens in its linear layer.\n+\n+ The forward requires an additional 'valid_ids' map that maps the tensors\n+ for valid tokens (e.g., ignores additional word piece tokens generated by\n+ the tokenizer, as in NER task the 'X' label).\n+ \"\"\"\n+ config_class = RobertaConfig\n+ pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP\n+ base_model_prefix = \"roberta\"\n+\n+ def __init__(self, config):\n+ super(RobertaForTokenClassificationHead, self).__init__(config)\n+ self.num_labels = config.num_labels\n+\n+ self.roberta = RobertaModel(config)\n+ self.dropout = Dropout(config.hidden_dropout_prob)\n+ self.classifier = Linear(config.hidden_size, config.num_labels)\n+\n+ self.init_weights()\n+\n+ def forward(self, input_ids, attention_mask=None, 
token_type_ids=None,\n+ position_ids=None, head_mask=None, labels=None, valid_ids=None):\n+\n+ outputs = self.roberta(input_ids,\n+ attention_mask=attention_mask,\n+ token_type_ids=token_type_ids,\n+ position_ids=position_ids,\n+ head_mask=head_mask)\n+\n+ sequence_output = outputs[0]\n+\n+ sequence_output = self.dropout(sequence_output)\n+ logits = self.classifier(sequence_output)\n+\n+ if labels is not None:\n+ loss_fct = CrossEntropyLoss(ignore_index=0)\n+ active_positions = valid_ids.view(-1) != 0.0\n+ active_labels = labels.view(-1)[active_positions]\n+ active_logits = logits.view(-1, self.num_labels)[active_positions]\n+ loss = loss_fct(active_logits, active_labels)\n+ return (loss, logits,)\n+ return (logits,)\n+\n+\n class TransformerTokenClassifier(TransformerBase):\n \"\"\"\n Transformer word tagging classifier\n@@ -128,9 +187,10 @@ class TransformerTokenClassifier(TransformerBase):\n labels (List[str], optional): list of tag labels\n \"\"\"\n MODEL_CLASS = {\n- 'bert': BertForTagging,\n- 'quant_bert': QuantizedBertForTagging,\n- 'xlnet': XLNetForTokenClassification,\n+ 'bert': BertTokenClassificationHead,\n+ 'quant_bert': QuantizedBertForTokenClassificationHead,\n+ 'xlnet': XLNetTokenClassificationHead,\n+ 'roberta': RobertaForTokenClassificationHead,\n }\n \n def __init__(self, model_type: str, labels: List[str] = None, *args, load_quantized=False, **kwargs):\n@@ -145,8 +205,12 @@ def __init__(self, model_type: str, labels: List[str] = None, *args, load_quanti\n **kwargs)\n \n self.model_class = self.MODEL_CLASS[model_type]\n- self.model = self.model_class.from_pretrained(self.model_name_or_path, from_tf=bool(\n- '.ckpt' in self.model_name_or_path), config=self.config, from_8bit=load_quantized)\n+ if model_type == 'quant_bert' and load_quantized:\n+ self.model = self.model_class.from_pretrained(self.model_name_or_path, from_tf=bool(\n+ '.ckpt' in self.model_name_or_path), config=self.config, from_8bit=load_quantized)\n+ else:\n+ self.model = self.model_class.from_pretrained(self.model_name_or_path, from_tf=bool(\n+ '.ckpt' in self.model_name_or_path), config=self.config)\n self.to(self.device, self.n_gpus)\n \n def train(self,\n@@ -215,7 +279,7 @@ def evaluate_predictions(self, logits, label_ids):\n logits = logits.detach().cpu().numpy()\n out_label_ids = active_labels.detach().cpu().numpy()\n _, _, f1 = self.extract_labels(out_label_ids, self.labels_id_map, logits)\n- logger.info(\"Evaluation on set = F1: {}\".format(f1))\n+ logger.info(\"Results on evaluation set: F1 = {}\".format(f1))\n \n @staticmethod\n def extract_labels(label_ids, label_map, logits):\n@@ -248,15 +312,18 @@ def convert_to_tensors(self,\n include_labels,\n # xlnet has a cls token at the end\n cls_token_at_end=bool(\n- self.model_type in [\n- 'xlnet']),\n+ self.model_type in ['xlnet']),\n cls_token=self.tokenizer.cls_token,\n- sep_token=self.tokenizer.sep_token,\n cls_token_segment_id=2\n- if self.model_type in['xlnet'] else 1,\n+ if self.model_type in['xlnet'] else 0,\n+ sep_token=self.tokenizer.sep_token,\n+ sep_token_extra=bool(\n+ self.model_type in ['roberta']),\n # pad on the left for xlnet\n pad_on_left=bool(\n self.model_type in ['xlnet']),\n+ pad_token=self.tokenizer.convert_tokens_to_ids(\n+ [self.tokenizer.pad_token])[0],\n pad_token_segment_id=4\n if self.model_type in [\n 'xlnet'] else 0)\n@@ -283,6 +350,7 @@ def _convert_examples_to_features(self,\n cls_token_at_end=False, pad_on_left=False,\n cls_token='[CLS]', sep_token='[SEP]', pad_token=0,\n sequence_segment_id=0,\n+ sep_token_extra=0,\n 
cls_token_segment_id=1, pad_token_segment_id=0,\n mask_padding_with_zero=True):\n \"\"\" Loads a data file into a list of `InputBatch`s\n@@ -317,33 +385,21 @@ def _convert_examples_to_features(self,\n labels.extend(v_lbl)\n \n # truncate by max_seq_length\n- tokens = tokens[:(max_seq_length - 2)]\n+ special_tokens_count = 3 if sep_token_extra else 2\n+ tokens = tokens[:(max_seq_length - special_tokens_count)]\n+ valid_tokens = valid_tokens[:(max_seq_length - special_tokens_count)]\n if include_labels:\n- labels = labels[:(max_seq_length - 2)]\n- valid_tokens = valid_tokens[:(max_seq_length - 2)]\n-\n- # The convention in BERT is:\n- # (a) For sequence pairs:\n- # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n- # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n- # (b) For single sequences:\n- # tokens: [CLS] the dog is hairy . [SEP]\n- # type_ids: 0 0 0 0 0 0 0\n- #\n- # Where \"type_ids\" are used to indicate whether this is the first\n- # sequence or the second sequence. The embedding vectors for `type=0` and\n- # `type=1` were learned during pre-training and are added to the wordpiece\n- # embedding vector (and position vector). This is not *strictly* necessary\n- # since the [SEP] token unambiguously separates the sequences, but it makes\n- # it easier for the model to learn the concept of sequences.\n- #\n- # For classification tasks, the first vector (corresponding to [CLS]) is\n- # used as as the \"sentence vector\". Note that this only makes sense because\n- # the entire model is fine-tuned.\n- tokens = tokens + [sep_token]\n+ labels = labels[:(max_seq_length - special_tokens_count)]\n+\n+ tokens += [sep_token]\n if include_labels:\n- labels = labels + [label_pad]\n- valid_tokens = valid_tokens + [0]\n+ labels += [label_pad]\n+ valid_tokens += [0]\n+ if sep_token_extra: # roberta special case\n+ tokens += [sep_token]\n+ valid_tokens += [0]\n+ if include_labels:\n+ labels += [label_pad]\n segment_ids = [sequence_segment_id] * len(tokens)\n \n if cls_token_at_end:\n@@ -361,8 +417,6 @@ def _convert_examples_to_features(self,\n \n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n \n- # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n- # tokens are attended to.\n input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n \n # Zero-pad up to the sequence length.\ndiff --git a/nlp_architect/procedures/transformers/base.py b/nlp_architect/procedures/transformers/base.py\n--- a/nlp_architect/procedures/transformers/base.py\n+++ b/nlp_architect/procedures/transformers/base.py\n@@ -14,12 +14,9 @@\n # limitations under the License.\n # ******************************************************************************\n import argparse\n-import logging\n \n from nlp_architect.models.transformers.base_model import get_models\n \n-logger = logging.getLogger(__name__)\n-\n \n def create_base_args(parser: argparse.ArgumentParser, model_types=None):\n \"\"\"Add base arguments for Transformers based models\ndiff --git a/nlp_architect/procedures/transformers/seq_tag.py b/nlp_architect/procedures/transformers/seq_tag.py\n--- a/nlp_architect/procedures/transformers/seq_tag.py\n+++ b/nlp_architect/procedures/transformers/seq_tag.py\n@@ -35,10 +35,6 @@\n from nlp_architect.utils.text import SpacyInstance\n \n logger = logging.getLogger(__name__)\n-# Setup logging\n-logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n- datefmt='%m/%d/%Y %H:%M:%S',\n- level=logging.INFO)\n \n \n @register_train_cmd(name='transformer_token',\ndiff --git a/nlp_architect/solutions/set_expansion/expand_server.py b/nlp_architect/solutions/set_expansion/expand_server.py\n--- a/nlp_architect/solutions/set_expansion/expand_server.py\n+++ b/nlp_architect/solutions/set_expansion/expand_server.py\n@@ -24,7 +24,6 @@\n from nlp_architect.utils.io import validate_existing_filepath, check_size\n from nlp_architect.solutions.set_expansion.prepare_data import load_parser, extract_noun_phrases\n \n-logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n logger = logging.getLogger(__name__)\n \n \ndiff --git a/nlp_architect/solutions/set_expansion/prepare_data.py b/nlp_architect/solutions/set_expansion/prepare_data.py\n--- a/nlp_architect/solutions/set_expansion/prepare_data.py\n+++ b/nlp_architect/solutions/set_expansion/prepare_data.py\n@@ -32,7 +32,6 @@\n from nlp_architect.utils.io import check_size, download_unlicensed_file, validate_parent_exists\n from nlp_architect.utils.text import spacy_normalizer, SpacyInstance\n \n-logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n logger = logging.getLogger(__name__)\n \n np2id = {}\ndiff --git a/nlp_architect/solutions/set_expansion/set_expand.py b/nlp_architect/solutions/set_expansion/set_expand.py\n--- a/nlp_architect/solutions/set_expansion/set_expand.py\n+++ b/nlp_architect/solutions/set_expansion/set_expand.py\n@@ -22,7 +22,6 @@\n from nlp_architect.models.np2vec import NP2vec\n from nlp_architect.utils.io import validate_existing_filepath, check_size, load_json_file\n \n-logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n logger = logging.getLogger(__name__)\n \n \ndiff --git a/nlp_architect/solutions/set_expansion/ui/main.py b/nlp_architect/solutions/set_expansion/ui/main.py\n--- a/nlp_architect/solutions/set_expansion/ui/main.py\n+++ b/nlp_architect/solutions/set_expansion/ui/main.py\n@@ -30,7 +30,6 @@\n import nlp_architect.solutions.set_expansion.ui.settings as settings\n \n # pylint: skip-file\n-logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n logger = logging.getLogger(__name__)\n \n vocab = None\ndiff --git a/nlp_architect/solutions/trend_analysis/np_scorer.py b/nlp_architect/solutions/trend_analysis/np_scorer.py\n--- 
a/nlp_architect/solutions/trend_analysis/np_scorer.py\n+++ b/nlp_architect/solutions/trend_analysis/np_scorer.py\n@@ -29,7 +29,6 @@\n chunker_model_dat_file = 'model_info.dat.params'\n chunker_model_file = 'model.h5'\n chunker_local_path = str(LIBRARY_OUT / 'chunker-pretrained')\n-logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n logger = logging.getLogger(__name__)\n \n \ndiff --git a/nlp_architect/solutions/trend_analysis/topic_extraction.py b/nlp_architect/solutions/trend_analysis/topic_extraction.py\n--- a/nlp_architect/solutions/trend_analysis/topic_extraction.py\n+++ b/nlp_architect/solutions/trend_analysis/topic_extraction.py\n@@ -33,7 +33,6 @@\n from nlp_architect.utils.io import validate_existing_directory\n from nlp_architect.utils.text import SpacyInstance\n \n-logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n logger = logging.getLogger(__name__)\n data_dir = str(LIBRARY_OUT / 'trend-analysis-data')\n \ndiff --git a/nlp_architect/solutions/trend_analysis/trend_analysis.py b/nlp_architect/solutions/trend_analysis/trend_analysis.py\n--- a/nlp_architect/solutions/trend_analysis/trend_analysis.py\n+++ b/nlp_architect/solutions/trend_analysis/trend_analysis.py\n@@ -33,7 +33,6 @@\n from nlp_architect.utils.text import simple_normalizer\n \n dir = str(LIBRARY_OUT / 'trend-analysis-data')\n-logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n logger = logging.getLogger(__name__)\n target_topics_path = path.join(dir, 'target_topics.csv')\n ref_topics_path = path.join(dir, 'ref_topics.csv')\ndiff --git a/nlp_architect/solutions/trend_analysis/ui/main.py b/nlp_architect/solutions/trend_analysis/ui/main.py\n--- a/nlp_architect/solutions/trend_analysis/ui/main.py\n+++ b/nlp_architect/solutions/trend_analysis/ui/main.py\n@@ -32,7 +32,6 @@\n from nlp_architect.solutions.trend_analysis.trend_analysis import analyze\n from nlp_architect import LIBRARY_OUT\n \n-logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n logger = logging.getLogger(__name__)\n dir = str(LIBRARY_OUT / 'trend-analysis-data')\n graph_data_path = path.join(dir, 'graph_data.csv')\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2019-11-03T14:14:53Z"} | |