{"repo": "himkt/pyner", "pull_number": 48, "instance_id": "himkt__pyner-48", "issue_numbers": "", "base_commit": "84cf72d59ff5a133e0fae54825ceb371520646ba", "patch": "diff --git a/pyner/named_entity/inference.py b/pyner/named_entity/inference.py\n--- a/pyner/named_entity/inference.py\n+++ b/pyner/named_entity/inference.py\n@@ -1,71 +1,78 @@\n-from pyner.named_entity.dataset import converter\n-from pyner.named_entity.dataset import DatasetTransformer\n+import json\n+import logging\n+import pathlib\n+\n+import chainer\n+import click\n+\n+from pyner.named_entity.dataset import DatasetTransformer, converter\n from pyner.named_entity.recognizer import BiLSTM_CRF\n-from pyner.util.argparse import parse_inference_args\n from pyner.util.deterministic import set_seed\n from pyner.util.iterator import create_iterator\n-from pyner.util.vocab import Vocabulary\n from pyner.util.metric import select_snapshot\n-\n-import chainer\n-import pathlib\n-import logging\n-import json\n+from pyner.util.vocab import Vocabulary\n \n \n-if __name__ == '__main__':\n-    logger = logging.getLogger(__name__)\n-    fmt = '%(asctime)s : %(threadName)s : %(levelname)s : %(message)s'\n-    logging.basicConfig(level=logging.DEBUG, format=fmt)\n-    args = parse_inference_args()\n+@click.command()\n+@click.argument(\"model\")\n+@click.option(\"--epoch\", type=int, required=True)\n+@click.option(\"--device\", type=int, default=-1)\n+@click.option(\"--metric\", type=str, default=\"validation/main/fscore\")\n+def run_inference(model: str, epoch: int, device: str, metric: str):\n     chainer.config.train = False\n \n-    if args.device >= 0:\n-        chainer.cuda.get_device(args.device).use()\n+    if device >= 0:\n+        chainer.cuda.get_device(device).use()\n+\n     set_seed()\n \n-    model_dir = pathlib.Path(args.model)\n-    configs = json.load(open(model_dir / 'args'))\n+    model_dir = pathlib.Path(model)\n+    configs = json.load(open(model_dir / \"args\"))\n \n-    metric = args.metric.replace('/', '.')\n-    snapshot_file, prediction_path = select_snapshot(args, model_dir)\n-    logger.debug(f'creat prediction into {prediction_path}')\n+    metric = metric.replace(\"/\", \".\")\n+    snapshot_file, prediction_path = select_snapshot(\n+        epoch, metric, model, model_dir)\n+    logger.debug(f\"creat prediction into {prediction_path}\")\n \n     vocab = Vocabulary.prepare(configs)\n-    num_word_vocab = configs['num_word_vocab']\n-    num_char_vocab = configs['num_char_vocab']\n-    num_tag_vocab = configs['num_tag_vocab']\n+    num_word_vocab = configs[\"num_word_vocab\"]\n+    num_char_vocab = configs[\"num_char_vocab\"]\n+    num_tag_vocab = configs[\"num_tag_vocab\"]\n \n-    model = BiLSTM_CRF(\n-        configs,\n-        num_word_vocab,\n-        num_char_vocab,\n-        num_tag_vocab\n-    )\n+    model = BiLSTM_CRF(configs, num_word_vocab, num_char_vocab, num_tag_vocab)\n \n     model_path = model_dir / snapshot_file\n-    logger.debug(f'load {snapshot_file}')\n+    logger.debug(f\"load {snapshot_file}\")\n     chainer.serializers.load_npz(model_path.as_posix(), model)\n \n-    if args.device >= 0:\n-        model.to_gpu(args.device)\n+    if device >= 0:\n+        model.to_gpu(device)\n \n     transformer = DatasetTransformer(vocab)\n     transform = transformer.transform\n-    test_iterator = create_iterator(vocab, configs, 'test', transform)\n+    test_iterator = create_iterator(vocab, configs, \"test\", transform)\n \n-    with open(prediction_path, 'w', encoding='utf-8') as file:\n+    with 
open(prediction_path, \"w\", encoding=\"utf-8\") as file:\n         for batch in test_iterator:\n-            in_arrays, t_arrays = converter(batch, args.device)\n+            in_arrays, t_arrays = converter(batch, device)\n             p_arrays = model.predict(in_arrays)\n \n-            word_sentences, t_tag_sentences = list(zip(*transformer.itransform(\n-                in_arrays[0], t_arrays)))\n-            _, p_tag_sentences = list(zip(*transformer.itransform(\n-                in_arrays[0], p_arrays)))\n+            word_sentences, t_tag_sentences = list(\n+                zip(*transformer.itransform(in_arrays[0], t_arrays))\n+            )\n+            _, p_tag_sentences = list(\n+                zip(*transformer.itransform(in_arrays[0], p_arrays))\n+            )\n \n             sentence_gen = zip(word_sentences, t_tag_sentences, p_tag_sentences)  # NOQA\n             for ws, ts, ps in sentence_gen:\n                 for w, t, p in zip(ws, ts, ps):\n-                    print(f'{w} {t} {p}', file=file)\n+                    print(f\"{w} {t} {p}\", file=file)\n                 print(file=file)\n+\n+\n+if __name__ == \"__main__\":\n+    logger = logging.getLogger(__name__)\n+    fmt = \"%(asctime)s : %(threadName)s : %(levelname)s : %(message)s\"\n+    logging.basicConfig(level=logging.DEBUG, format=fmt)\n+    run_inference()\ndiff --git a/pyner/named_entity/train.py b/pyner/named_entity/train.py\n--- a/pyner/named_entity/train.py\n+++ b/pyner/named_entity/train.py\n@@ -1,33 +1,26 @@\n-from pyner.named_entity.dataset import converter\n-from pyner.named_entity.dataset import DatasetTransformer\n-from pyner.named_entity.evaluator import NamedEntityEvaluator\n-from pyner.named_entity.recognizer import BiLSTM_CRF\n-from pyner.util.argparse import parse_train_args\n-from pyner.util.config import ConfigParser\n-from pyner.util.deterministic import set_seed\n-from pyner.util.vocab import Vocabulary\n-from pyner.util.iterator import create_iterator\n-from pyner.util.optimizer import create_optimizer\n-from pyner.util.optimizer import add_hooks\n-from pyner.util.optimizer import LearningRateDecay\n-\n-from chainerui.utils import save_args\n+import datetime\n+import logging\n from pathlib import Path\n \n+import chainer\n import chainer.training as T\n import chainer.training.extensions as E\n-\n-import datetime\n-import chainer\n-import logging\n+import click\n import yaml\n+from chainerui.utils import save_args\n+\n+from pyner.named_entity.dataset import DatasetTransformer, converter\n+from pyner.named_entity.evaluator import NamedEntityEvaluator\n+from pyner.named_entity.recognizer import BiLSTM_CRF\n+from pyner.util.config import ConfigParser\n+from pyner.util.deterministic import set_seed\n+from pyner.util.iterator import create_iterator\n+from pyner.util.optimizer import LearningRateDecay, add_hooks, create_optimizer\n+from pyner.util.vocab import Vocabulary\n \n \n def prepare_pretrained_word_vector(\n-        word2idx,\n-        gensim_model,\n-        syn0,\n-):\n+        word2idx, gensim_model, syn0, num_word_vocab):\n \n     # if lowercased word is in pre-trained embeddings,\n     # increment match2\n@@ -45,128 +38,124 @@ def prepare_pretrained_word_vector(\n             match2 += 1\n \n     match = match1 + match2\n-    matching_rate = 100 * (match/num_word_vocab)\n-    logger.info(f'Found \\x1b[31m{matching_rate:.2f}%\\x1b[0m words in pre-trained vocab')  # NOQA\n-    logger.info(f'- num_word_vocab: \\x1b[31m{num_word_vocab}\\x1b[0m')\n-    logger.info(f'- match1: 
\\x1b[31m{match1}\\x1b[0m, match2: \\x1b[31m{match2}\\x1b[0m')  # NOQA\n-    return syn0\n+    matching_rate = 100 * (match / num_word_vocab)\n \n+    logger.info(f\"Found \\x1b[31m{matching_rate:.2f}%\\x1b[0m words in pre-trained vocab\")  # NOQA\n+    logger.info(f\"- num_word_vocab: \\x1b[31m{num_word_vocab}\\x1b[0m\")\n+    logger.info(f\"- match1: \\x1b[31m{match1}\\x1b[0m, match2: \\x1b[31m{match2}\\x1b[0m\")  # NOQA\n+    return syn0\n \n-if __name__ == '__main__':\n-    logger = logging.getLogger(__name__)\n-    fmt = '[%(name)s] %(asctime)s : %(threadName)s : %(levelname)s : %(message)s'  # NOQA\n-    logging.basicConfig(level=logging.DEBUG, format=fmt)\n \n-    args = parse_train_args()\n-    params = yaml.load(open(args.config, encoding='utf-8'))\n+@click.command()\n+@click.argument(\"config\", type=str)\n+@click.option(\"--device\", type=int, default=-1)\n+@click.option(\"--seed\", type=int, default=31)\n+def run_training(config: str, device: int, seed: int):\n+    params = yaml.load(open(config, encoding=\"utf-8\"))\n \n-    if args.device >= 0:\n-        chainer.cuda.get_device(args.device).use()\n+    if device >= 0:\n+        chainer.cuda.get_device(device).use()\n \n-    set_seed(args.seed, args.device)\n+    set_seed(seed, device)\n \n-    configs = ConfigParser.parse(args.config)\n-    config_path = Path(args.config)\n+    configs = ConfigParser.parse(config)\n \n     vocab = Vocabulary.prepare(configs)\n-    num_word_vocab = max(vocab.dictionaries['word2idx'].values()) + 1\n-    num_char_vocab = max(vocab.dictionaries['char2idx'].values()) + 1\n-    num_tag_vocab = max(vocab.dictionaries['tag2idx'].values()) + 1\n-\n-    model = BiLSTM_CRF(\n-        configs,\n-        num_word_vocab,\n-        num_char_vocab,\n-        num_tag_vocab\n-    )\n+    num_word_vocab = max(vocab.dictionaries[\"word2idx\"].values()) + 1\n+    num_char_vocab = max(vocab.dictionaries[\"char2idx\"].values()) + 1\n+    num_tag_vocab = max(vocab.dictionaries[\"tag2idx\"].values()) + 1\n+\n+    model = BiLSTM_CRF(configs, num_word_vocab, num_char_vocab, num_tag_vocab)\n \n     transformer = DatasetTransformer(vocab)\n     transform = transformer.transform\n \n-    external_configs = configs['external']\n-    preprocessing_configs = configs['preprocessing']\n-    if 'word_vector' in external_configs:\n+    external_configs = configs[\"external\"]\n+    if \"word_vector\" in external_configs:\n         syn0 = model.embed_word.W.data\n         _, word_dim = syn0.shape\n         pre_word_dim = vocab.gensim_model.vector_size\n         if word_dim != pre_word_dim:\n-            msg = 'Mismatch vector size between model and pre-trained word vectors'  # NOQA\n-            msg += f'(model: \\x1b[31m{word_dim}\\x1b[0m'\n-            msg += f', pre-trained word vector: \\x1b[31m{pre_word_dim}\\x1b[0m'\n+            msg = \"Mismatch vector size between model and pre-trained word vectors\"  # NOQA\n+            msg += f\"(model: \\x1b[31m{word_dim}\\x1b[0m\"\n+            msg += f\", pre-trained word vector: \\x1b[31m{pre_word_dim}\\x1b[0m\"\n             raise Exception(msg)\n \n-        word2idx = vocab.dictionaries['word2idx']\n+        word2idx = vocab.dictionaries[\"word2idx\"]\n         syn0 = prepare_pretrained_word_vector(\n-            word2idx,\n-            vocab.gensim_model,\n-            syn0\n-        )\n+            word2idx, vocab.gensim_model, syn0, num_word_vocab)\n         model.set_pretrained_word_vectors(syn0)\n \n-    train_iterator = create_iterator(vocab, configs, 'train', transform)\n- 
   valid_iterator = create_iterator(vocab, configs, 'validation', transform)\n-    test_iterator = create_iterator(vocab, configs, 'test', transform)\n+    train_iterator = create_iterator(vocab, configs, \"train\", transform)\n+    valid_iterator = create_iterator(vocab, configs, \"validation\", transform)\n+    test_iterator = create_iterator(vocab, configs, \"test\", transform)\n \n-    if args.device >= 0:\n-        model.to_gpu(args.device)\n+    if device >= 0:\n+        model.to_gpu(device)\n \n     optimizer = create_optimizer(configs)\n     optimizer.setup(model)\n     optimizer = add_hooks(optimizer, configs)\n \n-    updater = T.StandardUpdater(train_iterator, optimizer,\n-                                converter=converter,\n-                                device=args.device)\n+    updater = T.StandardUpdater(\n+        train_iterator, optimizer, converter=converter, device=device\n+    )\n \n     params = configs.export()\n-    params['num_word_vocab'] = num_word_vocab\n-    params['num_char_vocab'] = num_char_vocab\n-    params['num_tag_vocab'] = num_tag_vocab\n+    params[\"num_word_vocab\"] = num_word_vocab\n+    params[\"num_char_vocab\"] = num_char_vocab\n+    params[\"num_tag_vocab\"] = num_tag_vocab\n \n-    epoch = configs['iteration']['epoch']\n-    trigger = (epoch, 'epoch')\n+    epoch = configs[\"iteration\"][\"epoch\"]\n+    trigger = (epoch, \"epoch\")\n \n-    model_path = configs['output']\n+    model_path = configs[\"output\"]\n     timestamp = datetime.datetime.now()\n     timestamp_str = timestamp.isoformat()\n-    output_path = Path(f'{model_path}.{timestamp_str}')\n+    output_path = Path(f\"{model_path}.{timestamp_str}\")\n \n-    trainer = T.Trainer(\n-        updater,\n-        trigger,\n-        out=output_path\n-    )\n+    trainer = T.Trainer(updater, trigger, out=output_path)\n     save_args(params, output_path)\n-    msg = f'Create \\x1b[31m{output_path}\\x1b[0m for saving model snapshots'\n+    msg = f\"Create \\x1b[31m{output_path}\\x1b[0m for saving model snapshots\"\n     logging.debug(msg)\n \n-    entries = ['epoch', 'iteration', 'elapsed_time', 'lr', 'main/loss']\n-    entries += ['validation/main/loss', 'validation/main/fscore']\n-    entries += ['validation_1/main/loss', 'validation_1/main/fscore']\n+    entries = [\"epoch\", \"iteration\", \"elapsed_time\", \"lr\", \"main/loss\"]\n+    entries += [\"validation/main/loss\", \"validation/main/fscore\"]\n+    entries += [\"validation_1/main/loss\", \"validation_1/main/fscore\"]\n \n-    valid_evaluator = NamedEntityEvaluator(valid_iterator, model,\n-                                           transformer.itransform,\n-                                           converter, device=args.device)\n+    valid_evaluator = NamedEntityEvaluator(\n+        valid_iterator, model, transformer.itransform, converter, device=device\n+    )\n \n-    test_evaluator = NamedEntityEvaluator(test_iterator, model,\n-                                          transformer.itransform,\n-                                          converter, device=args.device)\n+    test_evaluator = NamedEntityEvaluator(\n+        test_iterator, model, transformer.itransform, converter, device=device\n+    )\n \n-    epoch_trigger = (1, 'epoch')\n-    snapshot_filename = 'snapshot_epoch_{.updater.epoch:04d}'\n+    epoch_trigger = (1, \"epoch\")\n+    snapshot_filename = \"snapshot_epoch_{.updater.epoch:04d}\"\n     trainer.extend(valid_evaluator, trigger=epoch_trigger)\n     trainer.extend(test_evaluator, trigger=epoch_trigger)\n     
trainer.extend(E.observe_lr(), trigger=epoch_trigger)\n     trainer.extend(E.LogReport(trigger=epoch_trigger))\n     trainer.extend(E.PrintReport(entries=entries), trigger=epoch_trigger)\n     trainer.extend(E.ProgressBar(update_interval=20))\n-    trainer.extend(E.snapshot_object(model, filename=snapshot_filename),\n-                   trigger=(1, 'epoch'))\n-\n-    if 'learning_rate_decay' in params:\n-        logger.debug('Enable Learning Rate decay')\n-        trainer.extend(LearningRateDecay('lr', params['learning_rate'],\n-                                         params['learning_rate_decay']),\n-                       trigger=epoch_trigger)\n+    trainer.extend(E.snapshot_object(\n+        model, filename=snapshot_filename), trigger=(1, \"epoch\"))\n+\n+    if \"learning_rate_decay\" in params:\n+        logger.debug(\"Enable Learning Rate decay\")\n+        trainer.extend(\n+            LearningRateDecay(\n+                \"lr\", params[\"learning_rate\"], params[\"learning_rate_decay\"]),\n+            trigger=epoch_trigger,\n+        )\n \n     trainer.run()\n+\n+\n+if __name__ == \"__main__\":\n+    logger = logging.getLogger(__name__)\n+    fmt = \"%(asctime)s: %(message)s\"\n+    logging.basicConfig(level=logging.DEBUG, format=fmt)\n+\n+    run_training()\ndiff --git a/pyner/tool/corpus/parse_CoNLL2003.py b/pyner/tool/corpus/parse_CoNLL2003.py\n--- a/pyner/tool/corpus/parse_CoNLL2003.py\n+++ b/pyner/tool/corpus/parse_CoNLL2003.py\n@@ -1,13 +1,9 @@\n-from pyner.tool.corpus.common import CorpusParser\n-from pyner.tool.corpus.common import write_sentences\n-from pyner.tool.corpus.common import write_vocab\n-from pyner.tool.corpus.common import enum\n-\n-\n-import argparse\n-import pathlib\n import logging\n+import pathlib\n \n+import click\n+from pyner.tool.corpus.common import (CorpusParser, enum, write_sentences,\n+                                      write_vocab)\n \n SEED = 42\n BOS = 0  # begin of step\n@@ -15,61 +11,70 @@\n XXX = 2  # other\n \n \n-if __name__ == '__main__':\n-    fmt = \"%(asctime)s %(levelname)s %(name)s :%(message)s\"\n-    logging.basicConfig(level=logging.DEBUG, format=fmt)\n-\n-    parser = argparse.ArgumentParser()\n-    parser.add_argument('--data-dir', default='./data/external/CoNLL2003')\n-    parser.add_argument('--output-dir', default='./data/processed/CoNLL2003')\n-    parser.add_argument('--format')\n-    args = parser.parse_args()\n-\n-    logging.info('create dataset for CoNLL2003')\n+@click.command()\n+@click.option(\"--data-dir\", default=\"./data/external/CoNLL2003\", type=str)\n+@click.option(\"--output-dir\", default=\"./data/processed/CoNLL2003\", type=str)\n+@click.option(\"--format\", default=\"iob2bio\", type=str)\n+def main(data_dir: str, output_dir: str, format: str):\n+    logging.info(\"create dataset for CoNLL2003\")\n \n-    data_path = pathlib.Path(args.data_dir)\n-    output_path = pathlib.Path(args.output_dir)\n+    data_path = pathlib.Path(data_dir)\n+    output_path = pathlib.Path(output_dir)\n     output_path.mkdir(exist_ok=True, parents=True)\n \n-    logging.info('create corpus parser')\n-    corpus_parser = CorpusParser(args.format)\n-\n-    logging.info('parsing corpus for training')\n-    train_word_sentences, train_tag_sentences = \\\n-        corpus_parser.parse_file(data_path / 'eng.iob.train', word_idx=0)\n-    train_words, train_chars, train_tags = \\\n-        enum(train_word_sentences, train_tag_sentences)\n-\n-    logging.info('parsing corpus for validating')\n-    validation_word_sentences, 
validation_tag_sentences = \\\n-        corpus_parser.parse_file(data_path / 'eng.iob.testa', word_idx=0)\n-    validation_words, validation_chars, validation_tags = \\\n-        enum(validation_word_sentences, validation_tag_sentences)\n-\n-    logging.info('parsing corpus for testing')\n-    test_word_sentences, test_tag_sentences = \\\n-        corpus_parser.parse_file(data_path / 'eng.iob.testb', word_idx=0)\n-    test_words, test_chars, test_tags = \\\n-        enum(test_word_sentences, test_tag_sentences)\n+    logging.info(\"create corpus parser\")\n+    corpus_parser = CorpusParser(format)\n+\n+    logging.info(\"parsing corpus for training\")\n+    train_word_sentences, train_tag_sentences = corpus_parser.parse_file(\n+        data_path / \"eng.iob.train\", word_idx=0\n+    )\n+    train_words, train_chars, train_tags = enum(\n+        train_word_sentences, train_tag_sentences\n+    )\n+\n+    logging.info(\"parsing corpus for validating\")\n+    validation_word_sentences, validation_tag_sentences = corpus_parser.parse_file(  # NOQA\n+        data_path / \"eng.iob.testa\", word_idx=0\n+    )\n+    validation_words, validation_chars, validation_tags = enum(\n+        validation_word_sentences, validation_tag_sentences\n+    )\n+\n+    logging.info(\"parsing corpus for testing\")\n+    test_word_sentences, test_tag_sentences = corpus_parser.parse_file(\n+        data_path / \"eng.iob.testb\", word_idx=0\n+    )\n+    test_words, test_chars, test_tags = enum(\n+        test_word_sentences, test_tag_sentences\n+    )\n \n     # NOTE create vocabularies only using training dataset\n     words = train_words\n     chars = train_chars\n     tags = train_tags\n \n-    logging.info('Create training dataset')\n-    write_sentences('train', 'words', train_word_sentences, output_path)\n-    write_sentences('train', 'tags', train_tag_sentences, output_path)\n+    logging.info(\"Create training dataset\")\n+    write_sentences(\"train\", \"words\", train_word_sentences, output_path)\n+    write_sentences(\"train\", \"tags\", train_tag_sentences, output_path)\n+\n+    logging.info(\"Create validating dataset\")\n+    write_sentences(\n+        \"validation\", \"words\", validation_word_sentences, output_path\n+    )  # NOQA\n+    write_sentences(\"validation\", \"tags\", validation_tag_sentences, output_path)  # NOQA\n \n-    logging.info('Create validating dataset')\n-    write_sentences('validation', 'words', validation_word_sentences, output_path)  # NOQA\n-    write_sentences('validation', 'tags', validation_tag_sentences, output_path)  # NOQA\n+    logging.info(\"Create testing dataset\")\n+    write_sentences(\"test\", \"words\", test_word_sentences, output_path)\n+    write_sentences(\"test\", \"tags\", test_tag_sentences, output_path)\n \n-    logging.info('Create testing dataset')\n-    write_sentences('test', 'words', test_word_sentences, output_path)\n-    write_sentences('test', 'tags', test_tag_sentences, output_path)\n+    logging.info(\"Create vocabulary\")\n+    write_vocab(\"words\", words, output_path)\n+    write_vocab(\"chars\", chars, output_path)\n+    write_vocab(\"tags\", tags, output_path)\n \n-    logging.info('Create vocabulary')\n-    write_vocab('words', words, output_path)\n-    write_vocab('chars', chars, output_path)\n-    write_vocab('tags', tags, output_path)\n+\n+if __name__ == \"__main__\":\n+    fmt = \"%(asctime)s %(levelname)s %(name)s :%(message)s\"\n+    logging.basicConfig(level=logging.DEBUG, format=fmt)\n+    main()\ndiff --git 
a/pyner/tool/vector/__init__.py b/pyner/tool/vector/__init__.py\nnew file mode 100644\ndiff --git a/pyner/tool/vector/glove2skipgram.py b/pyner/tool/vector/glove2skipgram.py\ndeleted file mode 100644\n--- a/pyner/tool/vector/glove2skipgram.py\n+++ /dev/null\n@@ -1,22 +0,0 @@\n-from gensim.scripts.glove2word2vec import glove2word2vec\n-from gensim.models import KeyedVectors\n-from pathlib import Path\n-import argparse\n-\n-\n-if __name__ == '__main__':\n-    parser = argparse.ArgumentParser()\n-    parser.add_argument('input_file')\n-    parser.add_argument('output_file')\n-    args = parser.parse_args()\n-\n-    input_file = Path(args.input_file)\n-    output_file = Path(args.output_file)\n-\n-    tmp_file = '/tmp/w2v.tmp'\n-    glove2word2vec(args.input_file, tmp_file)\n-    model = KeyedVectors.load_word2vec_format(tmp_file)\n-    print('loaded GloVe embeddings')\n-\n-    model.save(output_file.as_posix())\n-    print('saved model')\ndiff --git a/pyner/tool/vector/prepare_embeddings.py b/pyner/tool/vector/prepare_embeddings.py\nnew file mode 100644\n--- /dev/null\n+++ b/pyner/tool/vector/prepare_embeddings.py\n@@ -0,0 +1,31 @@\n+import pathlib\n+\n+import click\n+from gensim.models import KeyedVectors\n+from gensim.scripts.glove2word2vec import glove2word2vec\n+\n+\n+@click.command()\n+@click.argument(\"input_file\",  type=str)  # NOQA\n+@click.argument(\"output_file\", type=str)\n+@click.option(\"--format\", default=\"word2vec\", type=str)\n+def main(input_file: str, output_file: str, format: str):\n+    input_file = pathlib.Path(input_file)  # NOQA\n+    output_file = pathlib.Path(output_file)\n+\n+    if format == \"glove\":\n+        tmp_file = \"/tmp/w2v.tmp\"\n+        glove2word2vec(input_file, tmp_file)\n+        model = KeyedVectors.load_word2vec_format(tmp_file)\n+        print(\"loaded GloVe embeddings\")\n+\n+    elif format == \"word2vec\":\n+        model = KeyedVectors.load_word2vec_format(input_file)\n+        print(\"loaded Word2vec embeddings\")\n+\n+    model.save(output_file.as_posix())\n+    print(\"saved model\")\n+\n+\n+if __name__ == \"__main__\":\n+    main()\ndiff --git a/pyner/tool/vector/word2vec2gensim.py b/pyner/tool/vector/word2vec2gensim.py\ndeleted file mode 100644\n--- a/pyner/tool/vector/word2vec2gensim.py\n+++ /dev/null\n@@ -1,19 +0,0 @@\n-from gensim.models import KeyedVectors\n-from pathlib import Path\n-import argparse\n-\n-\n-if __name__ == '__main__':\n-    parser = argparse.ArgumentParser()\n-    parser.add_argument('input_file')\n-    parser.add_argument('output_file')\n-    args = parser.parse_args()\n-\n-    input_file = Path(args.input_file)\n-    output_file = Path(args.output_file)\n-\n-    model = KeyedVectors.load_word2vec_format(args.input_file)\n-    print('loaded pre-trained word2vec model')\n-\n-    model.save(output_file.as_posix())\n-    print('saved model')\ndiff --git a/pyner/util/__init__.py b/pyner/util/__init__.py\n--- a/pyner/util/__init__.py\n+++ b/pyner/util/__init__.py\n@@ -1,4 +1,3 @@\n-from .argparse import *\n from .config import *\n from .deterministic import *\n from .iterator import *\ndiff --git a/pyner/util/argparse.py b/pyner/util/argparse.py\ndeleted file mode 100644\n--- a/pyner/util/argparse.py\n+++ /dev/null\n@@ -1,24 +0,0 @@\n-import argparse\n-import logging\n-\n-\n-logger = logging.getLogger(__name__)\n-\n-\n-def parse_train_args():\n-    parser = argparse.ArgumentParser()\n-    parser.add_argument('config')\n-    parser.add_argument('--gpu', type=int, default=-1, dest='device')\n-    
parser.add_argument('--seed', type=int, default=31)\n-    args = parser.parse_args()\n-    return args\n-\n-\n-def parse_inference_args():\n-    parser = argparse.ArgumentParser()\n-    parser.add_argument('model')\n-    parser.add_argument('--epoch', type=int)\n-    parser.add_argument('--gpu', type=int, dest='device', default=-1)\n-    parser.add_argument('--metric', default='validation/main/fscore')\n-    args = parser.parse_args()\n-    return args\ndiff --git a/pyner/util/metric.py b/pyner/util/metric.py\n--- a/pyner/util/metric.py\n+++ b/pyner/util/metric.py\n@@ -1,5 +1,6 @@\n from pathlib import Path\n \n+import typing\n import operator\n import logging\n import json\n@@ -8,17 +9,18 @@\n logger = logging.getLogger(__name__)\n \n \n-def select_snapshot(args, model_dir):\n-    if args.epoch is None:\n-        epoch, max_value = argmax_metric(model_dir / 'log', args.metric)\n-        logger.debug(f'Epoch is {epoch:04d} ({args.metric}: {max_value:.2f})')  # NOQA\n-        metric_repr = args.metric.replace('/', '.')\n-        prediction_path = Path(args.model, f'{metric_repr}.epoch_{epoch:03d}.pred')  # NOQA\n+def select_snapshot(\n+        epoch: int, metric: typing.Optional[str], model: str, model_dir: str):\n+    if epoch is None:\n+        epoch, max_value = argmax_metric(model_dir / 'log', metric)\n+        logger.debug(f'Epoch is {epoch:04d} ({metric}: {max_value:.2f})')  # NOQA\n+        metric_repr = metric.replace('/', '.')\n+        prediction_path = Path(model, f'{metric_repr}.epoch_{epoch:03d}.pred')  # NOQA\n \n     else:\n-        epoch = args.epoch\n+        epoch = epoch\n         logger.debug(f'Epoch is {epoch:04d} (which is specified manually)')\n-        prediction_path = Path(args.model, f'epoch_{epoch:03d}.pred')\n+        prediction_path = Path(model, f'epoch_{epoch:03d}.pred')\n \n     snapshot_file = f'snapshot_epoch_{epoch:04d}'\n     return snapshot_file, prediction_path\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2019-08-02T23:59:35Z"}
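The record above captures a refactoring of pyner's command-line entry points: the hand-rolled argparse helpers in pyner/util/argparse.py are deleted and each script (train.py, inference.py, parse_CoNLL2003.py, prepare_embeddings.py) gains a click-decorated function instead. Below is a minimal, hedged sketch of that pattern, assuming only that click is installed; the option names mirror run_inference in the patch, while the echo body is a placeholder rather than the project's actual inference logic.

# Sketch of the click-based entry point adopted in this PR (illustrative only).
import click


@click.command()
@click.argument("model")
@click.option("--epoch", type=int, required=True)
@click.option("--device", type=int, default=-1)
@click.option("--metric", type=str, default="validation/main/fscore")
def run_inference(model: str, epoch: int, device: int, metric: str):
    # click parses, converts, and validates the CLI arguments before this body runs
    click.echo(f"model={model} epoch={epoch} device={device} metric={metric}")


if __name__ == "__main__":
    # e.g. `python inference.py ./models/example --epoch 3 --device -1`
    run_inference()

Compared with the removed parse_inference_args helper, the declarative decorators keep argument definitions next to the function that consumes them and let click handle type conversion and --help generation.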