"""
This script provides an example to wrap TencentPretrain for NER inference.
"""
import sys
import os
import argparse
import json

import torch
import torch.nn as nn

tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(tencentpretrain_dir)

from tencentpretrain.utils.config import load_hyperparam
from tencentpretrain.utils.constants import *
from tencentpretrain.utils.tokenizers import *
from tencentpretrain.model_loader import load_model
from tencentpretrain.opts import infer_opts
from finetune.run_ner import NerTagger


def read_dataset(args, path):
    dataset, columns = [], {}
    # Look up the pad id once instead of once per input line.
    PAD_ID = args.tokenizer.convert_tokens_to_ids([PAD_TOKEN])[0]
    with open(path, mode="r", encoding="utf-8") as f:
        for line_id, line in enumerate(f):
            # The first line is a header; record the position of each column.
            if line_id == 0:
                for i, column_name in enumerate(line.rstrip("\r\n").split("\t")):
                    columns[column_name] = i
                continue
            line = line.rstrip("\r\n").split("\t")
            text_a = line[columns["text_a"]]
            src = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(text_a))
            seg = [1] * len(src)
            # Truncate to seq_length, then pad shorter instances with PAD_ID
            # (segment value 0 marks padding positions).
            if len(src) > args.seq_length:
                src = src[:args.seq_length]
                seg = seg[:args.seq_length]
            while len(src) < args.seq_length:
                src.append(PAD_ID)
                seg.append(0)
            dataset.append([src, seg])
    return dataset


def batch_loader(batch_size, src, seg):
    instances_num = src.size()[0]
    for i in range(instances_num // batch_size):
        src_batch = src[i * batch_size : (i + 1) * batch_size, :]
        seg_batch = seg[i * batch_size : (i + 1) * batch_size, :]
        yield src_batch, seg_batch
    # Yield the remaining instances as a final, smaller batch when
    # instances_num is not a multiple of batch_size.
    if instances_num > instances_num // batch_size * batch_size:
        src_batch = src[instances_num // batch_size * batch_size :, :]
        seg_batch = seg[instances_num // batch_size * batch_size :, :]
        yield src_batch, seg_batch


def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    infer_opts(parser)

    parser.add_argument("--vocab_path", default=None, type=str,
                        help="Path of the vocabulary file.")
    parser.add_argument("--spm_model_path", default=None, type=str,
                        help="Path of the sentence piece model.")
    parser.add_argument("--label2id_path", type=str, required=True,
                        help="Path of the label2id file.")
    parser.add_argument("--crf_target", action="store_true",
                        help="Use CRF loss as the target function or not, default False.")

    args = parser.parse_args()

    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)

    with open(args.label2id_path, mode="r", encoding="utf-8") as f:
        l2i = json.load(f)
        print("Labels: ", l2i)
        l2i["[PAD]"] = len(l2i)

    # Build the inverse mapping from label ids back to label strings.
    i2l = {}
    for key, value in l2i.items():
        i2l[value] = key

    args.l2i = l2i
    args.labels_num = len(l2i)

    # Load tokenizer.
    args.tokenizer = SpaceTokenizer(args)
    # Build sequence labeling model and load the fine-tuned weights.
    model = NerTagger(args)
    model = load_model(model, args.load_model_path)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    if torch.cuda.device_count() > 1:
        print("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)

    instances = read_dataset(args, args.test_path)

    src = torch.LongTensor([ins[0] for ins in instances])
    seg = torch.LongTensor([ins[1] for ins in instances])

    instances_num = src.size(0)
    batch_size = args.batch_size

    print("The number of prediction instances: ", instances_num)

    model.eval()

    with open(args.prediction_path, mode="w", encoding="utf-8") as f:
        f.write("pred_label" + "\n")
        for i, (src_batch, seg_batch) in enumerate(batch_loader(batch_size, src, seg)):
            src_batch = src_batch.to(device)
            seg_batch = seg_batch.to(device)
            with torch.no_grad():
                # No gold labels are available at inference time, so the
                # target argument is None and only the predictions are kept.
                _, pred = model(src_batch, None, seg_batch)

            # Recover the true sequence length of each instance in the batch:
            # scanning backwards, the last non-zero segment position marks the
            # end of the unpadded tokens.
            seq_length_batch = []
            for seg_instance in seg_batch.cpu().numpy().tolist():
                for j in range(len(seg_instance) - 1, -1, -1):
                    if seg_instance[j] != 0:
                        break
                seq_length_batch.append(j + 1)

            # pred holds one label id per token, flattened across the batch;
            # write each instance's labels up to its true length.
            pred = pred.cpu().numpy().tolist()
            for j in range(0, len(pred), args.seq_length):
                for label_id in pred[j: j + seq_length_batch[j // args.seq_length]]:
                    f.write(i2l[label_id] + " ")
                f.write("\n")


if __name__ == "__main__":
    main()