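# Data locations; left null here and normally supplied at runtime via overrides
# such as --paths.train and --paths.dev.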
[paths]
train = null
dev = null
raw = null
init_tok2vec = null
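
# Reproducibility and GPU settings; gpu_allocator = "pytorch" routes GPU memory
# allocation through PyTorch so it is shared with the transformer.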
[system]
seed = 342
gpu_allocator = "pytorch"
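
# Top-level pipeline: the transformer runs first so the relation_extractor can
# listen to its output; batch_size is the default batch size for nlp.pipe.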
[nlp]
lang = "en"
pipeline = ["transformer", "relation_extractor"]
disabled = []
before_creation = null
after_creation = null
after_pipeline_creation = null
tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"}
batch_size = 256
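
# Both components are sourced from an existing trained pipeline on disk rather
# than initialized from scratch; the path is relative to where training is run.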
[components]

[components.transformer]
source = "./chemrelmodels/rel/trf"
component = "transformer"
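
# The transformer component wraps the HuggingFace "roberta-base" weights,
# loaded with the fast tokenizer implementation.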
[components.transformer.model]
@architectures = "spacy-transformers.TransformerModel.v1"
name = "roberta-base"
tokenizer_config = {"use_fast": true}
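
# How documents are sliced before going through the transformer: overlapping
# windows of up to 256 tokens, moved forward 64 tokens at a time.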
[components.transformer.model.get_spans]
@span_getters = "spacy-transformers.strided_spans.v1"
window = 256
stride = 64
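
# The relation extraction component, sourced from the same saved pipeline.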
[components.relation_extractor]
source = "./chemrelmodels/rel/trf"
component = "relation_extractor"
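
# Custom registered architecture for the relation extractor: an instance-tensor
# layer feeding a classification layer, both configured below.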
[components.relation_extractor.model]
@architectures = "rel_model.v1"
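
# Builds one tensor per candidate entity pair from the token vectors produced below.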
[components.relation_extractor.model.create_instance_tensor]
@architectures = "rel_instance_tensor.v1"
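
# Token vectors come from the shared transformer via a listener rather than a
# second transformer; grad_factor = 1.0 backpropagates to it at full strength.
# The nested pooling averages wordpiece vectors into one vector per token.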
[components.relation_extractor.model.create_instance_tensor.tok2vec]
@architectures = "spacy-transformers.TransformerListener.v1"
grad_factor = 1.0

[components.relation_extractor.model.create_instance_tensor.tok2vec.pooling]
@layers = "reduce_mean.v1"
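
# Mean-pools the token vectors within each entity span into a single vector per entity.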
[components.relation_extractor.model.create_instance_tensor.pooling]
@layers = "reduce_mean.v1"
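
# Candidate entity-pair generation (custom @misc function); max_length limits
# how far apart two entities may be and still form a candidate pair.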
[components.relation_extractor.model.create_instance_tensor.get_instances]
@misc = "rel_instance_generator.v1"
max_length = 400
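
# Final classification layer; nI (input width) and nO (number of relation
# labels) are left null and inferred when the model is initialized.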
[components.relation_extractor.model.classification_layer]
@architectures = "rel_classification_layer.v1"
nI = null
nO = null
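
# No component-specific initialization overrides; the sourced components keep
# the weights they were loaded with.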
[initialize]

[initialize.components]
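
# Both corpora use the custom reader registered as Gold_ents_Corpus.v1 (docs
# with gold entity annotations); the file paths are interpolated from [paths].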
[corpora]

[corpora.dev]
@readers = "Gold_ents_Corpus.v1"
file = ${paths.dev}

[corpora.train]
@readers = "Gold_ents_Corpus.v1"
file = ${paths.train}
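
# Training loop settings. The dev corpus is scored every eval_frequency steps;
# patience (steps without improvement) and max_steps bound how long training
# runs, and max_epochs = 0 places no limit on the number of epochs.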
[training]
seed = ${system.seed}
gpu_allocator = ${system.gpu_allocator}
dropout = 0.1
accumulate_gradient = 1
patience = 16000000000
max_epochs = 0
max_steps = 1000000000
eval_frequency = 10
frozen_components = []
dev_corpus = "corpora.dev"
train_corpus = "corpora.train"
before_to_disk = null
logger = {"@loggers":"spacy.ConsoleLogger.v1"}
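
# Batches are grouped by the padded size they would produce, capped at 1024;
# examples are buffered and length-sorted first, and examples too large to fit
# in a batch are discarded rather than truncated.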
[training.batcher]
@batchers = "spacy.batch_by_padded.v1"
discard_oversize = true
size = 1024
buffer = 128
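
# Adam with decoupled weight decay (L2_is_weight_decay = true, L2 = 0.01) and
# gradient clipping at 1.0.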
[training.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = false
eps = 0.00000001
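
# The learning rate warms up linearly to initial_rate over the first 250 steps,
# then decays linearly towards zero by step 20000.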
[training.optimizer.learn_rate]
@schedules = "warmup_linear.v1"
warmup_steps = 250
total_steps = 20000
initial_rate = 1e-6
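
# Model selection uses the micro-averaged relation F-score only; precision and
# recall are reported but carry zero weight in the overall score.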
[training.score_weights]
rel_micro_p = 0.0
rel_micro_r = 0.0
rel_micro_f = 1.0