[paths]
train = null
dev = null
raw = null
init_tok2vec = null

[system]
seed = 342
gpu_allocator = null

[nlp]
lang = "en"
pipeline = ["tok2vec", "relation_extractor"]
disabled = []
before_creation = null
after_creation = null
after_pipeline_creation = null
tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"}
batch_size = 512

[components]

[components.tok2vec]
factory = "tok2vec"

[components.tok2vec.model]
@architectures = "spacy.HashEmbedCNN.v1"
pretrained_vectors = null
width = 96
depth = 2
embed_size = 2000
window_size = 1
maxout_pieces = 3
subword_features = true

[components.relation_extractor]
factory = "relation_extractor"
threshold = 0.5

[components.relation_extractor.model]
@architectures = "rel_model.v1"

[components.relation_extractor.model.create_instance_tensor]
@architectures = "rel_instance_tensor.v1"

[components.relation_extractor.model.create_instance_tensor.tok2vec]
@architectures = "spacy.Tok2VecListener.v1"
width = ${components.tok2vec.model.width}

[components.relation_extractor.model.create_instance_tensor.pooling]
@layers = "reduce_mean.v1"

[components.relation_extractor.model.create_instance_tensor.get_instances]
@misc = "rel_instance_generator.v1"
max_length = 1000

[components.relation_extractor.model.classification_layer]
@architectures = "rel_classification_layer.v1"
nI = null
nO = null

[initialize]

[initialize.components]

[corpora]

[corpora.dev]
@readers = "Gold_ents_Corpus.v1"
file = ${paths.dev}

[corpora.train]
@readers = "Gold_ents_Corpus.v1"
file = ${paths.train}

[training]
seed = ${system.seed}
gpu_allocator = ${system.gpu_allocator}
dropout = 0.1
accumulate_gradient = 1
patience = 160000000
max_epochs = 0
max_steps = 10000000
eval_frequency = 500
frozen_components = []
dev_corpus = "corpora.dev"
train_corpus = "corpora.train"
before_to_disk = null
logger = {"@loggers":"spacy.ConsoleLogger.v1"}

[training.batcher]
@batchers = "spacy.batch_by_words.v1"
discard_oversize = false
tolerance = 0.2

[training.batcher.size]
@schedules = "compounding.v1"
start = 100
stop = 1000
compound = 1.001

[training.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = false
eps = 0.00000001
learn_rate = 0.001

[training.score_weights]
rel_micro_p = 0.0
rel_micro_r = 0.0
rel_micro_f = 1.0
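
# Usage note (sketch, with assumptions labeled): the "rel_model.v1",
# "rel_instance_tensor.v1", "rel_instance_generator.v1",
# "rel_classification_layer.v1" and "Gold_ents_Corpus.v1" entries referenced
# above are custom registered functions, not spaCy built-ins, so the Python
# module(s) that register them must be importable when training runs.
# Assuming this file is saved as rel_config.cfg and the custom functions live
# in a local custom_functions.py (both file names are hypothetical), training
# would typically be launched with the spaCy CLI, overriding the null paths
# above and passing the custom code via --code, roughly:
#
#   python -m spacy train rel_config.cfg \
#       --output ./training \
#       --paths.train ./data/train.spacy \
#       --paths.dev ./data/dev.spacy \
#       --code ./custom_functions.py
#
# [training.score_weights] puts all weight on rel_micro_f, so model selection
# during training is driven solely by the relation extractor's micro-F score.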