[paths]
train = "./data/sv_merged-upos-train.spacy"
dev = "./data/sv_merged-upos-dev.spacy"
vectors = null
init_tok2vec = null

[system]
gpu_allocator = "pytorch"
seed = 0

[nlp]
lang = "sv"
pipeline = ["transformer","tagger","parser","transformer_ner","ner","attribute_ruler","lemmatizer"]
batch_size = 128
disabled = []
before_creation = null
after_creation = null
after_pipeline_creation = null
tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"}

[components]

[components.attribute_ruler]
factory = "attribute_ruler"
validate = false

[components.lemmatizer]
factory = "lemmatizer"
mode = "lookup"
model = null
overwrite = false

[components.ner]
factory = "ner"
moves = null
update_with_oracle_cut_size = 100

[components.ner.model]
@architectures = "spacy.TransitionBasedParser.v2"
state_type = "ner"
extra_state_tokens = false
hidden_width = 64
maxout_pieces = 2
use_upper = false
nO = null

# The NER listener is bound explicitly to the dedicated NER transformer, so its
# gradients do not flow into the transformer shared by the tagger and parser.
[components.ner.model.tok2vec]
@architectures = "spacy-transformers.TransformerListener.v1"
grad_factor = 1.0
pooling = {"@layers":"reduce_mean.v1"}
upstream = "transformer_ner"

[components.parser]
factory = "parser"
learn_tokens = false
min_action_freq = 30
moves = null
update_with_oracle_cut_size = 100

[components.parser.model]
@architectures = "spacy.TransitionBasedParser.v2"
state_type = "parser"
extra_state_tokens = false
hidden_width = 128
maxout_pieces = 3
use_upper = false
nO = null

[components.parser.model.tok2vec]
@architectures = "spacy-transformers.TransformerListener.v1"
grad_factor = 1.0
pooling = {"@layers":"reduce_mean.v1"}
upstream = "transformer"

[components.tagger]
factory = "tagger"

[components.tagger.model]
@architectures = "spacy.Tagger.v1"
nO = null

[components.tagger.model.tok2vec]
@architectures = "spacy-transformers.TransformerListener.v1"
grad_factor = 1.0
pooling = {"@layers":"reduce_mean.v1"}
upstream = "transformer"

# Transformer shared by the tagger and parser listeners.
[components.transformer]
factory = "transformer"
max_batch_items = 4096
set_extra_annotations = {"@annotation_setters":"spacy-transformers.null_annotation_setter.v1"}

[components.transformer.model]
@architectures = "spacy-transformers.TransformerModel.v1"
name = "KB/bert-base-swedish-cased"

[components.transformer.model.get_spans]
@span_getters = "spacy-transformers.strided_spans.v1"
window = 128
stride = 96

[components.transformer.model.tokenizer_config]
use_fast = true

# Second transformer instance (same pretrained weights) that is updated only by
# the NER component.
[components.transformer_ner]
factory = "transformer"
max_batch_items = 4096
set_extra_annotations = {"@annotation_setters":"spacy-transformers.null_annotation_setter.v1"}

[components.transformer_ner.model]
@architectures = "spacy-transformers.TransformerModel.v1"
name = "KB/bert-base-swedish-cased"

[components.transformer_ner.model.get_spans]
@span_getters = "spacy-transformers.strided_spans.v1"
window = 128
stride = 96

[components.transformer_ner.model.tokenizer_config]
use_fast = true

[corpora]

[corpora.dev]
@readers = "spacy.Corpus.v1"
path = ${paths.dev}
max_length = 0
gold_preproc = false
limit = 0
augmenter = null

[corpora.train]
@readers = "spacy.Corpus.v1"
path = ${paths.train}
max_length = 500
gold_preproc = false
limit = 0
augmenter = null

[training]
accumulate_gradient = 3
dev_corpus = "corpora.dev"
train_corpus = "corpora.train"
seed = ${system.seed}
gpu_allocator = ${system.gpu_allocator}
dropout = 0.1
patience = 1600
max_epochs = 0
max_steps = 20000
eval_frequency = 200
frozen_components = []
before_to_disk = null

[training.batcher]
@batchers = "spacy.batch_by_padded.v1"
discard_oversize = true
size = 2000
buffer = 256
get_length = null

[training.logger]
@loggers = "spacy.ConsoleLogger.v1"
progress_bar = false
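
# The optimizer below is Adam with decoupled L2 regularization (L2_is_weight_decay)
# and a warmup_linear.v1 learning-rate schedule: the rate climbs linearly from 0 to
# initial_rate over the first warmup_steps updates, then decays linearly back to 0
# at total_steps (here 0 -> 5e-5 across steps 0-250, then down to 0 by step 20000).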
[training.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = false
eps = 0.00000001

[training.optimizer.learn_rate]
@schedules = "warmup_linear.v1"
warmup_steps = 250
total_steps = 20000
initial_rate = 0.00005

[training.score_weights]
dep_las_per_type = null
sents_p = null
sents_r = null
ents_per_type = null
tag_acc = 0.25
dep_uas = 0.12
dep_las = 0.12
sents_f = 0.0
ents_f = 0.25
ents_p = 0.0
ents_r = 0.0
lemma_acc = 0.25

[pretraining]

[initialize]
vectors = null
init_tok2vec = ${paths.init_tok2vec}
vocab_data = null
lookups = null
before_init = null
after_init = null

[initialize.components]

[initialize.tokenizer]
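
# Usage sketch (not part of the original config; the file name, output directory
# and GPU id are assumptions): with spacy and spacy-transformers installed, the
# config can be validated and trained via the spaCy CLI, e.g.
#   python -m spacy debug config config.cfg
#   python -m spacy train config.cfg --output ./output --gpu-id 0
# The lookup-mode lemmatizer additionally requires the spacy-lookups-data package.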