[paths]
train = "./data/spacy_format/train_med7_v34.spacy"
dev = "./data/spacy_format/dev_med7_v34.spacy"
vectors = "en_core_web_lg"
init_tok2vec = "/mnt/sdf/andrey/projects/med7_v3/output_pretrain_lg/model169.bin"

[system]
gpu_allocator = "pytorch"
seed = 0

[nlp]
lang = "en"
pipeline = ["transformer","ner"]
batch_size = 64
disabled = []
before_creation = null
after_creation = null
after_pipeline_creation = null
tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"}

[components]

[components.ner]
factory = "ner"
incorrect_spans_key = null
moves = null
scorer = {"@scorers":"spacy.ner_scorer.v1"}
update_with_oracle_cut_size = 100

[components.ner.model]
@architectures = "spacy.TransitionBasedParser.v2"
state_type = "ner"
extra_state_tokens = false
hidden_width = 128
maxout_pieces = 2
# no extra Maxout layer on top: recommended when the tok2vec input is a transformer
use_upper = false
nO = null

[components.ner.model.tok2vec]
# listen to the shared transformer component and mean-pool its wordpiece vectors
@architectures = "spacy-transformers.TransformerListener.v1"
grad_factor = 1.0
pooling = {"@layers":"reduce_mean.v1"}
upstream = "*"

[components.transformer]
factory = "transformer"
max_batch_items = 4096
set_extra_annotations = {"@annotation_setters":"spacy-transformers.null_annotation_setter.v1"}

[components.transformer.model]
@architectures = "spacy-transformers.TransformerModel.v3"
# path to a local Hugging Face checkpoint rather than a hub model name
name = "/mnt/sdf/andrey/projects/med7_v3/RoBERTa-base-PM-M3-hf"
mixed_precision = false

[components.transformer.model.get_spans]
# slice long documents into overlapping windows of 256 wordpieces, advancing 96 at a time
@span_getters = "spacy-transformers.strided_spans.v1"
window = 256
stride = 96

[components.transformer.model.grad_scaler_config]

[components.transformer.model.tokenizer_config]
use_fast = true

[components.transformer.model.transformer_config]

[corpora]

[corpora.dev]
@readers = "spacy.Corpus.v1"
path = ${paths.dev}
max_length = 0
gold_preproc = false
limit = 0
augmenter = null

[corpora.train]
@readers = "spacy.Corpus.v1"
path = ${paths.train}
max_length = 0
gold_preproc = false
limit = 0
augmenter = null

[training]
# accumulate gradients over 3 batches before each optimizer update
accumulate_gradient = 3
dev_corpus = "corpora.dev"
train_corpus = "corpora.train"
seed = ${system.seed}
gpu_allocator = ${system.gpu_allocator}
dropout = 0.1
# stop early after 3600 steps without improvement on the dev score
patience = 3600
# 0 = no epoch limit; training ends on max_steps or patience
max_epochs = 0
max_steps = 20000
eval_frequency = 200
frozen_components = []
annotating_components = []
before_to_disk = null

[training.batcher]
# batch sequences by their padded size, discarding examples too large to fit
@batchers = "spacy.batch_by_padded.v1"
discard_oversize = true
size = 1000
buffer = 256
get_length = null

[training.logger]
@loggers = "spacy.ConsoleLogger.v1"
progress_bar = true

[training.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = false
eps = 0.00000001

[training.optimizer.learn_rate]
# linear warm-up to 5e-5 over the first 250 steps, then linear decay to 0 at step 20000
@schedules = "warmup_linear.v1"
warmup_steps = 250
total_steps = 20000
initial_rate = 0.00005

[training.score_weights]
# select the best model on entity F-score alone
ents_f = 1.0
ents_p = 0.0
ents_r = 0.0
ents_per_type = null

[pretraining]

[initialize]
vectors = ${paths.vectors}
init_tok2vec = ${paths.init_tok2vec}
vocab_data = null
before_init = null
after_init = null

[initialize.components]

[initialize.lookups]
@misc = "spacy.LookupsDataLoader.v1"
lang = ${nlp.lang}
tables = ["lexeme_norm"]

[initialize.tokenizer]
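
# ---------------------------------------------------------------------------
# Usage sketch (comments only, not part of the config schema). Assuming this
# file is saved as config.cfg and the corpora and local checkpoint paths
# listed above exist, training is launched with spaCy's standard CLI
# (./output is an arbitrary choice of output directory):
#
#   python -m spacy train config.cfg --output ./output --gpu-id 0
#
# Any value under [paths] can also be overridden on the command line, e.g.:
#
#   python -m spacy train config.cfg --output ./output --gpu-id 0 \
#       --paths.train ./data/spacy_format/train_med7_v34.spacy \
#       --paths.dev ./data/spacy_format/dev_med7_v34.spacy
# ---------------------------------------------------------------------------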