title: "Demo spancat in a new pipeline (Span Categorization)"
description: "A minimal demo spancat project for spaCy v3"

# Variables can be referenced across the project.yml using ${vars.var_name}
vars:
  name: "placing_holocaust"
  lang: "en"
  annotations_file: "annotated_data_spans.jsonl"
  train: "train"
  dev: "dev"
  test: "test"
  version: "0.0.1"
  # Set a random seed
  seed: 0
  # Set your GPU ID, -1 is CPU
  gpu_id: -1
  vectors_model_md: "en_core_web_md"
  vectors_model_lg: "en_core_web_lg"

# These are the directories that the project needs. The project CLI will make
# sure that they always exist.
directories: ["assets", "corpus", "configs", "training", "scripts", "packages"]

# Assets that should be downloaded or available in the directory. We're shipping
# them with the project, so they won't have to be downloaded.
assets:
  - dest: "assets/train.jsonl"
    description: "Training data. For this project, they were chunked into sentences."
  - dest: "assets/dev.jsonl"
    description: "Validation data. For this project, they were chunked into sentences."
  - dest: "assets/test.jsonl"
    description: "Testing data. For this project, they were chunked into sentences."
  # NOTE(review): trailing slash on a ".json" path looks like a typo — confirm
  # whether this should be "assets/annotated_data.json".
  - dest: "assets/annotated_data.json/"
    description: "All data, including those without annotations because they are negative examples."
  - dest: "assets/annotated_data_spans.jsonl"
    description: "This is just the data that contained examples of span annotations."
  - dest: "corpus/train.spacy"
    description: "Training data in serialized format."
  - dest: "corpus/dev.spacy"
    description: "Validation data in serialized format."
  - dest: "corpus/test.spacy"
    description: "Testing data in serialized format."
  - dest: "gold-training-data/*"
    description: "The original outputs from Prodigy, the annotation software used."
  - dest: "notebooks/*"
    description: "A collection of notebooks for testing different features of the project."
  - dest: "configs/*"
    description: "A collection of config files used for training the spaCy models."
# Workflows are sequences of commands (see below) executed in order. You can
# run them via "spacy project run [workflow]". If a command's inputs/outputs
# haven't changed, it won't be re-run.
workflows:
  all-sm-sents:
    # "split" must run first: it produces the assets/{train,dev,test}.jsonl
    # files that "convert-sents" consumes.
    - split
    - convert-sents
    - create-config-sm
    - train-sm
    - evaluate-sm
  # all-trf:
  #   - download
  #   - convert
  #   - create-config
  #   - train-with-vectors
  #   - evaluate

# Project commands, specified in a style similar to CI config files (e.g. Azure
# pipelines). The name is the command name that lets you trigger the command
# via "spacy project run [command] [path]". The help message is optional and
# shown when executing "spacy project run [optional command] [path] --help".
commands:
  #### DOWNLOADING VECTORS #####
  - name: "download-lg"
    help: "Download a spaCy model with pretrained vectors"
    script:
      - "python -m spacy download ${vars.vectors_model_lg}"

  - name: "download-md"
    help: "Download a spaCy model with pretrained vectors"
    script:
      - "python -m spacy download ${vars.vectors_model_md}"

  #### PREPROCESSING #####
  - name: "convert"
    help: "Convert the data to spaCy's binary format"
    script:
      - "python scripts/convert.py ${vars.lang} assets/${vars.train}.jsonl corpus"
      - "python scripts/convert.py ${vars.lang} assets/${vars.dev}.jsonl corpus"
      - "python scripts/convert.py ${vars.lang} assets/${vars.test}.jsonl corpus"
    deps:
      - "assets/${vars.train}.jsonl"
      - "assets/${vars.dev}.jsonl"
      - "assets/${vars.test}.jsonl"
      - "scripts/convert.py"
    outputs:
      - "corpus/train.spacy"
      - "corpus/dev.spacy"
      - "corpus/test.spacy"

  - name: "convert-sents"
    help: "Convert the data to sentences before converting to spaCy's binary format"
    script:
      - "python scripts/convert_sents.py ${vars.lang} assets/${vars.train}.jsonl corpus"
      - "python scripts/convert_sents.py ${vars.lang} assets/${vars.dev}.jsonl corpus"
      - "python scripts/convert_sents.py ${vars.lang} assets/${vars.test}.jsonl corpus"
    deps:
      - "assets/${vars.train}.jsonl"
      - "assets/${vars.dev}.jsonl"
      - "assets/${vars.test}.jsonl"
      # Fixed: this command runs convert_sents.py, not convert.py.
      - "scripts/convert_sents.py"
    outputs:
      - "corpus/train.spacy"
      - "corpus/dev.spacy"
      - "corpus/test.spacy"

  - name: "split"
    help: "Split data into train/dev/test sets"
    script:
      - "python scripts/split.py assets/${vars.annotations_file}"
    deps:
      # Fixed: the annotations file is a real input of this command; without it
      # the change-detection cache would not re-run split when the data changes.
      - "assets/${vars.annotations_file}"
      - "scripts/split.py"
    outputs:
      - "assets/train.jsonl"
      - "assets/dev.jsonl"
      - "assets/test.jsonl"

  #### CONFIG CREATIONS #####
  - name: "create-config-sm"
    help: "Create a new config with a spancat pipeline component"
    script:
      - "python -m spacy init fill-config configs/base_config_sm.cfg configs/config_sm.cfg"
    deps:
      - "configs/base_config_sm.cfg"
    outputs:
      # Fixed: the script writes configs/config_sm.cfg, not configs/config.cfg.
      - "configs/config_sm.cfg"

  #### TRAINING #####
  ### small ###
  - name: "train-sm"
    help: "Train the spancat model"
    script:
      - >-
        python -m spacy train
        configs/config_sm.cfg
        --output training/sm/
        --paths.train corpus/train.spacy
        --paths.dev corpus/dev.spacy
        --training.eval_frequency 50
        --training.patience 0
        --gpu-id ${vars.gpu_id}
        --system.seed ${vars.seed}
    deps:
      # Fixed: this command trains from config_sm.cfg (was config_lg.cfg).
      - "configs/config_sm.cfg"
      - "corpus/train.spacy"
      - "corpus/dev.spacy"
    outputs:
      # Fixed: --output is training/sm/, so the model lands there.
      - "training/sm/model-best"

  ### medium ###
  - name: "train-md"
    help: "Train the spancat model with vectors"
    script:
      - >-
        python -m spacy train
        configs/config_md.cfg
        --output training/md/
        --paths.train corpus/train.spacy
        --paths.dev corpus/dev.spacy
        --training.eval_frequency 50
        --training.patience 0
        --gpu-id ${vars.gpu_id}
        --initialize.vectors ${vars.vectors_model_md}
        --system.seed ${vars.seed}
        --components.tok2vec.model.embed.include_static_vectors true
    deps:
      - "configs/config_md.cfg"
      - "corpus/train.spacy"
      - "corpus/dev.spacy"
    outputs:
      # Fixed: --output is training/md/, so the model lands there.
      - "training/md/model-best"

  ### large ###
  - name: "train-lg"
    help: "Train the spancat model with vectors"
    script:
      - >-
        python -m spacy train
        configs/config_lg.cfg
        --output training/lg/
        --paths.train corpus/train.spacy
        --paths.dev corpus/dev.spacy
        --training.eval_frequency 50
        --training.patience 0
        --gpu-id ${vars.gpu_id}
        --initialize.vectors ${vars.vectors_model_lg}
        --system.seed ${vars.seed}
        --components.tok2vec.model.embed.include_static_vectors true
    deps:
      - "configs/config_lg.cfg"
      - "corpus/train.spacy"
      - "corpus/dev.spacy"
    outputs:
      # Fixed: --output is training/lg/, so the model lands there.
      - "training/lg/model-best"

  ### transformer ###
  - name: "train-trf"
    help: "Train the spancat model"
    script:
      - >-
        python -m spacy train
        configs/config_trf.cfg
        --output training/trf/
        --paths.train corpus/train.spacy
        --paths.dev corpus/dev.spacy
        --training.patience 100
        --gpu-id ${vars.gpu_id}
        --system.seed ${vars.seed}
    deps:
      # Fixed: this command trains from config_trf.cfg (was config.cfg).
      - "configs/config_trf.cfg"
      - "corpus/train.spacy"
      - "corpus/dev.spacy"
    outputs:
      # Fixed: --output is training/trf/, so the model lands there.
      - "training/trf/model-best"

  #### EVALUATION #####
  ### small ###
  - name: "evaluate-sm"
    help: "Evaluate the model and export metrics"
    script:
      - "python -m spacy evaluate training/sm/model-best corpus/test.spacy --output training/sm/metrics.json"
    deps:
      - "corpus/test.spacy"
      - "training/sm/model-best"
    outputs:
      - "training/sm/metrics.json"

  ### medium ###
  - name: "evaluate-md"
    help: "Evaluate the model and export metrics"
    script:
      - "python -m spacy evaluate training/md/model-best corpus/test.spacy --output training/md/metrics.json"
    deps:
      - "corpus/test.spacy"
      - "training/md/model-best"
    outputs:
      - "training/md/metrics.json"

  ### large ###
  - name: "evaluate-lg"
    help: "Evaluate the model and export metrics"
    script:
      - "python -m spacy evaluate training/lg/model-best corpus/test.spacy --output training/lg/metrics.json"
    deps:
      - "corpus/test.spacy"
      - "training/lg/model-best"
    outputs:
      - "training/lg/metrics.json"

  #### PACKAGING #####
  - name: "build-table"
    help: "builds a nice table from the metrics for README.md"
    script:
      - "python scripts/build-table.py"

  # NOTE(review): help text appears copy-pasted from build-table — confirm
  # what scripts/readme.py actually does and update the help string.
  - name: "readme"
    help: "builds a nice table from the metrics for README.md"
    script:
      - "python scripts/readme.py"

  # NOTE(review): no command outputs "training/model-best" — the train-*
  # commands write to training/{sm,md,lg,trf}/. Confirm which model this
  # should package and point both script and deps at that path.
  - name: "package"
    help: "Package the trained model as a pip package"
    script:
      - "python -m spacy package training/model-best packages --name ${vars.name} --version ${vars.version} --force"
    deps:
      - "training/model-best"
    outputs_no_cache:
      - "packages/${vars.lang}_${vars.name}-${vars.version}/dist/${vars.lang}_${vars.name}-${vars.version}.tar.gz"

  - name: "clean"
    help: "Remove intermediary directories"
    script:
      - "rm -rf corpus/*"
      - "rm -rf training/*"
      - "rm -rf metrics/*"