# ecfr-textcat / project.yml
# Provenance: Hugging Face Hub, commit 4a23eca ("Update project.yml (#5)" by DagimB).
# Hub model-card front matter (first YAML document); project config follows the second `---`.
---
title: "Citations of ECFR Banking Regulation in a spaCy pipeline."
description: "Custom text classification project for spaCy v3 adapted from the spaCy v3"
author: "Manjinder"
# Quoted so the value stays a string; a bare 2024-05-01 is parsed as a YAML timestamp.
date: "2024-05-01"
tags:
- machine learning
- natural language processing
- huggingface
---
# Variables referenced elsewhere in the project via ${vars.*} interpolation.
vars:
  config: "default"
  lang: "en"
  train: "corpus/train.spacy"
  dev: "corpus/dev.spacy"
  version: "0.1.0"
  # -1 selects CPU; set to a CUDA device index to train on GPU.
  gpu_id: -1
  vectors_model: "en_core_web_lg"
  name: "ecfr_ner"
  # Prodigy dataset names used by the annotation/review commands.
  prodigy:
    ner_labels: "ecfr_initial_ner"
    ner_manual_labels: "ecfr_manual_ner"
    senter_labels: "ecfr_labeled_sents"
    ner_labeled_dataset: "ecfr_labeled_ner"
# Directories `spacy project` creates (if missing) before running commands.
directories:
- corpus/labels
- data
- my_trained_model/textcat_multilabel
- my_trained_model/vocab
- output/experiment1/model-best/textcat_multilabel
- output/experiment1/model-best/vocab
- output/experiment1/model-last/textcat_multilabel
- output/experiment1/model-last/vocab
- output/experiment3/model-best/textcat_multilabel
- output/experiment3/model-best/vocab
- output/experiment3/model-last/textcat_multilabel
- output/experiment3/model-last/vocab
- python_Code
# Project assets. NOTE(review): entries declare only `dest` + `description`;
# add `url`/`git` fields if these should be fetchable via `spacy project assets` — confirm intent.
assets:
- dest: "corpus/labels/ner.json"
  description: "JSON file containing NER labels"
- dest: "corpus/labels/parser.json"
  description: "JSON file containing parser labels"
- dest: "corpus/labels/tagger.json"
  description: "JSON file containing tagger labels"
- dest: "corpus/labels/textcat_multilabel.json"
  description: "JSON file containing multilabel text classification labels"
# Annotation / training data files.
- dest: "data/eval.jsonl"
  description: "JSONL file containing evaluation data"
- dest: "data/firstStep_file.jsonl"
  description: "JSONL file containing formatted data from the first step"
- dest: "data/five_examples_annotated5.jsonl"
  description: "JSONL file containing five annotated examples"
- dest: "data/goldenEval.jsonl"
  description: "JSONL file containing golden evaluation data"
- dest: "data/thirdStep_file.jsonl"
  description: "JSONL file containing classified data from the third step"
- dest: "data/train.jsonl"
  description: "JSONL file containing training data"
- dest: "data/train200.jsonl"
  description: "JSONL file containing initial training data"
- dest: "data/train4465.jsonl"
  description: "JSONL file containing formatted and labeled training data"
# Serialized artifacts of the standalone trained model.
- dest: "my_trained_model/textcat_multilabel/cfg"
  description: "Configuration files for the text classification model"
- dest: "my_trained_model/textcat_multilabel/model"
  description: "Trained model files for the text classification model"
- dest: "my_trained_model/vocab/key2row"
  description: "Mapping from keys to row indices in the vocabulary"
- dest: "my_trained_model/vocab/lookups.bin"
  description: "Binary lookups file for the vocabulary"
- dest: "my_trained_model/vocab/strings.json"
  description: "JSON file containing string representations of the vocabulary"
- dest: "my_trained_model/vocab/vectors"
  description: "Directory containing vector files for the vocabulary"
- dest: "my_trained_model/vocab/vectors.cfg"
  description: "Configuration file for vectors in the vocabulary"
- dest: "my_trained_model/config.cfg"
  description: "Configuration file for the trained model"
- dest: "my_trained_model/meta.json"
  description: "JSON file containing metadata for the trained model"
- dest: "my_trained_model/tokenizer"
  description: "Tokenizer files for the trained model"
# Experiment 1: best checkpoint.
- dest: "output/experiment1/model-best/textcat_multilabel/cfg"
  description: "Configuration files for the best model in experiment 1"
- dest: "output/experiment1/model-best/textcat_multilabel/model"
  description: "Trained model files for the best model in experiment 1"
- dest: "output/experiment1/model-best/vocab/key2row"
  description: "Mapping from keys to row indices in the vocabulary for the best model in experiment 1"
- dest: "output/experiment1/model-best/vocab/lookups.bin"
  description: "Binary lookups file for the vocabulary for the best model in experiment 1"
- dest: "output/experiment1/model-best/vocab/strings.json"
  description: "JSON file containing string representations of the vocabulary for the best model in experiment 1"
- dest: "output/experiment1/model-best/vocab/vectors"
  description: "Directory containing vector files for the vocabulary for the best model in experiment 1"
- dest: "output/experiment1/model-best/vocab/vectors.cfg"
  description: "Configuration file for vectors in the vocabulary for the best model in experiment 1"
- dest: "output/experiment1/model-best/config.cfg"
  description: "Configuration file for the best model in experiment 1"
- dest: "output/experiment1/model-best/meta.json"
  description: "JSON file containing metadata for the best model in experiment 1"
- dest: "output/experiment1/model-best/tokenizer"
  description: "Tokenizer files for the best model in experiment 1"
# Experiment 1: last checkpoint.
- dest: "output/experiment1/model-last/textcat_multilabel/cfg"
  description: "Configuration files for the last model in experiment 1"
- dest: "output/experiment1/model-last/textcat_multilabel/model"
  description: "Trained model files for the last model in experiment 1"
- dest: "output/experiment1/model-last/vocab/key2row"
  description: "Mapping from keys to row indices in the vocabulary for the last model in experiment 1"
- dest: "output/experiment1/model-last/vocab/lookups.bin"
  description: "Binary lookups file for the vocabulary for the last model in experiment 1"
- dest: "output/experiment1/model-last/vocab/strings.json"
  description: "JSON file containing string representations of the vocabulary for the last model in experiment 1"
- dest: "output/experiment1/model-last/vocab/vectors"
  description: "Directory containing vector files for the vocabulary for the last model in experiment 1"
- dest: "output/experiment1/model-last/vocab/vectors.cfg"
  description: "Configuration file for vectors in the vocabulary for the last model in experiment 1"
- dest: "output/experiment1/model-last/config.cfg"
  description: "Configuration file for the last model in experiment 1"
- dest: "output/experiment1/model-last/meta.json"
  description: "JSON file containing metadata for the last model in experiment 1"
- dest: "output/experiment1/model-last/tokenizer"
  description: "Tokenizer files for the last model in experiment 1"
# Experiment 3: best checkpoint.
- dest: "output/experiment3/model-best/textcat_multilabel/cfg"
  description: "Configuration files for the best model in experiment 3"
- dest: "output/experiment3/model-best/textcat_multilabel/model"
  description: "Trained model files for the best model in experiment 3"
- dest: "output/experiment3/model-best/vocab/key2row"
  description: "Mapping from keys to row indices in the vocabulary for the best model in experiment 3"
- dest: "output/experiment3/model-best/vocab/lookups.bin"
  description: "Binary lookups file for the vocabulary for the best model in experiment 3"
- dest: "output/experiment3/model-best/vocab/strings.json"
  description: "JSON file containing string representations of the vocabulary for the best model in experiment 3"
- dest: "output/experiment3/model-best/vocab/vectors"
  description: "Directory containing vector files for the vocabulary for the best model in experiment 3"
- dest: "output/experiment3/model-best/vocab/vectors.cfg"
  description: "Configuration file for vectors in the vocabulary for the best model in experiment 3"
- dest: "output/experiment3/model-best/config.cfg"
  description: "Configuration file for the best model in experiment 3"
- dest: "output/experiment3/model-best/meta.json"
  description: "JSON file containing metadata for the best model in experiment 3"
- dest: "output/experiment3/model-best/tokenizer"
  description: "Tokenizer files for the best model in experiment 3"
# Experiment 3: last checkpoint.
- dest: "output/experiment3/model-last/textcat_multilabel/cfg"
  description: "Configuration files for the last model in experiment 3"
- dest: "output/experiment3/model-last/textcat_multilabel/model"
  description: "Trained model files for the last model in experiment 3"
- dest: "output/experiment3/model-last/vocab/key2row"
  description: "Mapping from keys to row indices in the vocabulary for the last model in experiment 3"
- dest: "output/experiment3/model-last/vocab/lookups.bin"
  description: "Binary lookups file for the vocabulary for the last model in experiment 3"
- dest: "output/experiment3/model-last/vocab/strings.json"
  description: "JSON file containing string representations of the vocabulary for the last model in experiment 3"
- dest: "output/experiment3/model-last/vocab/vectors"
  description: "Directory containing vector files for the vocabulary for the last model in experiment 3"
- dest: "output/experiment3/model-last/vocab/vectors.cfg"
  description: "Configuration file for vectors in the vocabulary for the last model in experiment 3"
- dest: "output/experiment3/model-last/config.cfg"
  description: "Configuration file for the last model in experiment 3"
- dest: "output/experiment3/model-last/meta.json"
  description: "JSON file containing metadata for the last model in experiment 3"
- dest: "output/experiment3/model-last/tokenizer"
  description: "Tokenizer files for the last model in experiment 3"
# Helper scripts, notebooks, and repository documentation.
- dest: "python_Code/finalStep-formatLabel.py"
  description: "Python script for formatting labeled data in the final step"
- dest: "python_Code/firstStep-format.py"
  description: "Python script for formatting data in the first step"
- dest: "python_Code/five_examples_annotated.ipynb"
  description: "Jupyter notebook containing five annotated examples"
- dest: "python_Code/secondStep-score.py"
  description: "Python script for scoring data in the second step"
- dest: "python_Code/thirdStep-label.py"
  description: "Python script for labeling data in the third step"
- dest: "python_Code/train_eval_split.ipynb"
  description: "Jupyter notebook for training and evaluation data splitting"
- dest: "TerminalCode.txt"
  description: "Text file containing terminal code"
- dest: "README.md"
  description: "Markdown file containing project documentation"
- dest: "prodigy.json"
  description: "JSON file containing Prodigy configuration"
# `spacy project run all` executes these commands in the order listed.
# NOTE(review): setup-environment runs fifth; it looks like it should run first — confirm intent.
workflows:
  all:
  - format-script
  - train-text-classification-model
  - classify-unlabeled-data
  - format-labeled-data
  - setup-environment
  - review-evaluation-data
  - export-reviewed-evaluation-data
  - import-training-data
  - import-golden-evaluation-data
  - train-model-experiment1
  - download-model
  - convert-data-to-spacy-format
  - train-custom-model
# Named commands; each is invoked as `spacy project run <name>`.
commands:
- name: "format-script"
  help: |
    Execute the Python script `firstStep-format.py`, which performs the initial formatting of a dataset file for the first step of the project. This script extracts text and labels from a dataset file in JSONL format and writes them to a new JSONL file in a specific format.
    Usage:
    ```
    spacy project run format-script
    ```
    Explanation:
    - The script `firstStep-format.py` reads data from the file specified in the `dataset_file` variable (`data/train200.jsonl` by default).
    - It extracts text and labels from each JSON object in the dataset file.
    - If both text and at least one label are available, it writes a new JSON object to the output file specified in the `output_file` variable (`data/firstStep_file.jsonl` by default) with the extracted text and labels.
- name: "train-text-classification-model"
  help: |
    Train a text classification model using spaCy.
    Usage:
    ```
    spacy project run train-text-classification-model
    ```
    Explanation:
    - This command trains a text classification model using the spaCy library based on the configuration provided in the `textcat_multilabel.cfg` file.
    - The model is trained on the data specified in the `train` and `dev` variables (`corpus/train.spacy` and `corpus/dev.spacy` by default).
    - The trained model is saved to the directory specified in the `output_model_dir` variable (`my_trained_model/textcat_multilabel/model` by default).
- name: "classify-unlabeled-data"
  help: |
    Classify unlabeled data using a trained text classification model.
    Usage:
    ```
    spacy project run classify-unlabeled-data
    ```
    Explanation:
    - This command loads the trained text classification model from the directory specified in the `model_dir` variable (`my_trained_model/textcat_multilabel/model` by default).
    - It classifies unlabeled data from the file specified in the `unlabeled_data_file` variable (`data/thirdStep_file.jsonl` by default).
    - The classified data is saved to the file specified in the `classified_data_file` variable (`data/classified_data.jsonl` by default).
- name: "format-labeled-data"
  help: |
    Execute the Python script `finalStep-formatLabel.py`, which performs the final formatting of labeled data for the last step of the project. This script converts labeled data from the JSONL format used by Prodigy to the JSONL format used by spaCy.
    Usage:
    ```
    spacy project run format-labeled-data
    ```
    Explanation:
    - The script `finalStep-formatLabel.py` reads labeled data from the file specified in the `labeled_data_file` variable (`data/thirdStep_file.jsonl` by default).
    - It converts the labeled data from Prodigy's JSONL format to spaCy's JSONL format.
    - The converted data is saved to the file specified in the `formatted_data_file` variable (`data/fourthStep_file.jsonl` by default).
- name: "setup-environment"
  help: |
    Set up the Python environment for the project using pip and the provided requirements.txt file.
    Usage:
    ```
    spacy project run setup-environment
    ```
    Explanation:
    - This command installs the required Python packages listed in the `requirements.txt` file using pip.
- name: "review-evaluation-data"
  help: |
    Review the evaluation data using Prodigy.
    Usage:
    ```
    spacy project run review-evaluation-data
    ```
    Explanation:
    - This command launches Prodigy to review the evaluation data.
    - Prodigy loads the evaluation data from the file specified in the `eval_data_file` variable (`data/eval.jsonl` by default).
    - You can review the data and annotate it as needed using Prodigy's user interface.
- name: "export-reviewed-evaluation-data"
  help: |
    Export the reviewed evaluation data from Prodigy.
    Usage:
    ```
    spacy project run export-reviewed-evaluation-data
    ```
    Explanation:
    - This command exports the reviewed evaluation data from Prodigy to a JSONL file.
    - Prodigy exports the reviewed data to the file specified in the `exported_eval_data_file` variable (`data/goldenEval.jsonl` by default).
- name: "import-training-data"
  help: |
    Import training data into Prodigy.
    Usage:
    ```
    spacy project run import-training-data
    ```
    Explanation:
    - This command imports training data into Prodigy from the file specified in the `training_data_file` variable (`data/fourthStep_file.jsonl` by default).
- name: "import-golden-evaluation-data"
  help: |
    Import golden evaluation data into Prodigy.
    Usage:
    ```
    spacy project run import-golden-evaluation-data
    ```
    Explanation:
    - This command imports golden evaluation data into Prodigy from the file specified in the `golden_evaluation_data_file` variable (`data/goldenEval.jsonl` by default).
- name: "train-model-experiment1"
  help: |
    Train a text classification model with different configurations for experiment 1.
    Usage:
    ```
    spacy project run train-model-experiment1
    ```
    Explanation:
    - This command trains a text classification model using different configurations specified in the `experiment1_configs` list in the `config.cfg` file.
    - The model is trained on the data specified in the `train` and `dev` variables (`corpus/train.spacy` and `corpus/dev.spacy` by default).
    - The trained models are saved to the directories specified in the `output_model_dir` variable (`output/experiment1/model-last/textcat_multilabel/model` and `output/experiment1/model-best/textcat_multilabel/model` by default).
- name: "download-model"
  help: |
    Download a trained text classification model.
    Usage:
    ```
    spacy project run download-model
    ```
    Explanation:
    - This command downloads a trained text classification model from the URL specified in the `model_url` variable (`https://example.com/model.tar.gz` by default).
    - The downloaded model is saved to the directory specified in the `output_model_dir` variable (`models` by default).
- name: "convert-data-to-spacy-format"
  help: |
    Convert data to spaCy's JSONL format.
    Usage:
    ```
    spacy project run convert-data-to-spacy-format
    ```
    Explanation:
    - This command converts data from Prodigy's JSONL format to spaCy's JSONL format.
    - It reads data from the file specified in the `prodigy_data_file` variable (`data/ner_dataset.jsonl` by default) and writes the converted data to the file specified in the `spacy_data_file` variable (`data/ner_dataset_spacy.jsonl` by default).
- name: "train-custom-model"
  help: |
    Train a custom NER model using spaCy.
    Usage:
    ```
    spacy project run train-custom-model
    ```
    Explanation:
    - This command trains a custom NER model using spaCy based on the configuration provided in the `config.cfg` file.
    - The model is trained on the data specified in the `train` and `dev` variables (`corpus/train.spacy` and `corpus/dev.spacy` by default).
    - The trained model is saved to the directory specified in the `output_model_dir` variable (`my_trained_model` by default).