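"""Fine-tune FLERT-style NER models on HIPE-2022 / ICDAR Europeana corpora.

The script expects the path to a JSON configuration file as its only
command-line argument and runs a grid search over seeds, batch sizes,
epochs, learning rates and subword pooling strategies.

An illustrative configuration (the keys match what this script reads; the
model id and all values below are examples only, not recommendations):

    {
        "hf_model": "dbmdz/bert-base-historic-multilingual-cased",
        "context_size": 64,
        "layers": "-1",
        "use_crf": false,
        "seeds": [1, 2, 3],
        "batch_sizes": [8],
        "epochs": [10],
        "learning_rates": [5e-5],
        "subword_poolings": ["first"],
        "hipe_datasets": ["ajmc/de"],
        "cuda": 0
    }

"layers" and "use_crf" are optional and default to "-1" and false.
"""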
import json
import logging
import sys
from typing import List

import flair
import torch
from flair import set_seed
from flair.data import MultiCorpus
from flair.datasets import NER_HIPE_2022, NER_ICDAR_EUROPEANA
from flair.embeddings import TransformerWordEmbeddings
from flair.models import SequenceTagger
from flair.trainers import ModelTrainer

from utils import (
    prepare_ajmc_corpus,
    prepare_clef_2020_corpus,
    prepare_newseye_de_fr_corpus,
    prepare_newseye_fi_sv_corpus,
)

logger = logging.getLogger("flair")
logger.setLevel(logging.INFO)


def run_experiment(seed: int, batch_size: int, epoch: int, learning_rate: float, subword_pooling: str,
                   hipe_datasets: List[str], json_config: dict):
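    """Train one SequenceTagger for a single hyper-parameter combination.

    Model-related options ("hf_model", "context_size" and the optional
    "layers" / "use_crf" keys) are read from json_config.
    """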
    hf_model = json_config["hf_model"]
    context_size = json_config["context_size"]
    layers = json_config.get("layers", "-1")
    use_crf = json_config.get("use_crf", False)

    # Set seed for reproducibility
    set_seed(seed)

    corpus_list = []

    # Load (and, where necessary, preprocess) each requested corpus
    for dataset in hipe_datasets:
        dataset_name, language = dataset.split("/")

        # Default: no special preprocessing (datasets such as topres19th need none)
        preproc_fn = None

        if dataset_name == "ajmc":
            preproc_fn = prepare_ajmc_corpus
        elif dataset_name == "hipe2020":
            preproc_fn = prepare_clef_2020_corpus
        elif dataset_name == "newseye" and language in ["fi", "sv"]:
            preproc_fn = prepare_newseye_fi_sv_corpus
        elif dataset_name == "newseye" and language in ["de", "fr"]:
            preproc_fn = prepare_newseye_de_fr_corpus

        if dataset_name == "icdar":
            corpus_list.append(NER_ICDAR_EUROPEANA(language=language))
        else:
            corpus_list.append(NER_HIPE_2022(dataset_name=dataset_name, language=language, preproc_fn=preproc_fn,
                                             add_document_separator=True))

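    # A context size of 0 disables FLERT document context entirely;
    # TransformerWordEmbeddings takes either a bool or an int for use_context.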
    if context_size == 0:
        context_size = False

    logger.info("FLERT Context: {}".format(context_size))
    logger.info("Layers: {}".format(layers))
    logger.info("Use CRF: {}".format(use_crf))

    corpora: MultiCorpus = MultiCorpus(corpora=corpus_list, sample_missing_splits=False)
    label_dictionary = corpora.make_label_dictionary(label_type="ner")
    logger.info("Label Dictionary: {}".format(label_dictionary.get_items()))

    embeddings = TransformerWordEmbeddings(
        model=hf_model,
        layers=layers,
        subtoken_pooling=subword_pooling,
        fine_tune=True,
        use_context=context_size,
    )

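    # Bare linear tag head on top of the fine-tuned transformer: no RNN, no
    # embedding reprojection, and a CRF only if requested in the config.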
    tagger: SequenceTagger = SequenceTagger(
        hidden_size=256,
        embeddings=embeddings,
        tag_dictionary=label_dictionary,
        tag_type="ner",
        use_crf=use_crf,
        use_rnn=False,
        reproject_embeddings=False,
    )

    # Fine-tune with flair's ModelTrainer on the combined corpora
    trainer: ModelTrainer = ModelTrainer(tagger, corpora)

    datasets = "-".join([dataset for dataset in hipe_datasets])

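    # The output directory name encodes every hyper-parameter of the run, so
    # individual grid-search results are easy to tell apart.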
    trainer.fine_tune(
        f"hmbench-{datasets}-{hf_model}-bs{batch_size}-ws{context_size}-e{epoch}-lr{learning_rate}-pooling{subword_pooling}-layers{layers}-crf{use_crf}-{seed}",
        learning_rate=learning_rate,
        mini_batch_size=batch_size,
        max_epochs=epoch,
        shuffle=True,
        embeddings_storage_mode="none",
        weight_decay=0.0,
        use_final_model_for_eval=False,
    )

    # Finally, print model card for information
    tagger.print_model_card()


if __name__ == "__main__":
    filename = sys.argv[1]
    with open(filename, "rt", encoding="utf-8") as f_p:
        json_config = json.load(f_p)

    seeds = json_config["seeds"]
    batch_sizes = json_config["batch_sizes"]
    epochs = json_config["epochs"]
    learning_rates = json_config["learning_rates"]
    subword_poolings = json_config["subword_poolings"]

    hipe_datasets = json_config["hipe_datasets"]  # Passed through as a whole; not part of the grid search

    cuda = json_config["cuda"]
    flair.device = torch.device(f"cuda:{cuda}")

    for seed in seeds:
        for batch_size in batch_sizes:
            for epoch in epochs:
                for learning_rate in learning_rates:
                    for subword_pooling in subword_poolings:
                        run_experiment(seed, batch_size, epoch, learning_rate, subword_pooling,
                                       hipe_datasets, json_config)