nr_frozen_epochs: 0.3
class_identifier: referenceless_regression_metric
keep_embeddings_frozen: True
optimizer: AdamW
encoder_learning_rate: 1.0e-06
learning_rate: 1.5e-05
layerwise_decay: 0.95
encoder_model: XLM-RoBERTa
pretrained_model: Davlan/afro-xlmr-large
pool: avg
layer: mix
layer_transformation: sparsemax
layer_norm: False
loss: mse
dropout: 0.1
batch_size: 4
train_data:
- /SAN/intelsys/llm/jiaywang/COMET-2.0.2/data/annotation/wmt/train/wmt_final_previous.csv
validation_data:
- /SAN/intelsys/llm/jiaywang/COMET-2.0.2/data/annotation/final/annotation_1027_dev_devtest_prep_ade/en-kik-ade_dev.csv
hidden_sizes:
- 3072
- 1024
activations: Tanh
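
# Usage sketch (kept as comments so this file remains valid YAML). Assumption:
# these hyperparameters pair with the unbabel-comet toolkit, whose comet-train
# CLI expects them nested under the model class identifier, e.g.
# `referenceless_regression_metric: init_args: {...}`; the config filename in
# the command below is hypothetical:
#
#   comet-train --cfg configs/models/referenceless_model.yaml
#
# After training, the checkpoint can be scored in Python. load_from_checkpoint
# and predict are part of the comet package API; the checkpoint path and the
# src/mt strings are placeholders. Because class_identifier is
# referenceless_regression_metric, inputs are quality-estimation style: a
# source segment and an MT hypothesis, with no reference translation.
#
#   from comet import load_from_checkpoint
#
#   model = load_from_checkpoint("lightning_logs/version_0/checkpoints/model.ckpt")
#   data = [{"src": "source sentence", "mt": "machine translation output"}]
#   # Returns per-segment scores plus a corpus-level system score.
#   prediction = model.predict(data, batch_size=8, gpus=1)
#   print(prediction.scores, prediction.system_score)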