---
# Training Seed 3
# Regression-metric training configuration (COMET-style estimator).
# NOTE(review): train_data and validation_data point to the same CSV —
# confirm this is intentional (no held-out validation split).

activations: Tanh                   # activation function for the estimator head
batch_size: 2
class_identifier: regression_metric
dropout: 0.1
encoder_learning_rate: 1.0e-05      # smaller LR for the pretrained encoder
encoder_model: XLM-RoBERTa
hidden_sizes:
  - 3072
  - 1536
keep_embeddings_frozen: true
layer: mix                          # learned mix of encoder layers (cf. layerwise_decay)
layerwise_decay: 0.95
learning_rate: 3.0e-05              # LR for the estimator head
load_weights_from_checkpoint: null
optimizer: Adam
pool: avg                           # sentence embedding pooling strategy
pretrained_model: xlm-roberta-large
train_data: data/scores_1719.csv
validation_data: data/scores_1719.csv