# Shared fine-tuning hyperparameters
training:
  num_epochs: 3
  batch_size: 16
  learning_rate: 2e-5

# Dataset used for all runs
dataset:
  name: imdb
  split: train[:10%]

# Parameter-efficient fine-tuning settings
model:
  adapter:
    reduction_factor: 16
  lora:
    r: 4
    alpha: 32
  student:
    hidden_size: 384

# Pretrained models compared during evaluation
evaluation:
  models:
    - bert-base-uncased
    - distilbert-base-uncased
    - roberta-base
    - gpt2
    - bart-base
    - electra-small-discriminator
    - t5-small
    - xlm-roberta-base
    - albert-base-v2
    - xlnet-base-cased
    - deberta-base
    - camembert-base
    - marianmt-en-de
    - m2m100_418M

# Weights & Biases logging
wandb:
  project: fine_tuning_comparison
  entity: your_wandb_username
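
# For reference, a minimal sketch of how this config might be consumed in Python,
# assuming it is saved as config.yaml and the project uses the Hugging Face
# datasets/transformers/peft stack; the filename, output directory, and library
# choices are assumptions, not taken from the config itself.
#
#   import os
#   import yaml
#   from datasets import load_dataset
#   from peft import LoraConfig
#   from transformers import TrainingArguments
#
#   with open("config.yaml") as f:          # hypothetical path
#       cfg = yaml.safe_load(f)
#
#   # Route transformers' built-in W&B logging to the configured project/entity.
#   os.environ["WANDB_PROJECT"] = cfg["wandb"]["project"]
#   os.environ["WANDB_ENTITY"] = cfg["wandb"]["entity"]
#
#   # IMDB, first 10% of the train split.
#   dataset = load_dataset(cfg["dataset"]["name"], split=cfg["dataset"]["split"])
#
#   # LoRA rank and scaling from model.lora.
#   lora_config = LoraConfig(r=cfg["model"]["lora"]["r"],
#                            lora_alpha=cfg["model"]["lora"]["alpha"])
#
#   # Shared training hyperparameters; float() guards against "2e-5" being
#   # read back as a string by YAML 1.1 parsers such as PyYAML.
#   training_args = TrainingArguments(
#       output_dir="runs",                  # hypothetical output directory
#       num_train_epochs=cfg["training"]["num_epochs"],
#       per_device_train_batch_size=cfg["training"]["batch_size"],
#       learning_rate=float(cfg["training"]["learning_rate"]),
#       report_to="wandb",
#   )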