# lightning.pytorch==2.4.0
seed_everything: 42
trainer:
  accelerator: auto
  strategy:
    class_path: lightning.pytorch.strategies.DDPStrategy
    init_args:
      accelerator: null
      parallel_devices: null
      cluster_environment: null
      checkpoint_io: null
      precision_plugin: null
      ddp_comm_state: null
      ddp_comm_hook: null
      ddp_comm_wrapper: null
      model_averaging_period: null
      process_group_backend: null
      timeout: 0:30:00
      start_method: popen
      output_device: null
      dim: 0
      broadcast_buffers: true
      process_group: null
      bucket_cap_mb: 25
      find_unused_parameters: false
      check_reduction: false
      gradient_as_bucket_view: false
      static_graph: false
      delay_all_reduce_named_params: null
      param_to_hook_all_reduce: null
      mixed_precision: null
      device_mesh: null
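  # The DDPStrategy block above spells out Lightning's defaults; the substantive
  # choices are the 30-minute collective timeout and find_unused_parameters:
  # false (the fast path; enable it only if some backbone parameters can be
  # skipped in a forward pass, otherwise DDP raises an error at backward).
  # A hedged sketch of overriding these from the command line, per LightningCLI
  # conventions (the `cli.py fit` entry point is an assumption, adjust to this
  # repo's launcher):
  #   python cli.py fit --config config.yaml \
  #     --trainer.strategy.init_args.timeout "0:45:00" \
  #     --trainer.strategy.init_args.find_unused_parameters true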
  devices: auto
  num_nodes: 1
  precision: 32
  logger:
    class_path: lightning.pytorch.loggers.WandbLogger
    init_args:
      name: ta_ecoli_rnafm_1.6B_fold0
      save_dir: genbio_finetune/logs
      version: null
      offline: false
      dir: null
      id: null
      anonymous: null
      project: rna_tasks
      log_model: false
      experiment: null
      prefix: ''
      checkpoint_name: null
      job_type: null
      config: null
      entity: null
      reinit: null
      tags: null
      group: null
      notes: null
      magic: null
      config_exclude_keys: null
      config_include_keys: null
      mode: null
      allow_val_change: null
      resume: null
      force: null
      tensorboard: null
      sync_tensorboard: null
      monitor_gym: null
      save_code: true
      settings: null
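  # WandbLogger note: with offline: false, a valid WANDB_API_KEY must be set in
  # the environment (or `wandb login` run once on the machine); switch to
  # offline: true to write runs locally under save_dir and upload them later
  # with `wandb sync`.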
  callbacks:
  - class_path: lightning.pytorch.callbacks.LearningRateMonitor
    init_args:
      logging_interval: step
      log_momentum: false
      log_weight_decay: false
  - class_path: lightning.pytorch.callbacks.ModelCheckpoint
    init_args:
      dirpath: genbio_finetune/logs/rna_tasks/ta_ecoli_rnafm_1.6B_fold0
      filename: best_val:{epoch}-{val_pearson:.3f}
      monitor: val_pearson
      verbose: false
      save_last: null
      save_top_k: 1
      save_weights_only: false
      mode: max
      auto_insert_metric_name: true
      every_n_train_steps: null
      train_time_interval: null
      every_n_epochs: 1
      save_on_train_epoch_end: null
      enable_version_counter: true
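  # With auto_insert_metric_name: true, the filename template expands to e.g.
  # "best_val:epoch=7-val_pearson=0.842.ckpt"; mode: max with save_top_k: 1
  # keeps only the highest-Pearson checkpoint. The monitor key val_pearson must
  # match the metric name the task logs at validation time, or Lightning cannot
  # resolve the monitor.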
  fast_dev_run: false
  max_epochs: 15
  min_epochs: null
  max_steps: -1
  min_steps: null
  max_time: null
  limit_train_batches: null
  limit_val_batches: null
  limit_test_batches: null
  limit_predict_batches: null
  overfit_batches: 0.0
  val_check_interval: null
  check_val_every_n_epoch: 1
  num_sanity_val_steps: null
  log_every_n_steps: 50
  enable_checkpointing: null
  enable_progress_bar: null
  enable_model_summary: null
  accumulate_grad_batches: 1
  gradient_clip_val: 1.0
  gradient_clip_algorithm: null
  deterministic: null
  benchmark: null
  inference_mode: true
  use_distributed_sampler: true
  profiler:
    class_path: lightning.pytorch.profilers.PyTorchProfiler
    init_args:
      dirpath: null
      filename: null
      group_by_input_shapes: false
      emit_nvtx: false
      export_to_chrome: true
      row_limit: 20
      sort_by_key: null
      record_module_names: true
      table_kwargs: null
      record_shapes: false
    dict_kwargs:
      profile_memory: true
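  # dict_kwargs forwards keyword arguments that are not named init args of
  # PyTorchProfiler; here profile_memory: true is passed through to the
  # underlying torch.profiler.profile. With export_to_chrome: true, the traces
  # can be opened in chrome://tracing or Perfetto. Profiling adds measurable
  # overhead, so this block is typically dropped for full training runs.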
  detect_anomaly: false
  barebones: false
  plugins: null
  sync_batchnorm: false
  reload_dataloaders_every_n_epochs: 0
  default_root_dir: genbio_finetune/logs
model:
  class_path: genbio_finetune.tasks.SequenceRegression
  init_args:
    backbone:
      class_path: genbio_finetune.models.rnafm
      init_args:
        from_scratch: false
        max_length: 1024
        use_peft: true
        save_peft_only: true
        lora_r: 32
        lora_alpha: 64
        lora_dropout: 0.1
        lora_target_modules:
        - query
        - value
        config_overwrites:
          hidden_dropout_prob: 0.1
          attention_probs_dropout_prob: 0.1
        model_init_args: null
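    # LoRA sketch (assuming standard PEFT semantics for this backbone): each
    # targeted query/value projection computes W x + (lora_alpha / lora_r) * B(A x),
    # with A: r x d_in and B: d_out x r, so r = 32 and scaling = 64 / 32 = 2.
    # save_peft_only: true checkpoints just these low-rank matrices (plus the
    # adapter head), not the full 1.6B-parameter backbone.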
    adapter:
      class_path: genbio_finetune.models.MLPPoolAdapter
      init_args:
        pooling: mean_pooling
        hidden_sizes:
        - 512
        bias: true
        dropout: 0.1
        dropout_in_middle: false
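    # MLPPoolAdapter, as configured: mean-pool the backbone's token embeddings
    # over the sequence, then one 512-unit hidden layer with dropout 0.1
    # (presumably applied outside the middle layers, given dropout_in_middle:
    # false), feeding the scalar regression output below.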
    num_outputs: 1
    optimizer:
      class_path: torch.optim.AdamW
      init_args:
        lr: 0.0003
        betas:
        - 0.9
        - 0.999
        eps: 1.0e-08
        weight_decay: 0.01
        amsgrad: false
        maximize: false
        foreach: null
        capturable: false
        differentiable: false
        fused: null
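    # AdamW applies decoupled weight decay: p <- p - lr * (adam_update + 0.01 * p).
    # With use_peft: true, presumably only the LoRA matrices and the adapter
    # head are trainable, so lr 3e-4 touches a small fraction of the 1.6B weights.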
    lr_scheduler:
      class_path: genbio_finetune.lr_schedulers.CosineWithWarmup
      init_args:
        warmup_ratio: 0.01
        num_warmup_steps: null
        last_epoch: -1
        verbose: deprecated
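    # With num_warmup_steps: null, CosineWithWarmup presumably derives the
    # warmup length from warmup_ratio at runtime: about 0.01 * total optimizer
    # steps. Example: 15 epochs x 1,000 steps/epoch = 15,000 steps, so ~150
    # warmup steps before the cosine decay. verbose: deprecated is the sentinel
    # default dumped from recent torch LR schedulers, not a user setting.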
    use_legacy_adapter: false
    strict_loading: true
    reset_optimizer_states: false
data:
  class_path: genbio_finetune.data.TranscriptAbundance
  init_args:
    path: genbio-ai/rna-downstream-tasks
    config_name: transcript_abundance_ecoli
    normalize: true
    train_split_name: train
    train_split_files: null
    test_split_files: null
    valid_split_files: null
    random_seed: 42
    batch_size: 4     # per-device; global_batch_size = 16 assumes 4 devices x batch_size 4 x accumulate_grad_batches 1
    shuffle: true
    sampler: null
    num_workers: 0
    pin_memory: true
    persistent_workers: false
    cv_num_folds: 5
    cv_test_fold_id: 0
    cv_enable_val_fold: true
    cv_fold_id_col: fold_id
ckpt_path: null
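# Data: 5-fold cross-validation on the Hugging Face dataset
# genbio-ai/rna-downstream-tasks (config transcript_abundance_ecoli), split on
# the fold_id column. Fold 0 is held out for testing and, with
# cv_enable_val_fold: true, one further fold serves as validation, presumably
# leaving three folds for training. ckpt_path: null starts a fresh run rather
# than resuming from a checkpoint.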