| import argparse
|
| import sys
|
| import os
|
| import json
|
| import torch
|
| import torch.nn as nn
|
| import torch.optim as optim
|
| from torch.utils.data import DataLoader
|
|
|
| import numpy as np
|
| import pandas as pd
|
| from tqdm import tqdm
|
| from pathlib import Path
|
|
|
|
|
# Make the vendored CAFA evaluator checkout importable as `cafaeval`.
sys.path.append(os.path.join(os.path.dirname(__file__), 'CAFA-evaluator-PK', 'src'))
try:
    from cafaeval.parser import obo_parser, gt_parser, pred_parser
    from cafaeval.evaluation import evaluate_prediction
    # CAFA metric evaluation is available; run_evaluation() can score predictions.
    HAS_EVAL = True
except ImportError as e:
    # Evaluation is optional: training still runs, CAFA metrics are skipped.
    print(f"Warning: Could not import cafaeval: {e}")
    HAS_EVAL = False
|
|
|
| from dataset import ProteinTaxonomyDataset
|
| from model import TaxonomyAwareESM, AsymmetricLoss
|
| from asymmetric_loss import load_ia_weights
|
| from transformers import AutoTokenizer
|
|
|
def save_checkpoint(model, optimizer, epoch, metrics, filename):
    """Serialize model/optimizer state plus metrics to `filename`.

    Args:
        model: object exposing `state_dict()` (the network being trained).
        optimizer: object exposing `state_dict()`.
        epoch: epoch number stored alongside the weights (used on resume).
        metrics: dict of metric values to persist with the checkpoint.
        filename: destination path handed to `torch.save`.
    """
    checkpoint = {
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'metrics': metrics
    }
    torch.save(checkpoint, filename)
    # Bug fix: the message previously printed the literal "(unknown)"
    # instead of the actual destination path.
    print(f"Saved checkpoint to {filename}")
|
|
|
def run_evaluation(model, valid_loader, ontologies, gt, device, out_dir, epoch, prefix="valid"):
    """
    Generate per-protein GO-term predictions for `valid_loader` and score them
    with the CAFA evaluator (Weighted F-max, S-min).

    Predictions above a 0.01 probability floor are written as a TSV under
    `out_dir/preds_temp/<prefix>/epoch_<epoch>.tsv`. Returns a dict of metrics,
    or {} when the evaluator is unavailable, no predictions parse, or
    evaluation raises.
    """
    model.eval()
    print(f"Generating predictions for {prefix} set (Epoch {epoch})...")

    # Reverse mapping: output-layer column index -> GO term id.
    index_to_term = {idx: term for term, idx in valid_loader.dataset.go_to_idx.items()}

    predictions = []
    with torch.no_grad():
        for batch in tqdm(valid_loader, desc=f"{prefix} Infer"):
            scores = torch.sigmoid(
                model(
                    batch['input_ids'].to(device),
                    batch['attention_mask'].to(device),
                    batch['tax_vector'].to(device),
                )
            ).cpu().numpy()

            for row, protein_id in zip(scores, batch['entry_id']):
                # Keep only confident-enough terms to bound the file size.
                for col in np.where(row > 0.01)[0]:
                    predictions.append((protein_id, index_to_term[col], float(row[col])))

    pred_dir = os.path.join(out_dir, "preds_temp", prefix)
    os.makedirs(pred_dir, exist_ok=True)
    pred_path = os.path.join(pred_dir, f"epoch_{epoch}.tsv")

    with open(pred_path, 'w') as handle:
        handle.writelines(f"{p[0]}\t{p[1]}\t{p[2]:.5f}\n" for p in predictions)

    print(f"Saved {prefix} predictions to {pred_path}")

    # Without the cafaeval package (or a parsed ontology) we can only dump
    # predictions, not score them.
    if not HAS_EVAL or ontologies is None:
        return {}

    print(f"Running CAFA Evaluation for {prefix}...")
    try:
        parsed = pred_parser(pred_path, ontologies, gt, prop_mode='max', max_terms=None)
        if not parsed:
            print("Warning: No predictions parsed.")
            return {}

        taus = np.arange(0.01, 1, 0.01)
        results = evaluate_prediction(
            parsed, gt, ontologies, taus,
            gt_exclude=None, normalization='cafa', n_cpu=4
        )

        # Best weighted-F and best semantic distance per GO namespace.
        metrics = {}
        for namespace in results['ns'].unique():
            per_ns = results[results['ns'] == namespace]
            if 'f_w' in per_ns.columns:
                metrics[f"{namespace}_fmax_w"] = per_ns['f_w'].max()
            if 's' in per_ns.columns:
                metrics[f"{namespace}_smin"] = per_ns['s'].min()

        print(f"{prefix} Metrics: {metrics}")
        return metrics

    except Exception as e:
        # Evaluation is best-effort; never let it kill a training run.
        print(f"Evaluation failed: {e}")
        import traceback
        traceback.print_exc()
        return {}
|
|
|
def evaluate_gpu(model, dataloader, ic_weights, device, thresholds=None, pred_output_path=None, metrics_output_path=None):
    """
    Calculates Weighted F-max and S-min using GPU streaming to avoid OOM.

    For each batch, a (batch x thresholds x classes) binarization is built,
    reduced over classes, and summed into running per-threshold totals, so the
    full prediction matrix is never materialized.

    Args:
        model: network mapping (input_ids, attention_mask, tax_vector) -> logits.
        dataloader: yields dict batches with 'input_ids', 'attention_mask',
            'tax_vector', 'labels', 'entry_id'; its dataset must expose
            `go_to_idx` when `pred_output_path` is given.
        ic_weights: 1-D tensor of per-class information-content weights.
        device: torch device used for inference and accumulation.
        thresholds: optional 1-D tensor of decision thresholds; defaults to
            101 evenly spaced values in [0, 1].
        pred_output_path: optional TSV path; predictions with prob > 0.01 are
            written as "entry\\tterm\\tscore".
        metrics_output_path: optional TSV path for per-threshold metric curves.

    Returns:
        dict with keys 'fmax_w', 'threshold_fmax', 'smin', 'threshold_smin'.

    Raises:
        ValueError: if the dataloader yields no samples (previously this
            divided by zero and produced NaN/inf metrics).
    """
    model.eval()

    if thresholds is None:
        thresholds = torch.linspace(0, 1, 101, device=device)

    # Running totals across the dataset, one slot per threshold.
    sum_prec = torch.zeros(len(thresholds), device=device)
    sum_rec = torch.zeros(len(thresholds), device=device)
    sum_ru = torch.zeros(len(thresholds), device=device)  # remaining uncertainty
    sum_mi = torch.zeros(len(thresholds), device=device)  # misinformation

    total_samples = 0

    f_pred = None
    if pred_output_path:
        # `or '.'` guards against a bare filename whose dirname is ''.
        os.makedirs(os.path.dirname(pred_output_path) or '.', exist_ok=True)
        f_pred = open(pred_output_path, 'w')
        idx_to_go = {v: k for k, v in dataloader.dataset.go_to_idx.items()}

    # try/finally so the prediction file is closed even if inference raises
    # (the original leaked the handle on error).
    try:
        with torch.no_grad():
            for batch in tqdm(dataloader, desc="GPU Eval"):
                input_ids = batch['input_ids'].to(device)
                attention_mask = batch['attention_mask'].to(device)
                tax_vector = batch['tax_vector'].to(device)
                labels = batch['labels'].to(device)
                entry_ids = batch['entry_id']

                # Normalize entry_ids to a plain list of ids.
                if isinstance(entry_ids, str):
                    entry_ids = [entry_ids]
                if not isinstance(entry_ids, (list, tuple)):
                    if isinstance(entry_ids, torch.Tensor):
                        entry_ids = entry_ids.tolist()
                    else:
                        entry_ids = list(entry_ids)

                logits = model(input_ids, attention_mask, tax_vector)
                probs = torch.sigmoid(logits)

                if f_pred:
                    probs_cpu = probs.cpu().numpy()
                    for i, entry_id in enumerate(entry_ids):
                        indices = np.where(probs_cpu[i] > 0.01)[0]
                        for idx in indices:
                            term = idx_to_go[idx]
                            score = probs_cpu[i][idx]
                            f_pred.write(f"{entry_id}\t{term}\t{score:.4f}\n")

                # IC mass of the true annotation per sample; floored to avoid
                # division by zero for unannotated proteins.
                true_ic = (labels * ic_weights).sum(dim=1)
                true_ic = torch.maximum(true_ic, torch.tensor(1e-9, device=device))

                # Broadcast to (batch, n_thresholds, n_classes) and binarize.
                probs_unsqueezed = probs.unsqueeze(1)
                thresholds_unsqueezed = thresholds.view(1, -1, 1)
                pred_binary = (probs_unsqueezed >= thresholds_unsqueezed).float()

                labels_unsqueezed = labels.unsqueeze(1)
                ic_weights_unsqueezed = ic_weights.view(1, 1, -1)

                # IC-weighted overlap and predicted mass, per (sample, threshold).
                intersection_ic = (pred_binary * labels_unsqueezed * ic_weights_unsqueezed).sum(dim=2)
                pred_ic = (pred_binary * ic_weights_unsqueezed).sum(dim=2)

                precision = intersection_ic / (pred_ic + 1e-9)
                recall = intersection_ic / (true_ic.view(-1, 1) + 1e-9)

                ru = true_ic.view(-1, 1) - intersection_ic
                ru = torch.clamp(ru, min=0.0)

                mi = pred_ic - intersection_ic
                mi = torch.clamp(mi, min=0.0)

                sum_prec += precision.sum(dim=0)
                sum_rec += recall.sum(dim=0)
                sum_ru += ru.sum(dim=0)
                sum_mi += mi.sum(dim=0)

                total_samples += input_ids.size(0)

                # Free the large intermediates before the next batch.
                del logits, probs, pred_binary, intersection_ic, pred_ic, ru, mi
                # (Two no-op dry_run checks that only executed `pass` were
                # removed here; they had no effect.)
    finally:
        if f_pred:
            f_pred.close()
            print(f"Saved predictions to {pred_output_path}")

    # Fix: an empty loader previously divided by zero below and returned
    # NaN/inf metrics; fail loudly instead.
    if total_samples == 0:
        raise ValueError("evaluate_gpu: dataloader yielded no samples")

    avg_prec = sum_prec / total_samples
    avg_rec = sum_rec / total_samples
    avg_ru = sum_ru / total_samples
    avg_mi = sum_mi / total_samples

    # Weighted F-measure per threshold; F-max is the best over thresholds.
    f1_scores = 2 * avg_prec * avg_rec / (avg_prec + avg_rec + 1e-9)
    best_fmax = f1_scores.max().item()
    best_t_idx = f1_scores.argmax().item()
    best_threshold_f = thresholds[best_t_idx].item()

    # Semantic distance S = sqrt(ru^2 + mi^2); S-min is the best over thresholds.
    s_scores = torch.sqrt(avg_ru**2 + avg_mi**2)
    min_s = s_scores.min().item()
    best_s_idx = s_scores.argmin().item()
    best_threshold_s = thresholds[best_s_idx].item()

    metrics = {
        'fmax_w': best_fmax,
        'threshold_fmax': best_threshold_f,
        'smin': min_s,
        'threshold_smin': best_threshold_s,
    }

    if metrics_output_path:
        metrics_data = {
            'threshold': thresholds.cpu().numpy(),
            'precision': avg_prec.cpu().numpy(),
            'recall': avg_rec.cpu().numpy(),
            'f1': f1_scores.cpu().numpy(),
            'ru': avg_ru.cpu().numpy(),
            'mi': avg_mi.cpu().numpy(),
            's': s_scores.cpu().numpy()
        }
        pd.DataFrame(metrics_data).to_csv(metrics_output_path, sep='\t', index=False)
        print(f"Saved detailed metrics to {metrics_output_path}")

    return metrics
|
|
|
def validate_loss(model, valid_loader, criterion, device):
    """Return the mean criterion loss over `valid_loader` (no gradients)."""
    model.eval()
    torch.cuda.empty_cache()

    batch_losses = []
    with torch.no_grad():
        for batch in tqdm(valid_loader, desc="Valid Loss"):
            ids = batch['input_ids'].to(device)
            mask = batch['attention_mask'].to(device)
            taxa = batch['tax_vector'].to(device)
            targets = batch['labels'].to(device)

            # Mixed precision matches the training forward pass.
            with torch.amp.autocast(device_type=device.type):
                outputs = model(ids, mask, taxa)
                batch_losses.append(criterion(outputs, targets).item())

    return sum(batch_losses) / len(batch_losses)
|
|
|
def _str2bool(value):
    """argparse-friendly boolean parser accepting true/false, 1/0, yes/no."""
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', '1', 'yes', 'y'):
        return True
    if lowered in ('false', '0', 'no', 'n'):
        return False
    raise argparse.ArgumentTypeError(f"Invalid boolean value: {value!r}")


def main():
    """Train TaxonomyAwareESM on GO-annotation data with warmup + cosine LR,
    mixed precision, MLflow logging, and periodic GPU CAFA-style evaluation
    on the novel/homolog validation splits."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_path", type=str, required=True, help="Path to mounted dataset")
    parser.add_argument("--lr", type=float, default=5e-5)
    parser.add_argument("--batch_size", type=int, default=32)
    parser.add_argument("--epochs", type=int, default=10)
    parser.add_argument("--num_workers", type=int, default=4, help="Number of data loader workers")
    parser.add_argument("--T_0", type=int, default=10, help="CosineAnnealingWarmRestarts T_0")
    parser.add_argument("--T_mult", type=int, default=1, help="CosineAnnealingWarmRestarts T_mult")
    parser.add_argument("--min_lr", type=float, default=1e-6, help="Minimum learning rate")
    parser.add_argument("--esm_model_name", type=str, default="facebook/esm2_t33_650M_UR50D", help="ESM model name")
    parser.add_argument("--gamma_neg", type=float, default=2, help="Asymmetric Loss gamma_neg")
    parser.add_argument("--gamma_pos", type=float, default=0, help="Asymmetric Loss gamma_pos")
    parser.add_argument("--clip", type=float, default=0.05, help="Asymmetric Loss clip")
    parser.add_argument("--max_grad_norm", type=float, default=1.0, help="Max gradient norm for clipping")
    parser.add_argument("--output_dir", type=str, default="outputs", help="Directory for checkpoints and predictions")
    parser.add_argument("--mlflow_dir", type=str, default="mlruns", help="Directory for MLflow logs")

    # Fix: `type=bool` made "--use_lora False" parse as True (bool("False")
    # is True). Parse the string explicitly; the default stays True.
    parser.add_argument("--use_lora", type=_str2bool, default=True, help="Use LoRA for ESM backbone")
    parser.add_argument("--lora_rank", type=int, default=8, help="LoRA rank")
    parser.add_argument("--dry_run", action="store_true", help="Run a short dry run for testing")
    parser.add_argument("--resume_checkpoint", type=str, default=None, help="Path to checkpoint to resume from")
    parser.add_argument("--skip_eval", action="store_true", help="Skip GPU evaluation during training")

    args = parser.parse_args()

    # --- Dataset file layout -------------------------------------------------
    data_path = Path(args.data_path)
    train_fasta = data_path / "learning_superset" / "large_learning_superset.fasta"
    train_term = data_path / "learning_superset" / "large_learning_superset_term.tsv"

    val_fasta = data_path / "validation_superset" / "validation_superset.fasta"
    val_term = data_path / "validation_superset" / "validation_superset_term.tsv"

    val_novel_fasta = data_path / "validation_superset" / "validation_novel" / "validation_novel.fasta"
    val_novel_term = data_path / "validation_superset" / "validation_novel" / "validation_novel_terms.tsv"

    val_homolog_fasta = data_path / "validation_superset" / "validation_homolog" / "validation_homolog.fasta"
    val_homolog_term = data_path / "validation_superset" / "validation_homolog" / "validation_homolog_terms.tsv"

    species_vec = data_path / "taxon_embedding" / "species_vectors.tsv"

    # GO vocabulary: prefer the src/ copy, fall back to the cwd copy.
    go_vocab_path = "src/go_terms.json"
    if not os.path.exists(go_vocab_path):
        go_vocab_path = "go_terms.json"

    obo_path = data_path / "go_info" / "go-basic.obo"
    ia_path = data_path / "IA.tsv"

    go_matrix_path = data_path / "go_info" / "go_ancestor_matrix.npz"
    go_mapping_path = data_path / "go_info" / "go_term_mappings.pkl"

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")

    print(f"Loading tokenizer for: {args.esm_model_name}")
    tokenizer = AutoTokenizer.from_pretrained(args.esm_model_name)

    # --- Datasets and loaders ------------------------------------------------
    print("Initializing Datasets...")
    train_dataset = ProteinTaxonomyDataset(
        train_fasta, train_term, species_vec, go_vocab_path, max_len=1024, esm_tokenizer=tokenizer,
        go_matrix_path=str(go_matrix_path), go_mapping_path=str(go_mapping_path)
    )
    val_dataset = ProteinTaxonomyDataset(
        val_fasta, val_term, species_vec, go_vocab_path, max_len=1024, esm_tokenizer=tokenizer,
        go_matrix_path=str(go_matrix_path), go_mapping_path=str(go_mapping_path)
    )

    train_loader = DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=True,
        persistent_workers=True if args.num_workers > 0 else False,
        prefetch_factor=2 if args.num_workers > 0 else None
    )
    # Half-size validation batches (presumably for memory headroom — confirm).
    val_loader = DataLoader(
        val_dataset,
        batch_size=max(1, args.batch_size // 2),
        shuffle=False,
        num_workers=args.num_workers,
        pin_memory=True,
        persistent_workers=True if args.num_workers > 0 else False,
        prefetch_factor=2 if args.num_workers > 0 else None
    )

    print("Initializing Novel Validation Set...")
    val_novel_dataset = ProteinTaxonomyDataset(
        val_novel_fasta, val_novel_term, species_vec, go_vocab_path, max_len=1024, esm_tokenizer=tokenizer,
        go_matrix_path=str(go_matrix_path), go_mapping_path=str(go_mapping_path)
    )
    val_novel_loader = DataLoader(val_novel_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True)

    print("Initializing Homolog Validation Set...")
    val_homolog_dataset = ProteinTaxonomyDataset(
        val_homolog_fasta, val_homolog_term, species_vec, go_vocab_path, max_len=1024, esm_tokenizer=tokenizer,
        go_matrix_path=str(go_matrix_path), go_mapping_path=str(go_mapping_path)
    )
    val_homolog_loader = DataLoader(val_homolog_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True)

    # --- Model / optimization ------------------------------------------------
    model = TaxonomyAwareESM(
        num_classes=train_dataset.num_classes,
        pretrained_model_name=args.esm_model_name,
        use_lora=args.use_lora,
        lora_rank=args.lora_rank,
        vocab_sizes=train_dataset.vocab_sizes
    ).to(device)

    criterion = AsymmetricLoss(gamma_neg=args.gamma_neg, gamma_pos=args.gamma_pos, clip=args.clip).to(device)
    optimizer = optim.AdamW(model.parameters(), lr=args.lr)

    scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(
        optimizer, T_0=args.T_0, T_mult=args.T_mult, eta_min=args.min_lr
    )

    # Fix: torch.cuda.amp.GradScaler is deprecated; use the torch.amp API for
    # consistency with the torch.amp.autocast calls used in this file.
    scaler = torch.amp.GradScaler(device.type, enabled=(device.type == 'cuda'))

    # --- Optional CAFA evaluator inputs --------------------------------------
    ontologies = None
    gt = None
    if HAS_EVAL:
        print("Loading Ontology and Ground Truth...")
        ontologies = obo_parser(
            str(obo_path),
            ("is_a", "part_of"),
            str(ia_path) if ia_path.exists() else None,
            True
        )
        gt = gt_parser(str(val_term), ontologies)

        # NOTE(review): gt_novel / gt_homolog are loaded but never used below;
        # kept for parity with the original flow — confirm before removing.
        print("Loading Ground Truth for Novel/Homolog...")
        gt_novel = gt_parser(str(val_novel_term), ontologies)
        gt_homolog = gt_parser(str(val_homolog_term), ontologies)

    print("Loading IC Weights for GPU Evaluation...")
    ic_weights = load_ia_weights(
        str(ia_path) if ia_path.exists() else "IA.tsv",
        train_dataset.go_to_idx,
        train_dataset.num_classes
    ).to(device)

    import mlflow
    import time
    import copy

    if args.mlflow_dir:
        mlflow_uri = Path(args.mlflow_dir).resolve().as_uri()
        mlflow.set_tracking_uri(mlflow_uri)
        print(f"MLflow tracking URI: {mlflow_uri}")

    mlflow.start_run()
    mlflow.log_params(vars(args))

    best_val_loss = float('inf')
    output_dir = Path(args.output_dir)
    os.makedirs(output_dir, exist_ok=True)

    best_model_path = output_dir / "best_model_loss.pth"
    best_wf_max = 0.0

    start_epoch = 1

    # --- Resume --------------------------------------------------------------
    if args.resume_checkpoint and os.path.exists(args.resume_checkpoint):
        print(f"Resuming training from checkpoint: {args.resume_checkpoint}")
        checkpoint = torch.load(args.resume_checkpoint, map_location=device)

        model.load_state_dict(checkpoint['model_state_dict'])

        if 'optimizer_state_dict' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

        start_epoch = checkpoint['epoch'] + 1
        print(f"Resuming from Epoch {start_epoch}")

        if 'metrics' in checkpoint and 'val_loss' in checkpoint['metrics']:
            best_val_loss = checkpoint['metrics']['val_loss']
            print(f"Restored Best Val Loss: {best_val_loss}")

    if start_epoch > 1:
        # Fast-forward the scheduler: it only steps from epoch 3 onwards
        # (epochs 1-2 use the manual warmup below), mirroring the train loop.
        for past_epoch in range(1, start_epoch):
            if past_epoch >= 3:
                scheduler.step()

    try:
        for epoch in range(start_epoch, args.epochs + 1):
            epoch_start_time = time.time()

            model.train()
            total_loss = 0
            total_grad_norm = 0
            steps = 0

            # Manual LR warmup over the first three epochs: 25% -> 50% -> 100%.
            if epoch == 1:
                for param_group in optimizer.param_groups:
                    param_group['lr'] = args.lr * 0.25
            elif epoch == 2:
                for param_group in optimizer.param_groups:
                    param_group['lr'] = args.lr * 0.50
            elif epoch == 3:
                for param_group in optimizer.param_groups:
                    param_group['lr'] = args.lr

            current_lr = optimizer.param_groups[0]['lr']

            pbar = tqdm(train_loader, desc=f"Epoch {epoch} Train")
            for batch in pbar:
                input_ids = batch['input_ids'].to(device)
                attention_mask = batch['attention_mask'].to(device)
                tax_vector = batch['tax_vector'].to(device)
                labels = batch['labels'].to(device)

                optimizer.zero_grad()

                with torch.amp.autocast(device_type=device.type):
                    logits = model(input_ids, attention_mask, tax_vector)
                    loss = criterion(logits, labels)

                scaler.scale(loss).backward()

                # Unscale before clipping so the norm is measured in true units.
                scaler.unscale_(optimizer)
                grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

                scaler.step(optimizer)
                scaler.update()

                total_loss += loss.item()
                total_grad_norm += grad_norm.item()
                steps += 1

                if steps % 10 == 0:
                    current_gnorm = grad_norm.item() if isinstance(grad_norm, torch.Tensor) else grad_norm
                    global_step = (epoch - 1) * len(train_loader) + steps

                    mlflow.log_metrics({
                        "step_train_loss": loss.item(),
                        "step_grad_norm": current_gnorm,
                        "step_lr": optimizer.param_groups[0]['lr']
                    }, step=global_step)

                pbar.set_postfix({'loss': total_loss/steps})

                if args.dry_run and steps >= 5:
                    print("Dry run: breaking training loop.")
                    break

            # Cosine schedule only kicks in once warmup has finished.
            if epoch >= 3:
                scheduler.step()

            train_loss = total_loss / steps
            avg_grad_norm = total_grad_norm / steps
            print(f"Epoch {epoch} Train Loss: {train_loss:.4f}, Grad Norm: {avg_grad_norm:.4f}, LR: {current_lr:.2e}")

            val_loss = validate_loss(model, val_loader, criterion, device)
            print(f"Epoch {epoch} Val Loss: {val_loss:.4f}")

            epoch_time = time.time() - epoch_start_time

            mlflow.log_metrics({
                "train_loss": train_loss,
                "avg_grad_norm": avg_grad_norm,
                "val_loss": val_loss,
                "lr": current_lr,
                "epoch_time": epoch_time
            }, step=epoch)

            if val_loss < best_val_loss:
                print(f"New Best Val Loss: {val_loss:.4f} (was {best_val_loss:.4f})")
                best_val_loss = val_loss
                save_checkpoint(model, optimizer, epoch, {'val_loss': val_loss}, best_model_path)
                mlflow.log_metric("best_val_loss", best_val_loss, step=epoch)

            # Periodic CAFA-style GPU evaluation on the best-by-loss weights.
            run_eval = (epoch in [3, 10, 15, 20] or args.dry_run) and not args.skip_eval

            if run_eval:
                print(f"Epoch {epoch}: Running GPU CAFA Evaluation on Best Model (Loss: {best_val_loss:.4f})...")

                # Fix: state_dict() returns references to the live tensors, so
                # load_state_dict() below used to overwrite this "backup" in
                # place and the restore was a no-op. Deep copies make the
                # snapshot independent of the live model/optimizer.
                current_state = {
                    'model': copy.deepcopy(model.state_dict()),
                    'optimizer': copy.deepcopy(optimizer.state_dict())
                }

                if os.path.exists(best_model_path):
                    checkpoint = torch.load(best_model_path)
                    model.load_state_dict(checkpoint['model_state_dict'])
                    print(f"Loaded best model from epoch {checkpoint['epoch']} for evaluation.")
                else:
                    print("Warning: Best model not found, evaluating current model.")

                metrics_novel = evaluate_gpu(
                    model, val_novel_loader, ic_weights, device,
                    pred_output_path=output_dir / f"gpu_preds_novel_epoch_{epoch}.tsv",
                    metrics_output_path=output_dir / f"metrics_novel_epoch_{epoch}.tsv"
                )

                metrics_homolog = evaluate_gpu(
                    model, val_homolog_loader, ic_weights, device,
                    pred_output_path=output_dir / f"gpu_preds_homolog_epoch_{epoch}.tsv",
                    metrics_output_path=output_dir / f"metrics_homolog_epoch_{epoch}.tsv"
                )

                all_metrics = {}
                for k, v in metrics_novel.items():
                    all_metrics[f"novel_{k}"] = v
                for k, v in metrics_homolog.items():
                    all_metrics[f"homolog_{k}"] = v

                mlflow.log_metrics(all_metrics, step=epoch)
                print("Evaluation Complete. Metrics:", all_metrics)

                # Track best F-max on the novel split. (Fix: this whole
                # save/restore sequence was duplicated verbatim in the
                # original; the second copy was dead code and was removed.)
                novel_fmax = metrics_novel['fmax_w']
                if novel_fmax > best_wf_max:
                    best_wf_max = novel_fmax
                    print(f"New Best Novel F-max: {best_wf_max:.4f}")
                    save_checkpoint(model, optimizer, epoch, {'val_loss': best_val_loss, 'novel_fmax': best_wf_max}, output_dir / "best_model_fmax.pth")

                # Put the in-training weights back before the next epoch.
                model.load_state_dict(current_state['model'])
                optimizer.load_state_dict(current_state['optimizer'])
                print("Restored training state.")

                if args.dry_run:
                    print("Dry run complete (Evaluation).")

            save_checkpoint(model, optimizer, epoch, {'val_loss': val_loss}, output_dir / "latest_model.pth")

            # Fix: a dry run previously kept looping through every epoch; stop
            # after one full pass once the latest checkpoint is written.
            if args.dry_run:
                break
    finally:
        # Close the MLflow run even if training raises.
        mlflow.end_run()


if __name__ == "__main__":
    main()
|
|
|