"""Visualize cross-attention between protein residues and taxonomic ranks.

Loads a trained TaxonomyAwareESM checkpoint, runs selected proteins through
the model, captures the cross-attention weights via a forward hook, and
saves one heatmap per protein under outputs/attention_map/.
"""
import os
import sys

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import torch
from transformers import AutoTokenizer

# Make the project-local `src` package importable when this script is run
# directly from the repository root.
sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))

from model import TaxonomyAwareESM
from dataset import ProteinTaxonomyDataset
|
def _infer_num_classes(state_dict):
    """Infer the classifier output dimension from checkpoint weight shapes.

    Supports both a single ``nn.Linear`` classifier (``classifier.weight``)
    and a ``nn.Sequential`` head whose final linear layer is at index 3
    (``classifier.3.weight``).

    Raises:
        KeyError: if no recognizable classifier weight key is present.
    """
    if 'classifier.weight' in state_dict:
        return state_dict['classifier.weight'].shape[0]
    if 'classifier.3.weight' in state_dict:
        return state_dict['classifier.3.weight'].shape[0]
    print("Warning: Could not infer num_classes from state_dict keys. Keys:", state_dict.keys())
    raise KeyError("Could not find classifier weights")


def _load_dataset(data_path):
    """Build the ProteinTaxonomyDataset from the fixed project layout under *data_path*."""
    fasta_path = os.path.join(data_path, "learning_superset", "large_learning_superset.fasta")
    term_path = os.path.join(data_path, "learning_superset", "large_learning_superset_term.tsv")
    species_vec = os.path.join(data_path, "taxon_embedding", "species_vectors.tsv")
    go_vocab = os.path.join("src", "go_terms.json")

    tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")

    return ProteinTaxonomyDataset(
        fasta_path, term_path, species_vec, go_vocab,
        max_len=512,
        esm_tokenizer=tokenizer,
    )


def _find_target_indices(dataset, target_ids):
    """Return dataset indices whose protein ID is in *target_ids*.

    Uses the cheap ``dataset.protein_ids`` attribute when available;
    otherwise falls back to materializing samples one by one, stopping early
    once every target has been matched.
    """
    target_indices = []
    if hasattr(dataset, 'protein_ids'):
        for idx, pid in enumerate(dataset.protein_ids):
            if pid in target_ids:
                target_indices.append(idx)
    else:
        # Slow path: each dataset[i] may tokenize a full sequence, so bail
        # out as soon as we have found one index per requested ID.
        found_count = 0
        for i in range(len(dataset)):
            if dataset[i]['entry_id'] in target_ids:
                target_indices.append(i)
                found_count += 1
                if found_count >= len(target_ids):
                    break
    return target_indices


def _save_heatmap(weights, prot_id, output_dir):
    """Render *weights* (residues x 7 taxonomic ranks, CPU tensor) as a heatmap PNG."""
    plt.figure(figsize=(12, 8))
    # Transpose so ranks are rows (y-axis) and residues are columns (x-axis).
    sns.heatmap(weights.T.numpy(), cmap='viridis',
                yticklabels=["Kingdom", "Phylum", "Class", "Order", "Family", "Genus", "Species"])
    plt.title(f"Cross-Attention Map - Protein {prot_id}")
    plt.xlabel("Sequence Position (Residues)")
    plt.ylabel("Taxonomic Rank")

    save_path = os.path.join(output_dir, f"{prot_id}.png")
    plt.savefig(save_path)
    plt.close()
    print(f"Analysis saved to {save_path}")


def analyze_attention(checkpoint_path, data_path, target_ids, device='cuda'):
    """Plot residue-to-taxonomic-rank cross-attention maps for selected proteins.

    Loads the model checkpoint, hooks the model's ``cross_attention`` module
    to capture its attention weights, runs each target protein through the
    model, and writes one heatmap PNG per protein to outputs/attention_map/.

    Args:
        checkpoint_path: Path to a ``torch.save``-d checkpoint; either a raw
            state dict or a dict with a ``'model_state_dict'`` entry.
        data_path: Root directory containing ``learning_superset/`` and
            ``taxon_embedding/``.
        target_ids: Iterable of protein accession IDs to visualize.
        device: Torch device string to run inference on (default ``'cuda'``).
    """
    print(f"=== Loading Checkpoint: {checkpoint_path} ===")

    checkpoint = torch.load(checkpoint_path, map_location=device)
    # Accept both {'model_state_dict': ...} checkpoints and bare state dicts.
    state_dict = checkpoint.get('model_state_dict', checkpoint)

    num_classes = _infer_num_classes(state_dict)
    print(f"Detected Num Classes: {num_classes}")

    model = TaxonomyAwareESM(
        num_classes=num_classes,
        pretrained_model_name="facebook/esm2_t6_8M_UR50D",
        freeze_backbone=True,
    )
    # strict=False: tolerate missing/extra keys (e.g. frozen backbone
    # parameters not stored in the checkpoint). Mismatches are silent.
    model.load_state_dict(state_dict, strict=False)
    model.to(device)
    model.eval()

    # Populated by the forward hook on every model call.
    attn_weights_storage = {}

    def get_attn_weights(name):
        def hook(module, input, output):
            # Assumes the module returns (attn_output, attn_weights), as
            # nn.MultiheadAttention does with need_weights=True — TODO
            # confirm against TaxonomyAwareESM.cross_attention.
            attn_weights_storage[name] = output[1].detach().cpu()
        return hook

    model.cross_attention.register_forward_hook(get_attn_weights('cross_attn'))

    print("Loading dataset...")
    dataset = _load_dataset(data_path)
    print(f"Dataset loaded. Total samples: {len(dataset)}")

    print(f"Searching for target IDs: {target_ids}")
    target_indices = _find_target_indices(dataset, target_ids)
    print(f"Found {len(target_indices)} samples matching targets.")

    output_dir = os.path.join("outputs", "attention_map")
    os.makedirs(output_dir, exist_ok=True)

    for idx in target_indices:
        sample = dataset[idx]
        input_ids = sample['input_ids'].unsqueeze(0).to(device)
        attention_mask = sample['attention_mask'].unsqueeze(0).to(device)
        tax_vector = sample['tax_vector'].unsqueeze(0).to(device)
        prot_id = sample.get('entry_id', 'Unknown')

        print(f"Analyzing Protein ID: {prot_id}")

        # Clear previous capture so a hook failure is detectable below.
        attn_weights_storage.clear()
        with torch.no_grad():
            _ = model(input_ids, attention_mask, tax_vector)

        if 'cross_attn' not in attn_weights_storage:
            print(f"Error: Hook did not capture attention weights for {prot_id}.")
            continue

        # Drop the batch dim; presumably shape (seq_len, num_ranks) — verify
        # against the cross-attention module's output layout.
        weights = attn_weights_storage['cross_attn'][0]

        # Trim padding positions using the real (unpadded) sequence length.
        seq_len = attention_mask.sum().item()
        weights = weights[:seq_len, :]

        _save_heatmap(weights, prot_id, output_dir)

    print("\n[Interpretation Guide]")
    print("- Uniform Color? -> Model hasn't learned to distinguish ranks yet.")
    print("- Vertical Stripes? -> Specific residues attend to ALL ranks (Structural importance).")
    print("- Horizontal Stripes? -> Some ranks are universally more important.")
    print("- Scattered Hotspots? -> IDEAL. Specific residues attend to specific ranks.")
|
| |
|
if __name__ == "__main__":
    # UniProt accessions of the proteins to visualize.
    targets = [
        "P0DPQ6", "A0A0C5B5G6", "P40205",
        "F5H094", "Q6RFH8", "Q0D2H9",
        "L0R8F8", "P0DMW2", "Q6L8H1", "A0A1B0GTW7",
    ]

    analyze_attention(
        checkpoint_path="outputs/best_model_fmax.pth",
        data_path=".",
        target_ids=targets,
        device='cuda' if torch.cuda.is_available() else 'cpu',
    )
|
| |
|