Johnyquest7 committed on
Commit
0a9bbd2
·
verified ·
1 Parent(s): 532037a

Upload generate_gradcam_locally.py

Browse files
Files changed (1) hide show
  1. generate_gradcam_locally.py +178 -0
generate_gradcam_locally.py ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
"""
Generate Grad-CAM visualizations for the thyroid model.
Run this locally with a GPU for best performance, or on CPU (slower).

Usage:
python generate_gradcam_locally.py

Requirements:
pip install torch torchvision transformers datasets matplotlib Pillow huggingface_hub
"""
import os, math, json, random, warnings, traceback
warnings.filterwarnings("ignore")

import numpy as np
from PIL import Image
import matplotlib
# Non-interactive backend: the script only writes PNG files, never opens a window.
matplotlib.use("Agg")
import matplotlib.pyplot as plt

import torch
import torch.nn.functional as F
from datasets import load_dataset
from transformers import AutoImageProcessor, AutoModelForImageClassification
from huggingface_hub import HfApi

# ============== CONFIG ==============
HF_USERNAME = "Johnyquest7"
# Source dataset of labeled thyroid ultrasound images.
DATASET_NAME = "BTX24/thyroid-cancer-classification-ultrasound-dataset"
# Fine-tuned classification model to explain with Grad-CAM.
MODEL_NAME = f"{HF_USERNAME}/ML-Inter_thyroid"
# Local directory where the generated overlay PNGs are written.
OUTPUT_DIR = "./gradcam_outputs"
# Hub repo that receives the generated images under gradcam/.
REPO_ID = f"{HF_USERNAME}/thyroid-training-scripts"
SEED = 42
BATCH_SIZE = 16
NUM_CORRECT = 5  # Number of correct predictions to visualize
NUM_WRONG = 5  # Number of incorrect predictions to visualize
# =====================================

# Seed all RNGs so the test split and the sample selection are reproducible.
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
43
def main():
    """Generate and publish Grad-CAM overlays for the thyroid classifier.

    Pipeline:
      1. Load the fine-tuned SwinV2 classifier and its image processor.
      2. Rebuild the held-out test split (same seed/stratification as training).
      3. Run batched inference to get predictions for every test sample.
      4. Pick NUM_CORRECT correctly- and NUM_WRONG incorrectly-classified
         samples at random and compute a Grad-CAM heatmap for each.
      5. Save the overlays to OUTPUT_DIR and upload them to REPO_ID on the Hub.

    Side effects: downloads model/dataset, writes PNG files, uploads to the
    Hugging Face Hub (requires an authenticated environment).
    """
    print("=" * 60)
    print("Thyroid Grad-CAM Visualization Generator")
    print("=" * 60)
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"\nDevice: {device}")
    if device.type == "cpu":
        print("WARNING: Running on CPU. This will be slow for SwinV2 backward passes.")
        print("Consider running on Google Colab or a machine with GPU.")

    print(f"Loading model: {MODEL_NAME}")
    processor = AutoImageProcessor.from_pretrained(MODEL_NAME)
    model = AutoModelForImageClassification.from_pretrained(MODEL_NAME).to(device).eval()
    id2label = model.config.id2label
    print(f"Model loaded: {sum(p.numel() for p in model.parameters())/1e6:.1f}M params")

    print(f"\nLoading dataset: {DATASET_NAME}")
    ds = load_dataset(DATASET_NAME, split="train")
    ds = ds.shuffle(seed=SEED)
    # Same seed + stratification as training, so this reproduces the held-out split.
    train_test = ds.train_test_split(test_size=0.2, stratify_by_column="label", seed=SEED)
    test_ds = train_test["test"]
    print(f"Test samples: {len(test_ds)}")

    # Get predictions for the whole test split (no gradients needed here).
    all_logits, all_labels = [], []
    print("\nRunning inference...")
    for i in range(0, len(test_ds), BATCH_SIZE):
        batch_items = [test_ds[j] for j in range(i, min(i + BATCH_SIZE, len(test_ds)))]
        images = [item["image"].convert("RGB") for item in batch_items]
        inputs = processor(images, return_tensors="pt")
        with torch.no_grad():
            outputs = model(pixel_values=inputs["pixel_values"].to(device))
        all_logits.extend(outputs.logits.cpu().numpy())
        all_labels.extend([item["label"] for item in batch_items])
        if (i // BATCH_SIZE) % 5 == 0:
            print(f"  Batch {i//BATCH_SIZE + 1}/{(len(test_ds)+BATCH_SIZE-1)//BATCH_SIZE}")

    y_true = np.array(all_labels)
    y_pred = np.argmax(np.array(all_logits), axis=1)

    correct_idx = [i for i in range(len(y_true)) if y_true[i] == y_pred[i]]
    incorrect_idx = [i for i in range(len(y_true)) if y_true[i] != y_pred[i]]
    random.shuffle(correct_idx)
    random.shuffle(incorrect_idx)
    selected = correct_idx[:NUM_CORRECT] + incorrect_idx[:NUM_WRONG]
    print(f"\nSelected {len(selected)} samples: {len(correct_idx[:NUM_CORRECT])} correct, {len(incorrect_idx[:NUM_WRONG])} incorrect")

    # Register hooks on the last SwinV2 stage to capture activations and
    # their gradients for Grad-CAM.
    gradcam_data = {}

    def fwd_hook(module, input, output):
        gradcam_data["feat"] = output.detach()

    def bwd_hook(module, grad_input, grad_output):
        gradcam_data["grad"] = grad_output[0].detach()

    target_layer = model.swinv2.encoder.layers[-1].blocks[-1].layernorm_after
    fwd_handle = target_layer.register_forward_hook(fwd_hook)
    bwd_handle = target_layer.register_full_backward_hook(bwd_hook)

    local_files = []
    for idx in selected:
        try:
            item = test_ds[idx]
            img = item["image"].convert("RGB")
            label = item["label"]
            inputs = processor(img, return_tensors="pt")
            img_tensor = inputs["pixel_values"].to(device).requires_grad_(True)
            model.zero_grad()
            outputs = model(pixel_values=img_tensor)
            # Backprop the score of the *predicted* class (standard Grad-CAM target).
            target_class = int(y_pred[idx])
            score = outputs.logits[0, target_class]
            score.backward()

            feat = gradcam_data["feat"][0]   # [H*W, C] token activations
            grads = gradcam_data["grad"][0]  # [H*W, C] gradients w.r.t. activations
            # Channel weights = gradients averaged over spatial tokens.
            weights = grads.mean(dim=0, keepdim=True)
            cam = torch.matmul(feat, weights.t()).squeeze()  # [H*W]
            # Token grid is square, so recover H = W = sqrt(num_tokens).
            H = W = int(math.sqrt(cam.shape[0]))
            cam = cam.reshape(H, W)
            cam = F.relu(cam)
            # Min-max normalize to [0, 1]; epsilon guards an all-zero map.
            cam = cam - cam.min()
            cam = cam / (cam.max() + 1e-8)
            cam = F.interpolate(cam.unsqueeze(0).unsqueeze(0), size=(256, 256), mode="bilinear", align_corners=False)
            cam = cam.squeeze().cpu().numpy()

            # Un-normalize the processor output just for display purposes.
            img_np = img_tensor.squeeze().detach().cpu().permute(1, 2, 0).numpy()
            img_np = (img_np - img_np.min()) / (img_np.max() - img_np.min() + 1e-8)

            plt.figure(figsize=(6, 6))
            plt.imshow(img_np)
            plt.imshow(cam, cmap="jet", alpha=0.5)
            pred_name = id2label.get(target_class, str(target_class))
            true_name = id2label.get(label, str(label))
            status = "CORRECT" if y_true[idx] == y_pred[idx] else "WRONG"
            plt.title(f"{status}: Pred={pred_name} | True={true_name}")
            plt.axis("off")
            fname = f"gradcam_{status}_sample{idx}_{pred_name}_vs_{true_name}.png"
            fpath = os.path.join(OUTPUT_DIR, fname)
            plt.savefig(fpath, bbox_inches="tight", dpi=150)
            plt.close()
            local_files.append(fpath)
            print(f"  Saved: {fpath}")
        except Exception as e:
            # Best-effort: skip a bad sample but keep generating the rest.
            print(f"  Skipped sample {idx}: {e}")
            traceback.print_exc()

    fwd_handle.remove()
    bwd_handle.remove()

    print(f"\n{'='*60}")
    print(f"Generated {len(local_files)} Grad-CAM images in {OUTPUT_DIR}/")
    print(f"{'='*60}")

    # Upload to Hub
    print("\nUploading to Hugging Face Hub...")
    api = HfApi()
    uploaded = 0
    for fpath in local_files:
        fname = os.path.basename(fpath)
        try:
            api.upload_file(
                path_or_fileobj=fpath,
                # BUG FIX: the huggingface_hub keyword is `path_in_repo`,
                # not `path_in_file` — the old name raised TypeError on
                # every call, so nothing was ever uploaded.
                path_in_repo=f"gradcam/{fname}",
                repo_id=REPO_ID,
                repo_type="model"
            )
            print(f"  Uploaded: gradcam/{fname}")
            uploaded += 1
        except Exception as e:
            print(f"  Failed to upload {fname}: {e}")

    print(f"\nDone! Uploaded {uploaded}/{len(local_files)} images to https://huggingface.co/{REPO_ID}/tree/main/gradcam/")
176
+
177
# Script entry point: run the full generate-and-upload pipeline.
if __name__ == "__main__":
    main()