Shanmuk4622 committed on
Commit
2305b52
·
verified ·
1 Parent(s): 80eb1ef

Upload test1/Algo_ImageNet_convnext.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. test1/Algo_ImageNet_convnext.py +234 -0
test1/Algo_ImageNet_convnext.py ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.optim as optim
4
+ from torch.utils.data import DataLoader
5
+ from torchvision.datasets import ImageFolder
6
+ import torchvision.transforms as transforms
7
+ import torchvision.models as models
8
+ from torchvision.models import ConvNeXt_Tiny_Weights
9
+ from codecarbon import EmissionsTracker
10
+ from carbontracker.tracker import CarbonTracker
11
+ from fvcore.nn import FlopCountAnalysis
12
+ from sklearn.metrics import precision_recall_fscore_support, accuracy_score
13
+ from tqdm import tqdm
14
+ import pandas as pd
15
+ import numpy as np
16
+ import os
17
+ import time
18
+ import logging
19
+ import warnings
20
+ import gc
21
+
22
+ # --- Environment & Logging Optimization ---
23
+ warnings.filterwarnings("ignore", category=UserWarning)
24
+ # Hard-mute CodeCarbon terminal spam
25
+ logging.getLogger("codecarbon").setLevel(logging.CRITICAL)
26
+ logging.getLogger("codecarbon").disabled = True
27
+
28
+ # --- Configurations ---
29
+ DATA_DIR = r"C:\Users\shanm\Dataset Download\custom image net"
30
+ LOG_FILE = "eden_unfrozen_custom_imagenet_convnext.csv"
31
+ MODEL_SAVE_PATH = "eden_unfrozen_convnext_custom_imagenet.pth"
32
+
33
+ BATCH_SIZE = 32
34
+ ACCUMULATION_STEPS = 4
35
+ LEARNING_RATE = 1e-3
36
+ NUM_EPOCHS = 30
37
+ UNFREEZE_EPOCH = 5
38
+ L1_LAMBDA = 1e-5
39
+ NUM_CLASSES = 300 # Matched to your 300 custom folders
40
+
41
+ DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
42
+
43
def run_experiment():
    """Fine-tune a pretrained ConvNeXt-Tiny on a custom ImageFolder dataset
    while profiling energy, carbon, latency, and accuracy per epoch.

    Training schedule:
      * Epochs 1 .. UNFREEZE_EPOCH-1: backbone frozen, only the new
        classifier head is trained at LEARNING_RATE.
      * From UNFREEZE_EPOCH onward: the whole network is unfrozen and
        trained at LEARNING_RATE * 0.1.

    Uses AMP (GradScaler/autocast), gradient accumulation over
    ACCUMULATION_STEPS micro-batches, and an L1 sparsity penalty on all
    trainable parameters. Per-epoch metrics are rewritten to LOG_FILE and
    the final weights are saved to MODEL_SAVE_PATH.

    Side effects: reads DATA_DIR, writes LOG_FILE and MODEL_SAVE_PATH,
    starts/stops CodeCarbon and CarbonTracker monitor threads, and prints
    progress to stdout. Assumes a CUDA device is available (uses
    torch.cuda.* APIs unconditionally).
    """
    torch.backends.cudnn.benchmark = True  # autotune kernels for fixed 224x224 input
    torch.cuda.empty_cache()
    gc.collect()

    # --- 1. Pure PyTorch Transfer Learning Setup ---
    weights = ConvNeXt_Tiny_Weights.DEFAULT
    model = models.convnext_tiny(weights=weights)

    # Stage 1: freeze the pretrained backbone; only the head learns.
    for param in model.features.parameters():
        param.requires_grad = False

    # Replace the final linear layer to emit NUM_CLASSES logits.
    in_features = model.classifier[2].in_features
    model.classifier[2] = nn.Linear(in_features, NUM_CLASSES)

    for param in model.classifier.parameters():
        param.requires_grad = True

    model = model.to(DEVICE)
    optimizer = optim.Adam(model.classifier.parameters(), lr=LEARNING_RATE)

    # Static model-cost metrics. NOTE(review): computed but never logged
    # anywhere below -- consider adding them to the CSV or removing.
    dummy_input = torch.randn(1, 3, 224, 224).to(DEVICE)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        total_flops = FlopCountAnalysis(model, dummy_input).total()
    total_params = sum(p.numel() for p in model.parameters())

    # --- 2. Dataset Setup ---
    # Standard ImageNet preprocessing and normalization statistics.
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])

    # Directly loads from the 300 custom class folders
    train_set = ImageFolder(root=DATA_DIR, transform=transform)
    loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, pin_memory=True)

    criterion = nn.CrossEntropyLoss()
    scaler = torch.cuda.amp.GradScaler()

    # --- 3. Profiling Initialization (SILENCED) ---
    cc_tracker = EmissionsTracker(measure_power_secs=1, save_to_file=False, log_level="critical")
    ct_tracker = CarbonTracker(epochs=NUM_EPOCHS, monitor_epochs=NUM_EPOCHS, update_interval=1)

    cc_tracker.start()
    all_logs = []
    total_iterations_counter = 0
    session_start_time = time.time()

    # Cumulative-energy baselines, so per-epoch deltas can be derived below.
    prev_cum_gpu_j, prev_cum_cpu_j, prev_cum_ram_j = 0.0, 0.0, 0.0
    prev_acc = 0.0

    print(f"\n[EDEN PROFILING STARTED] | Model: ConvNeXt-Tiny | Classes: {NUM_CLASSES}")
    print(f"Dataset: Custom ImageNet ({len(train_set)} images) | Saving quietly to CSV...\n")

    try:
        for epoch in range(NUM_EPOCHS):
            # --- Stage 2: Progressive Unfreezing ---
            if epoch + 1 == UNFREEZE_EPOCH:
                print(f"\n[Epoch {epoch+1}] Unfreezing ConvNeXt Backbone for Fine-Tuning...")
                for param in model.parameters():
                    param.requires_grad = True
                # Rebuild the optimizer over ALL parameters at a 10x lower LR.
                optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE * 0.1)

            ct_tracker.epoch_start()
            torch.cuda.reset_peak_memory_stats()
            epoch_start_time = time.time()
            model.train()

            running_loss = 0.0
            all_preds, all_labels = [], []
            epoch_grad_norms = []

            optimizer.zero_grad()
            pbar = tqdm(loader, desc=f"Epoch {epoch+1}/{NUM_EPOCHS}", unit="batch", leave=False)

            for i, (images, labels) in enumerate(pbar):
                images, labels = images.to(DEVICE), labels.to(DEVICE)

                with torch.cuda.amp.autocast():
                    outputs = model(images)
                    loss = criterion(outputs, labels)

                # Active Sparse Training (L1 Penalty)
                trainable_params = [p for p in model.parameters() if p.requires_grad]
                l1_penalty = sum(p.abs().sum() for p in trainable_params)

                total_loss = loss + (L1_LAMBDA * l1_penalty)
                scaled_loss = total_loss / ACCUMULATION_STEPS

                scaler.scale(scaled_loss).backward()

                # Non-Destructive L2 Gradient Norm.
                # NOTE(review): grads are still AMP loss-scaled here, so the
                # absolute values include the scale factor -- treat as a
                # relative trend only.
                grad_norm = 0.0
                for p in model.parameters():
                    if p.requires_grad and p.grad is not None:
                        grad_norm += p.grad.data.norm(2).item() ** 2
                epoch_grad_norms.append(grad_norm ** 0.5)

                if (i + 1) % ACCUMULATION_STEPS == 0:
                    scaler.step(optimizer)
                    scaler.update()
                    optimizer.zero_grad()

                # Track pure classification loss for clean CSV logging.
                # NOTE(review): the * ACCUMULATION_STEPS factor inflates the
                # reported loss ~4x relative to the plain batch mean -- confirm
                # this is the intended convention.
                running_loss += loss.item() * ACCUMULATION_STEPS

                _, preds = torch.max(outputs, 1)
                all_preds.extend(preds.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())
                total_iterations_counter += 1

                pbar.set_postfix(loss=f"{(loss.item()*ACCUMULATION_STEPS):.4f}")

            # BUG FIX: flush the final partial accumulation window. The
            # original code silently discarded the accumulated gradients of
            # the last len(loader) % ACCUMULATION_STEPS micro-batches every
            # epoch whenever that remainder was non-zero.
            if len(loader) % ACCUMULATION_STEPS != 0:
                scaler.step(optimizer)
                scaler.update()
                optimizer.zero_grad()

            # --- A. Evaluation ---
            ct_tracker.epoch_end()
            epoch_end_time = time.time()
            epoch_duration = epoch_end_time - epoch_start_time
            avg_it_per_sec = len(loader) / epoch_duration

            acc = accuracy_score(all_labels, all_preds)
            p, r, f1, _ = precision_recall_fscore_support(all_labels, all_preds, average='macro', zero_division=0)

            # Rigorous Inference Latency (With Warm-up)
            model.eval()
            with torch.no_grad():
                sample_img = torch.randn(1, 3, 224, 224).to(DEVICE)
                _ = model(sample_img)       # warm-up pass (kernel compilation/caching)
                torch.cuda.synchronize()

                starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
                starter.record()
                _ = model(sample_img)
                ender.record()
                torch.cuda.synchronize()
                lat_ms = starter.elapsed_time(ender)

            # --- B. Energy & Power Calculations ---
            # NOTE(review): _prepare_emissions_data() is a private CodeCarbon
            # API and may break on upgrade -- pin the codecarbon version.
            emissions_data = cc_tracker._prepare_emissions_data()

            # CodeCarbon reports kWh; convert to Joules (1 kWh = 3.6e6 J).
            cum_gpu_j = emissions_data.gpu_energy * 3.6e6
            cum_cpu_j = emissions_data.cpu_energy * 3.6e6
            cum_ram_j = emissions_data.ram_energy * 3.6e6
            cum_total_j = cum_gpu_j + cum_cpu_j + cum_ram_j

            # Per-epoch deltas from the cumulative counters.
            epoch_gpu_j = cum_gpu_j - prev_cum_gpu_j
            epoch_cpu_j = cum_cpu_j - prev_cum_cpu_j
            epoch_ram_j = cum_ram_j - prev_cum_ram_j
            epoch_total_j = epoch_gpu_j + epoch_cpu_j + epoch_ram_j

            prev_cum_gpu_j, prev_cum_cpu_j, prev_cum_ram_j = cum_gpu_j, cum_cpu_j, cum_ram_j

            # Average power (W) = energy (J) / duration (s), guarded for /0.
            avg_gpu_w = epoch_gpu_j / epoch_duration if epoch_duration > 0 else 0
            avg_cpu_w = epoch_cpu_j / epoch_duration if epoch_duration > 0 else 0
            avg_ram_w = epoch_ram_j / epoch_duration if epoch_duration > 0 else 0

            vram_peak = torch.cuda.max_memory_allocated(DEVICE) / (1024**3)

            # EAG: accuracy gained per Joule spent this epoch.
            acc_gain = acc - prev_acc
            eag = acc_gain / epoch_total_j if epoch_total_j > 0 else 0
            prev_acc = acc

            # --- C. Minimal Terminal Update ---
            print(f"Epoch {epoch+1}/{NUM_EPOCHS} | Acc: {acc:.4f} | Loss: {running_loss/len(loader):.4f} | Energy: {epoch_total_j:.1f}J | Latency: {lat_ms:.2f}ms")

            # --- D. Unified Verified CSV Logging ---
            log_entry = {
                "epoch": epoch + 1,
                "loss": running_loss / len(loader),
                "accuracy": acc, "f1_score": f1, "precision": p, "recall": r,
                "epoch_energy_gpu_j": epoch_gpu_j, "epoch_energy_cpu_j": epoch_cpu_j,
                "epoch_energy_ram_j": epoch_ram_j, "epoch_total_energy_j": epoch_total_j,
                "cumulative_total_energy_j": cum_total_j, "carbon_emissions_kg": emissions_data.emissions,
                "avg_power_gpu_w": avg_gpu_w, "avg_power_cpu_w": avg_cpu_w, "avg_power_ram_w": avg_ram_w,
                "vram_peak_gb": vram_peak, "latency_ms": lat_ms, "avg_grad_norm": np.mean(epoch_grad_norms),
                "eag_metric": eag, "it_per_sec": avg_it_per_sec, "total_iterations": total_iterations_counter,
                "epoch_duration_sec": epoch_duration, "cumulative_time_sec": time.time() - session_start_time
            }
            all_logs.append(log_entry)
            # Rewrite the full CSV every epoch so a crash loses at most one epoch.
            pd.DataFrame(all_logs).to_csv(LOG_FILE, index=False)
    finally:
        # BUG FIX: always stop BOTH trackers. The original never called
        # ct_tracker.stop() (leaving CarbonTracker's monitor thread alive)
        # and skipped cc_tracker.stop() on any mid-training exception.
        cc_tracker.stop()
        ct_tracker.stop()

    # --- E. Save Optimized Model ---
    torch.save(model.state_dict(), MODEL_SAVE_PATH)
    print(f"\n[FINISH] Verified Optimization Complete. Model and CSV Saved.")
232
+
233
# Script entry point: run the full profiling/training experiment.
if __name__ == "__main__":
    run_experiment()