Shanmuk4622 committed on
Commit 5ff8c0d · verified · 1 Parent(s): aac87ab

Upload test3/eden_AlexNet_CIFAR100.py with huggingface_hub

Files changed (1)
  1. test3/eden_AlexNet_CIFAR100.py +181 -0
test3/eden_AlexNet_CIFAR100.py ADDED
@@ -0,0 +1,181 @@
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ import torchvision
+ import torchvision.transforms as transforms
+ from torch.utils.data import DataLoader, TensorDataset
+ from sklearn.metrics import f1_score, precision_score, recall_score
+ from codecarbon import EmissionsTracker
+ from thop import profile
+ import time
+ import pandas as pd
+ import numpy as np
+ import os
+ import warnings
+ import copy
+ from datetime import timedelta
+
+ # --- Configuration ---
+ MODEL_NAME = "alexnet_EDEN"
+ DATASET_NAME = "CIFAR100"
+ DATA_PATH = r'C:\Users\shanm\Dataset Download\CIFAR100'
+ BATCH_SIZE = 128
+ ACCUMULATION_STEPS = 4  # Effective batch size = 128 * 4 = 512
+ EPOCHS = 15
+ E_UNFREEZE = 10
+ LAMBDA_L1 = 1e-5
+ DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ SAVE_DIR = "saved_models"
+ os.makedirs(SAVE_DIR, exist_ok=True)
+ CSV_FILENAME = f"{MODEL_NAME}_{DATASET_NAME}_stats.csv"
+
+ warnings.filterwarnings("ignore")
+ os.environ["CODECARBON_LOG_LEVEL"] = "error"
+ def main():
+     # --- Phase 1: Zero-Overhead Initialization (RAM Caching) ---
+     transform = transforms.Compose([
+         transforms.Resize(224),  # pre-trained AlexNet expects 224x224 input
+         transforms.ToTensor(),
+         transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2673, 0.2564, 0.2762)),
+     ])
+
+     print(f"[*] Caching {DATASET_NAME} to System RAM for zero-I/O overhead...")
+     try:
+         full_dataset = torchvision.datasets.CIFAR100(root=DATA_PATH, train=True, download=False, transform=transform)
+     except Exception:
+         full_dataset = torchvision.datasets.CIFAR100(root=os.path.dirname(DATA_PATH), train=True, download=False, transform=transform)
+
+     all_data, all_targets = [], []
+     for i, (img, target) in enumerate(full_dataset):
+         all_data.append(img)
+         all_targets.append(target)
+         if i % 10000 == 0:
+             print(f"    Loaded {i}/50000 images...")
+
+     cached_trainset = TensorDataset(torch.stack(all_data), torch.tensor(all_targets))
+     trainloader = DataLoader(cached_trainset, batch_size=BATCH_SIZE, shuffle=True, pin_memory=True)
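+     # NOTE: caching all 50,000 images as 3x224x224 float32 tensors needs roughly
+     # 50,000 * 3 * 224 * 224 * 4 bytes ~ 30 GB of system RAM. On machines with
+     # less memory, skip the caching loop and wrap full_dataset in the DataLoader
+     # directly (optionally with num_workers > 0) instead.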
+
+     # --- Model Setup ---
+     model = torchvision.models.alexnet(weights='IMAGENET1K_V1')
+     model.classifier[6] = nn.Linear(4096, 100)  # 100 classes for CIFAR-100
+
+     # 1. Calculate FLOPs on a temporary clone to avoid hook attribute errors
+     print("[*] Calculating hardware metrics (FLOPs/Params)...")
+     model_for_profile = copy.deepcopy(model).to(DEVICE)
+     dummy_input = torch.randn(1, 3, 224, 224).to(DEVICE)
+     flops, params = profile(model_for_profile, inputs=(dummy_input,), verbose=False)
+     del model_for_profile  # clean up clone
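+     # NOTE: thop.profile registers forward hooks and leaves total_ops/total_params
+     # buffers on every module it visits, which is why profiling runs on a deepcopy
+     # and the clone is deleted afterwards.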
+
+     # 2. Freeze backbone for EDEN Phase 2 (initial state)
+     for param in model.features.parameters():
+         param.requires_grad = False
+
+     model.to(DEVICE)
+
+     criterion = nn.CrossEntropyLoss()
+     optimizer = optim.AdamW(model.parameters(), lr=1e-3)
+     scaler = torch.cuda.amp.GradScaler()  # for automatic mixed precision (AMP)
+
+     results = []
+     cumulative_total_energy = 0
+     total_start_time = time.time()
+     best_acc = 0.0
+
+     tracker = EmissionsTracker(measure_power_secs=1, save_to_file=False, log_level='error')
+
+     print(f"\n[MODEL INFO] FLOPs: {flops/1e9:.2f} G | Parameters: {params/1e6:.2f} M | Batch Size: {BATCH_SIZE}")
+     print(f"{'='*140}")
+     print(f"{'Epoch':<6} | {'Loss':<7} | {'Acc':<7} | {'Total(J)':<9} | {'VRAM(GB)':<9} | {'EAG':<8} | {'Status'}")
+     print(f"{'-'*140}")
+     for epoch in range(1, EPOCHS + 1):
+         # --- EDEN Progressive Unfreezing ---
+         if epoch == E_UNFREEZE:
+             for param in model.parameters():
+                 param.requires_grad = True
+             for param_group in optimizer.param_groups:
+                 param_group['lr'] = 1e-5
+             status_msg = "UNFROZEN"
+         else:
+             status_msg = "FROZEN" if epoch < E_UNFREEZE else "FINE-TUNING"
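+         # EDEN schedule recap: with EPOCHS = 15 and E_UNFREEZE = 10, epochs 1-9
+         # train only the classifier head (FROZEN), epoch 10 makes every parameter
+         # trainable at the reduced LR (UNFROZEN), and epochs 11-15 continue
+         # full-network fine-tuning (FINE-TUNING).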
+
+         model.train()
+         tracker.start()
+         epoch_start_time = time.time()
+         running_loss, all_preds, all_labels, grad_norms = 0.0, [], [], []
+
+         optimizer.zero_grad()
+         for i, (inputs, labels) in enumerate(trainloader):
+             inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
+
+             with torch.cuda.amp.autocast():
+                 outputs = model(inputs)
+                 cls_loss = criterion(outputs, labels)
+
+                 # EDEN Sparse Training Penalty (L1)
+                 l1_penalty = sum(p.abs().sum() for p in model.parameters() if p.requires_grad)
+                 loss = (cls_loss + LAMBDA_L1 * l1_penalty) / ACCUMULATION_STEPS
+
+             scaler.scale(loss).backward()
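+             # Why the division above: scaling the loss by 1/ACCUMULATION_STEPS
+             # makes the 4 accumulated backward passes average their gradients,
+             # so each optimizer step sees the same gradient magnitude as a single
+             # batch of BATCH_SIZE * ACCUMULATION_STEPS = 512 samples.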
+
+             # Gradient Accumulation Logic
+             if (i + 1) % ACCUMULATION_STEPS == 0:
+                 scaler.unscale_(optimizer)  # unscale so clipping sees true gradient magnitudes
+                 grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
+                 grad_norms.append(grad_norm.item())
+
+                 scaler.step(optimizer)
+                 scaler.update()
+                 optimizer.zero_grad()
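+             # NOTE: len(trainloader) = ceil(50000/128) = 391, which is not a
+             # multiple of ACCUMULATION_STEPS, so the last 3 micro-batches of each
+             # epoch leave gradients that are discarded by the zero_grad() at the
+             # top of the next epoch.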
+
+             running_loss += cls_loss.item()
+             _, predicted = torch.max(outputs.data, 1)
+             all_preds.extend(predicted.cpu().numpy())
+             all_labels.extend(labels.cpu().numpy())
+
+         emissions_kg = tracker.stop()
+         duration = time.time() - epoch_start_time
+
+         # Energy metrics: codecarbon reports kWh, so convert via 1 kWh = 3,600,000 J
+         e_gpu = tracker.final_emissions_data.gpu_energy * 3600000
+         e_cpu = tracker.final_emissions_data.cpu_energy * 3600000
+         e_ram = tracker.final_emissions_data.ram_energy * 3600000
+         total_energy = e_gpu + e_cpu + e_ram
+         cumulative_total_energy += total_energy
+
+         acc = (np.array(all_preds) == np.array(all_labels)).mean()
+         f1 = f1_score(all_labels, all_preds, average='macro')
+         vram_peak = torch.cuda.max_memory_allocated(DEVICE) / (1024**3)
+         eag = acc / (total_energy / 1000) if total_energy > 0 else 0  # EAG = accuracy per kJ
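+         # NOTE: torch.cuda.max_memory_allocated() returns the peak since program
+         # start (or the last reset_peak_memory_stats call), so vram_peak is a
+         # running high-water mark rather than a per-epoch figure.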
+
+         # CSV Logging
+         epoch_stats = {
+             "epoch": epoch, "status": status_msg, "loss": running_loss / len(trainloader),
+             "accuracy": acc, "f1_score": f1,
+             "precision": precision_score(all_labels, all_preds, average='macro', zero_division=0),
+             "recall": recall_score(all_labels, all_preds, average='macro', zero_division=0),
+             "energy_gpu_j": e_gpu, "energy_cpu_j": e_cpu, "energy_ram_j": e_ram,
+             "total_energy_j": total_energy, "cumulative_total_energy_j": cumulative_total_energy,
+             "carbon_kg": emissions_kg, "vram_gb": vram_peak,
+             "latency_ms": (duration / len(trainloader)) * 1000,
+             "eag_metric": eag, "grad_norm": np.mean(grad_norms) if grad_norms else 0,
+             "model_flops": flops, "model_params": params,
+             "batch_size": BATCH_SIZE, "accumulation_steps": ACCUMULATION_STEPS
+         }
+         results.append(epoch_stats)
+         pd.DataFrame(results).to_csv(CSV_FILENAME, index=False)
+
+         if acc > best_acc:
+             best_acc = acc
+             torch.save(model.state_dict(), os.path.join(SAVE_DIR, f"BEST_{MODEL_NAME}_{DATASET_NAME}.pth"))
+             best_tag = "*"
+         else:
+             best_tag = ""
+
+         print(f"{epoch:02d}/{EPOCHS} | {epoch_stats['loss']:.4f} | {acc:.2%} | {total_energy:<9.2f} | {vram_peak:<9.3f} | {eag:<8.4f} | {status_msg}{best_tag}")
+
+     print(f"{'='*140}\n[FINISH] AlexNet on CIFAR-100 stats saved to {CSV_FILENAME}")
+
+ if __name__ == '__main__':
+     main()