Shanmuk4622 committed on
Commit e65ba3d · verified · 1 Parent(s): 0ad61c0

Upload test3/eden_ResNet50_CIFAR100.py with huggingface_hub

Files changed (1)
  1. test3/eden_ResNet50_CIFAR100.py +166 -0
test3/eden_ResNet50_CIFAR100.py ADDED
@@ -0,0 +1,166 @@
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ import torchvision
+ import torchvision.transforms as transforms
+ from torch.utils.data import DataLoader, TensorDataset
+ from sklearn.metrics import f1_score, precision_score, recall_score
+ from codecarbon import EmissionsTracker
+ from thop import profile
+ from tqdm import tqdm
+ import time, pandas as pd, numpy as np, os, warnings, copy, gc
+
+ # --- Configuration ---
+ MODEL_NAME = "resnet50_EDEN"
+ DATASET_NAME = "CIFAR100"
+ DATA_PATH = r'C:\Users\shanm\Dataset Download\CIFAR100'
+ BATCH_SIZE = 64
+ ACCUMULATION_STEPS = 8
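+ # With BATCH_SIZE = 64 and ACCUMULATION_STEPS = 8, each optimizer step sees an effective batch of 512 samples.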
+ EPOCHS = 20
+ E_UNFREEZE = 10
+ LAMBDA_L1 = 1e-5
+ DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ SAVE_DIR = "saved_models"
+ os.makedirs(SAVE_DIR, exist_ok=True)
+ CSV_FILENAME = f"{MODEL_NAME}_{DATASET_NAME}_stats.csv"
+
+ warnings.filterwarnings("ignore")
+ os.environ["CODECARBON_LOG_LEVEL"] = "error"
+
+ def main():
+     # --- Phase 1: Zero-Overhead Initialization (RAM Caching) ---
+     transform = transforms.Compose([
+         transforms.Resize(224),
+         transforms.ToTensor(),
+         transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2673, 0.2564, 0.2762)),
+     ])
+
+     print(f"[*] Caching {DATASET_NAME} to System RAM...")
+     try:
+         full_dataset = torchvision.datasets.CIFAR100(root=DATA_PATH, train=True, download=False, transform=transform)
+     except Exception:
+         full_dataset = torchvision.datasets.CIFAR100(root=os.path.dirname(DATA_PATH), train=True, download=False, transform=transform)
+
+     all_data, all_targets = [], []
+     for i, (img, target) in enumerate(full_dataset):
+         all_data.append(img)
+         all_targets.append(target)
+         if i % 10000 == 0: print(f" Loaded {i}/50000 images...")
+
+     cached_trainset = TensorDataset(torch.stack(all_data), torch.tensor(all_targets))
+     trainloader = DataLoader(cached_trainset, batch_size=BATCH_SIZE, shuffle=True, pin_memory=True)
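+     # NOTE: caching 50,000 float32 tensors of shape 3x224x224 needs roughly 50000 * 3 * 224 * 224 * 4 bytes ≈ 28 GB of system RAM.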
+
+     # --- Model Setup (EDEN Phase 1) ---
+     model = torchvision.models.resnet50(weights='IMAGENET1K_V1')
+     model.fc = nn.Linear(model.fc.in_features, 100)
+
+     # 1. Profile on clone to avoid thop attribute error
+     print("[*] Calculating hardware metrics...")
+     model_for_profile = copy.deepcopy(model).to(DEVICE)
+     dummy_input = torch.randn(1, 3, 224, 224).to(DEVICE)
+     flops, params = profile(model_for_profile, inputs=(dummy_input, ), verbose=False)
+     del model_for_profile
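+     # thop's profile() counts multiply-accumulate operations, so the figure logged below as "FLOPs" is MACs rather than raw floating-point ops.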
+
+     # 2. Initially freeze backbone
+     for name, param in model.named_parameters():
+         if "fc" not in name:
+             param.requires_grad = False
+
+     model.to(DEVICE)
+
+     criterion = nn.CrossEntropyLoss()
+     optimizer = optim.AdamW(model.parameters(), lr=1e-3)
+     scaler = torch.cuda.amp.GradScaler()
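+     # On recent PyTorch releases torch.cuda.amp.GradScaler/autocast are deprecated aliases of
+     # torch.amp.GradScaler('cuda')/torch.amp.autocast('cuda'); they still work but emit warnings.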
+
+     results = []
+     cumulative_total_energy = 0
+     best_acc = 0.0
+
+     tracker = EmissionsTracker(measure_power_secs=1, save_to_file=False, log_level='error')
+
+     print(f"\n[MODEL INFO] FLOPs: {flops/1e9:.2f} G | Parameters: {params/1e6:.2f} M | Batch Size: {BATCH_SIZE}")
+     print(f"{'='*140}")
+     print(f"{'Epoch':<6} | {'Loss':<7} | {'Acc':<7} | {'Total(J)':<9} | {'VRAM(GB)':<9} | {'EAG':<8} | {'Status'}")
+     print(f"{'-'*140}")
+
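+     # Training schedule: epochs 1..E_UNFREEZE-1 train only the new fc head at lr 1e-3 (backbone frozen);
+     # from epoch E_UNFREEZE onward every parameter is unfrozen and the learning rate drops to 1e-5.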
+     for epoch in range(1, EPOCHS + 1):
+         if epoch == E_UNFREEZE:
+             for param in model.parameters(): param.requires_grad = True
+             for pg in optimizer.param_groups: pg['lr'] = 1e-5
+             status_msg = "UNFROZEN"
+         else:
+             status_msg = "FROZEN" if epoch < E_UNFREEZE else "FINE-TUNING"
+
+         model.train()
+         tracker.start()
+         epoch_start_time = time.time()
+         running_loss, all_preds, all_labels = 0.0, [], []
+
+         # Progress Bar Initialization
+         pbar = tqdm(enumerate(trainloader), total=len(trainloader), desc=f"Epoch {epoch:02d}", leave=False)
+
+         optimizer.zero_grad()
+         for i, (inputs, labels) in pbar:
+             inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
+
+             with torch.cuda.amp.autocast():
+                 outputs = model(inputs)
+                 cls_loss = criterion(outputs, labels)
+                 l1_penalty = sum(p.abs().sum() for p in model.parameters() if p.requires_grad)
+                 loss = (cls_loss + LAMBDA_L1 * l1_penalty) / ACCUMULATION_STEPS
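+                 # The L1 penalty on trainable weights encourages sparsity; dividing by ACCUMULATION_STEPS
+                 # keeps the accumulated gradient equivalent to one averaged step over the effective batch.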
+
+             scaler.scale(loss).backward()
+
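+             # With 782 batches per epoch (50,000 / 64, default drop_last=False) and ACCUMULATION_STEPS = 8,
+             # the last 6 micro-batches never trigger a step; their gradients are cleared by the
+             # zero_grad() at the start of the next epoch.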
+             if (i + 1) % ACCUMULATION_STEPS == 0:
+                 scaler.unscale_(optimizer)
+                 torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
+                 scaler.step(optimizer)
+                 scaler.update()
+                 optimizer.zero_grad()
+
+             running_loss += cls_loss.item()
+             _, predicted = torch.max(outputs.data, 1)
+             all_preds.extend(predicted.cpu().numpy()); all_labels.extend(labels.cpu().numpy())
+
+             # Update progress bar with current loss
+             pbar.set_postfix({'loss': f"{cls_loss.item():.4f}"})
+
+         emissions_kg = tracker.stop()
+         duration = time.time() - epoch_start_time
+
+         # Energy Metrics
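+         # codecarbon reports energy in kWh; multiplying by 3,600,000 converts to joules (1 kWh = 3.6 MJ).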
+         e_gpu = tracker.final_emissions_data.gpu_energy * 3600000
+         e_cpu = tracker.final_emissions_data.cpu_energy * 3600000
+         e_ram = tracker.final_emissions_data.ram_energy * 3600000
+         total_energy = e_gpu + e_cpu + e_ram
+         cumulative_total_energy += total_energy
+
+         acc = (np.array(all_preds) == np.array(all_labels)).mean()
+         vram_peak = torch.cuda.max_memory_allocated(DEVICE) / (1024**3)
+         eag = acc / (total_energy / 1000) if total_energy > 0 else 0
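+         # EAG here is training accuracy per kilojoule of measured energy (total_energy is in J, /1000 gives kJ).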
+
+         # Detailed Audit Row
+         epoch_stats = {
+             "epoch": epoch, "status": status_msg, "loss": running_loss / len(trainloader),
+             "accuracy": acc, "total_energy_j": total_energy, "cumulative_energy_j": cumulative_total_energy,
+             "carbon_kg": emissions_kg, "vram_gb": vram_peak, "eag_metric": eag,
+             "model_flops": flops, "model_params": params
+         }
+         results.append(epoch_stats)
+         pd.DataFrame(results).to_csv(CSV_FILENAME, index=False)
+
+         best_tag = "*" if acc > best_acc else ""
+         if acc > best_acc: best_acc = acc; torch.save(model.state_dict(), os.path.join(SAVE_DIR, f"BEST_{MODEL_NAME}.pth"))
+
+         print(f"{epoch:02d}/{EPOCHS} | {epoch_stats['loss']:.4f} | {acc:.2%} | {total_energy:<9.2f} | {vram_peak:<9.3f} | {eag:<8.4f} | {status_msg}{best_tag}")
+
+     # Explicit memory cleanup for next model in run.bat
+     del model, trainloader, cached_trainset
+     torch.cuda.empty_cache()
+     gc.collect()
+
+     print(f"{'='*140}\n[FINISH] ResNet-50 on CIFAR-100 complete.")
+
+ if __name__ == '__main__':
+     main()
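
The sklearn metrics imported at the top (f1_score, precision_score, recall_score) are never used: the script logs only training accuracy and never touches the CIFAR-100 test split. A minimal held-out evaluation sketch, assuming the same DATA_PATH, normalization, and the BEST_resnet50_EDEN.pth checkpoint written above, could look like this:

import os
import numpy as np
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from sklearn.metrics import f1_score, precision_score, recall_score

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
DATA_PATH = r'C:\Users\shanm\Dataset Download\CIFAR100'   # same root as the training script
CKPT = os.path.join("saved_models", "BEST_resnet50_EDEN.pth")

transform = transforms.Compose([
    transforms.Resize(224),
    transforms.ToTensor(),
    transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2673, 0.2564, 0.2762)),
])
testset = torchvision.datasets.CIFAR100(root=DATA_PATH, train=False, download=False, transform=transform)
testloader = DataLoader(testset, batch_size=64, shuffle=False)

# Rebuild the architecture, load the best checkpoint, and switch to eval mode.
model = torchvision.models.resnet50(weights=None)
model.fc = nn.Linear(model.fc.in_features, 100)
model.load_state_dict(torch.load(CKPT, map_location=DEVICE))
model.to(DEVICE).eval()

preds, labels_all = [], []
with torch.no_grad():
    for inputs, labels in testloader:
        outputs = model(inputs.to(DEVICE))
        preds.extend(outputs.argmax(1).cpu().numpy())
        labels_all.extend(labels.numpy())

acc = (np.array(preds) == np.array(labels_all)).mean()
print(f"Test accuracy: {acc:.2%}")
print(f"Macro F1: {f1_score(labels_all, preds, average='macro'):.4f} | "
      f"Precision: {precision_score(labels_all, preds, average='macro'):.4f} | "
      f"Recall: {recall_score(labels_all, preds, average='macro'):.4f}")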