import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms, models
from torch.utils.data import DataLoader
import numpy as np

# Pick the GPU when available; every model and batch below is moved there.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device:", device)

# ---------------------------
# 1. DATASETS & DATALOADERS
# ---------------------------
# ToTensor scales pixels to [0, 1]; Normalize with mean=std=0.5 maps each
# RGB channel to roughly [-1, 1].
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

print("Loading CIFAR-10 (victim task)...")
train_data = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
test_data  = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
train_loader = DataLoader(train_data, batch_size=128, shuffle=True)
test_loader  = DataLoader(test_data,  batch_size=256, shuffle=False)

print("Loading CIFAR-100 as proxy (attacker)...")
# CIFAR-100 images act as the attacker's query set; its own labels are
# discarded later — the victim's predictions become the labels.
proxy_data = datasets.CIFAR100(root='./data', train=True, download=True, transform=transform)
proxy_loader = DataLoader(proxy_data, batch_size=256, shuffle=False)

# ---------------------------
# 2. TRAIN VICTIM MODEL
# ---------------------------
def train_epoch(model, loader, optimizer, criterion):
    """Run one training epoch and return the per-sample mean loss.

    Args:
        model: network to train; batches are moved to the device its
            parameters live on.
        loader: DataLoader yielding (data, target) batches; must wrap a
            sized dataset (``len(loader.dataset)`` is used).
        optimizer: optimizer stepping ``model``'s parameters.
        criterion: loss function returning a batch-mean scalar
            (e.g. ``nn.CrossEntropyLoss``).

    Returns:
        float: loss averaged over all samples in the dataset.
    """
    # Derive the device from the model itself instead of relying on the
    # module-level `device` global, so the helper works standalone.
    dev = next(model.parameters()).device
    model.train()
    running_loss = 0.0
    for data, target in loader:
        data, target = data.to(dev), target.to(dev)
        optimizer.zero_grad()
        out = model(data)
        loss = criterion(out, target)
        loss.backward()
        optimizer.step()
        # Weight the batch-mean loss by batch size so the final value is a
        # true per-sample average even when the last batch is smaller.
        running_loss += loss.item() * data.size(0)
    return running_loss / len(loader.dataset)

def eval_acc(model, loader):
    """Return top-1 accuracy of ``model`` over ``loader``, in percent.

    Args:
        model: network to evaluate; batches are moved to the device its
            parameters live on.
        loader: DataLoader yielding (data, target) batches.

    Returns:
        float: 100 * correct / total, or 0.0 for an empty loader.
    """
    # Use the model's own device rather than the module-level global.
    dev = next(model.parameters()).device
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for data, target in loader:
            data, target = data.to(dev), target.to(dev)
            pred = model(data).argmax(dim=1)
            correct += pred.eq(target).sum().item()
            total += target.size(0)
    # Guard the empty-loader case instead of raising ZeroDivisionError.
    return 100.0 * correct / total if total else 0.0

print("Building victim (ResNet-18)...")
# weights=None → train from scratch (no ImageNet pretraining).
victim = models.resnet18(weights=None)
# Replace the 1000-class ImageNet head with a 10-class head for CIFAR-10.
victim.fc = nn.Linear(victim.fc.in_features, 10)
victim = victim.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(victim.parameters(), lr=0.01, momentum=0.9)

EPOCHS_VICTIM = 5  # you can raise this to 10 if you want
print("Training victim model...")
for epoch in range(1, EPOCHS_VICTIM + 1):
    loss = train_epoch(victim, train_loader, optimizer, criterion)
    acc = eval_acc(victim, test_loader)
    print(f"[Victim] Epoch {epoch}/{EPOCHS_VICTIM}  Loss={loss:.4f}  Test Acc={acc:.2f}%")

victim_acc = eval_acc(victim, test_loader)
print(f"Final victim accuracy on CIFAR-10: {victim_acc:.2f}%")

# Save victim (like in local project)
torch.save(victim.state_dict(), "victim_model.pth")
print("Saved victim_model.pth")

# ---------------------------
# 3. QUERY VICTIM → BUILD KNOCKOFF DATASET
# ---------------------------
print("Querying victim on proxy dataset to build knockoff data...")

all_inputs = []
all_labels = []

# Label every proxy (CIFAR-100) image with the victim's hard prediction.
# The proxy's true labels are deliberately ignored (the `_` below).
victim.eval()
with torch.no_grad():
    for data, _ in proxy_loader:
        data = data.to(device)
        out = victim(data)
        pred = out.argmax(dim=1)          # victim's predicted labels
        all_inputs.append(data.cpu())
        all_labels.append(pred.cpu())

X_knockoff = torch.cat(all_inputs, dim=0)
y_knockoff = torch.cat(all_labels, dim=0)

print(f"Knockoff dataset size: {X_knockoff.shape[0]} samples")

# Option: you can subsample to go faster
MAX_SAMPLES = 20000  # e.g. only 20k queries
if X_knockoff.shape[0] > MAX_SAMPLES:
    # Random (unseeded) subset, so the kept queries differ between runs.
    idx = torch.randperm(X_knockoff.shape[0])[:MAX_SAMPLES]
    X_knockoff = X_knockoff[idx]
    y_knockoff = y_knockoff[idx]
    print(f"Subsampled knockoff dataset to {MAX_SAMPLES} samples")

knockoff_ds = torch.utils.data.TensorDataset(X_knockoff, y_knockoff)
knockoff_loader = DataLoader(knockoff_ds, batch_size=128, shuffle=True)

# ---------------------------
# 4. TRAIN SURROGATE MODEL
# ---------------------------
print("Building surrogate (ResNet-18)...")
# Surrogate mirrors the victim architecture: fresh ResNet-18, 10-class head.
surrogate = models.resnet18(weights=None)
surrogate.fc = nn.Linear(surrogate.fc.in_features, 10)
surrogate = surrogate.to(device)

optimizer_s = optim.SGD(surrogate.parameters(), lr=0.01, momentum=0.9)
criterion_s = nn.CrossEntropyLoss()

EPOCHS_SURROGATE = 5
print("Training surrogate on knockoff data...")
# Train only on (proxy image, victim prediction) pairs; CIFAR-10 test
# accuracy is reported each epoch to track how well the theft works.
for epoch in range(1, EPOCHS_SURROGATE + 1):
    loss = train_epoch(surrogate, knockoff_loader, optimizer_s, criterion_s)
    acc = eval_acc(surrogate, test_loader)
    print(f"[Surrogate] Epoch {epoch}/{EPOCHS_SURROGATE}  Loss={loss:.4f}  Test Acc={acc:.2f}%")

surrogate_acc = eval_acc(surrogate, test_loader)
print(f"Final surrogate accuracy on CIFAR-10: {surrogate_acc:.2f}%")

# ---------------------------
# 5. OPTIONAL: MEASURE FIDELITY
# ---------------------------
print("Measuring fidelity (agreement) between victim and surrogate...")
# Fidelity = fraction of test images where both models predict the same
# class — agreement, regardless of whether that class is correct.
victim.eval()
surrogate.eval()
agree = 0
total = 0
with torch.no_grad():
    for data, _ in test_loader:  # ground-truth labels are irrelevant here
        data = data.to(device)
        v_out = victim(data)
        s_out = surrogate(data)
        v_pred = v_out.argmax(dim=1)
        s_pred = s_out.argmax(dim=1)
        agree += (v_pred == s_pred).sum().item()
        total += data.size(0)

fidelity = 100.0 * agree / total
print(f"Fidelity (victim vs surrogate) on CIFAR-10 test: {fidelity:.2f}%")

print("\nDONE: KnockoffNets-style task-accuracy model extraction reproduced in Colab!")
print(f"Victim accuracy:    {victim_acc:.2f}%")
print(f"Surrogate accuracy: {surrogate_acc:.2f}%")
print(f"Fidelity:           {fidelity:.2f}%")
