from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
from torch import Tensor
from torch.nn.parameter import Parameter, UninitializedParameter

from models.conv2d import RobustConv2d
import math

import random
import numpy as np
import sys
import os
import argparse
import time
import logging

# Timestamp used to name log files for this run.
time_str = time.strftime('%Y-%m-%d-%H-%M')

# Fix all RNG seeds for reproducibility across python / torch / numpy.
randomSeed = 1
random.seed(randomSeed)  # python random seed
torch.manual_seed(randomSeed)  # pytorch random seed
np.random.seed(randomSeed)  # numpy random seed

# Root directory prefix for data / pretrained checkpoints (empty = cwd).
path = ''


parser = argparse.ArgumentParser(description='LeNet')

# Robust-aggregation hyper-parameters (pushed into layers by load_params()).
parser.add_argument('--norm',type=str, default='L12')
parser.add_argument('--K',type=int, default=1)  # number of re-weighting iterations in RobustSum
parser.add_argument('--e',type=float, default=5e-5)  # small constant added to distances (avoids div-by-zero)

parser.add_argument('--lmbd',type=float, default=0.6) # balance between L2 and L1

parser.add_argument('--lr',type=float, default=0.001)

parser.add_argument('--pretrain',type=int, default=1)

parser.add_argument('--train_mode',type=int, default=1) # 1: training and testing, 0: testing only
parser.add_argument('--adv_train', action='store_true', default=False)
parser.add_argument('--epsilon',type=float, default=0.3) # adv train with epsilon
parser.add_argument('--attack',type=str, default='Linf') # attack type: Linf, L2, L1, L0

parser.add_argument('--paradigm',type=int, default=1) # 1: only train lambda, 2: train all, 3: fine-tune all

parser.add_argument('--model_load_path', type=str, default=None, help='Path to load a pre-trained model state dict')
parser.add_argument('--no-console-log', action='store_true', default=False, help='Do not print logs to console')
parser.add_argument('--method', type=str, default='NRPM', help='Method to use: NRPM or LPM')
parser.add_argument('--model_save_path', type=str, default=None, help='Path to save a newly trained model')


args = parser.parse_args()
arch = "lenet"
# Default pretrained LeNet checkpoint, used when --model_load_path is not given.
pretrained_model = path + f"lenet_mnist_model.pth"

use_cuda=True

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("CUDA Available: ", torch.cuda.is_available())

# data
transform_train = transforms.Compose([
    transforms.ToTensor(),
])
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST(path + 'data', train=True, download=True, transform=transform_train),
    batch_size=64, shuffle=True)
# NOTE(review): this test_loader is re-assigned further down (batch_size=1
# over a 1000-sample Subset), so this definition is effectively dead.
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(path + 'data', train=False, download=True, transform=transform_train),
    batch_size=64, shuffle=True)


class RobustLinear(nn.Module):
    """Drop-in replacement for nn.Linear whose matrix product is aggregated
    by a RobustSum module instead of a plain sum.

    When the inner RobustSum uses the 'L21' norm, the output is a convex
    combination of the plain matmul (z0) and the robust aggregate (y),
    weighted by the learnable `lmbd` (passed through a sigmoid while
    args.train_mode == 1).
    """

    __constants__ = ['in_features', 'out_features']
    in_features: int
    out_features: int
    weight: Tensor

    def __init__(self, in_features: int, out_features: int, bias: bool = True,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
        # Defaults are placeholders; load_params() overwrites K/norm/epsilon/lmbd.
        self.robust_sum = RobustSum(K=3, norm="L2", gamma=4.0, delta=3.0)
        if bias:
            self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        # Same initialization scheme as nn.Linear.
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
            nn.init.uniform_(self.bias, -bound, bound)

    def forward(self, input: Tensor) -> Tensor:
        # y: robust aggregate; z0: ordinary matmul result.
        y, z0 = self.robust_sum(input, self.weight.T)

        # The combination logic for linear layers. Using 'L21' as a convention.
        if self.robust_sum.norm == "L21":
            weight = torch.sigmoid(self.robust_sum.lmbd) if args.train_mode == 1 else self.robust_sum.lmbd
            out = weight * z0 + (1 - weight) * y
        else:
            out = y

        # BUGFIX: the bias parameter was declared, initialized, and loaded
        # from the pretrained checkpoint but never applied in forward,
        # silently dropping the pretrained bias term.
        if self.bias is not None:
            out = out + self.bias
        return out

    def extra_repr(self) -> str:
        return f'in_features={self.in_features}, out_features={self.out_features}, bias={self.bias is not None}'


class RobustSum(nn.Module):
    """Iteratively re-weighted aggregation of the elementwise products
    x[i] * w[i, j], used as a robust alternative to a plain matmul sum.

    With norm='L2' this is exactly torch.matmul. With norm 'L1' or 'L21'
    it runs K rounds of inverse-distance re-weighting (an IRLS-style
    approximation of a weighted median of the products).

    forward(x, weight) returns (z, z0) where z0 is the plain matmul and
    z is the robust aggregate (z == z0 for the 'L2' norm).
    """

    def __init__(self, K=3, norm="L2", gamma=4.0, delta=3.0, epsilon=1e-3, lmbd=1.0):
        super().__init__()
        self.K = K              # number of re-weighting iterations
        self.norm = norm        # 'L2' (plain sum), 'L1' or 'L21' (robust)
        self.gamma = gamma      # kept for interface compatibility; unused in forward
        self.delta = delta      # kept for interface compatibility; unused in forward
        self.epsilon = epsilon  # added to distances to avoid division by zero
        # Learnable mixing weight consumed by the wrapping layer.
        self.lmbd = torch.nn.Parameter(torch.tensor([lmbd]), requires_grad=True)

    def forward(self, x, weight):
        # x: (B, D1), weight: (D1, D2) -> z: (B, D2)
        D1 = weight.shape[0]
        z = torch.matmul(x, weight)
        z0 = z
        if self.norm == 'L2':
            return z, z0

        # Per-term products: xw[b, j, i] = x[b, i] * weight[i, j], shape (B, D2, D1).
        xw = x.unsqueeze(1) * weight.T.unsqueeze(0)

        for _ in range(self.K):
            # Distance of each term from the current mean contribution z / D1.
            dist = torch.abs(xw - z.unsqueeze(-1) / D1)

            if self.norm in ('L1', 'L21'):
                # Inverse-distance weights (IRLS step for an L1 objective).
                w = 1 / (dist + self.epsilon)
            else:
                # BUGFIX: previously an unrecognized norm fell through and
                # raised NameError on `w`; the old 'L2' branch here was
                # unreachable (handled above) and hard-coded .cuda().
                raise ValueError(f"Unsupported norm: {self.norm!r}")

            w_norm = torch.nn.functional.normalize(w, p=1, dim=-1)
            z = D1 * (w_norm * xw).sum(dim=-1)
            # Guarded so the forward pass also runs on CPU-only machines.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

        return z, z0

class Net(nn.Module):
    """LeNet-style MNIST classifier.

    method='NRPM': robust conv/linear layers; forward returns
    (log_probs, zs) where zs = [input, conv1 out, conv2 out,
    fc1+dropout out, fc2 out] for embedding analysis.
    method='LPM': standard layers; forward returns log_probs only.
    """

    def __init__(self, method='NRPM'):
        super(Net, self).__init__()
        if method == 'NRPM':
            self.conv1 = RobustConv2d(1, 10, kernel_size=5)
            self.conv2 = RobustConv2d(10, 20, kernel_size=5)
            self.fc1 = RobustLinear(320, 50)
            self.fc2 = RobustLinear(50, 10)
        elif method == 'LPM':
            self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
            self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
            self.fc1 = nn.Linear(320, 50)
            self.fc2 = nn.Linear(50, 10)
        else:
            # BUGFIX: previously an unknown method silently produced a model
            # with no layers, failing later with AttributeError.
            raise ValueError(f"Unknown method: {method!r}")

        # BUGFIX: remember the constructed method instead of consulting the
        # global args.method in forward(), which could silently disagree
        # with how this instance was built.
        self.method = method
        self.conv2_drop = nn.Dropout2d()

    def forward(self, x):
        zs = [x]

        x = self.conv1(x)
        if self.method == 'NRPM':
            zs.append(x)
        x = F.relu(F.max_pool2d(x, 2))

        x = self.conv2(x)
        if self.method == 'NRPM':
            zs.append(x)
        x = F.relu(F.max_pool2d(self.conv2_drop(x), 2))

        x = x.view(-1, 320)
        x = self.fc1(x)
        x = F.dropout(x, training=self.training)
        if self.method == 'NRPM':
            zs.append(x)

        x = self.fc2(x)

        if self.method == 'LPM':
            return F.log_softmax(x, dim=1)
        zs.append(x)
        return F.log_softmax(x, dim=1), zs

# MNIST Test dataset and dataloader declaration
mnist_dataset = datasets.MNIST(
    path+'data',
    train=False,
    download=True,
    transform=transforms.Compose([
        transforms.ToTensor(),
    ]),
)

# Evaluate on the first 1000 test images only (keeps attack evaluation fast).
sampled_dataset = torch.utils.data.Subset(mnist_dataset, range(1000))

# batch_size=1 because test() compares per-sample predictions with .item().
test_loader = torch.utils.data.DataLoader(sampled_dataset, batch_size=1, shuffle=True)

# Define what device we are using
print("CUDA Available: ",torch.cuda.is_available())
device = torch.device("cuda" if (use_cuda and torch.cuda.is_available()) else "cpu")

# Initialize the network
model = Net(method=args.method).to(device)

# Load the pretrained model
if args.model_load_path or (args.pretrain and args.method == 'NRPM'):
    # Only load pretrained for NRPM, LPM trains from scratch
    path_to_load = args.model_load_path if args.model_load_path else pretrained_model
    # The default checkpoint is mapped onto CPU; load_state_dict then copies
    # tensors onto the model's device.
    device_to_use = device if args.model_load_path else 'cpu'
    
    print(f"Loading pretrained weights from: {path_to_load}")
    
    # Load the state dictionary from the saved model file
    pretrained_dict = torch.load(path_to_load, map_location=device_to_use, weights_only=True)
    
    # Get the state dictionary of the current model instance
    model_dict = model.state_dict()
    
    # Filter pretrained_dict to keep only keys that exist in model_dict
    compatible_pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    
    # Update the current model's state dict with the compatible weights from the pretrained model.
    # Keys in model_dict that are not in compatible_pretrained_dict (e.g., our new 'lmbd' parameters) will be unaffected.
    model_dict.update(compatible_pretrained_dict)
    
    # Load the updated state dictionary into the model.
    # This is now guaranteed to work because model_dict contains a complete set of keys for our model.
    model.load_state_dict(model_dict)
    print("Successfully loaded compatible weights from the pretrained model.")

# Set the model in evaluation mode. In this case this is for the Dropout layers
model.eval()


# FGSM: single-step sign-gradient attack.
def fgsm_attack(image, epsilon, data_grad):
    """Perturb `image` by epsilon along the sign of its loss gradient,
    then clamp back into the valid pixel range [0, 1]."""
    perturbation = epsilon * data_grad.sign()
    return torch.clamp(image + perturbation, 0, 1)

def lnorm_attack(image, epsilon, data_grad, p=None):
    """One-step Lp-norm-bounded attack.

    The gradient is normalized to unit Lp norm, a fixed step of 0.3 is
    taken, and the resulting perturbation is rescaled to Lp norm
    `epsilon` before clamping pixels to [0, 1].

    p: norm order; defaults to 1 when args.attack == "L1", else 2
       (preserves the original global-driven behavior).
    """
    if p is None:
        p = 1 if args.attack == "L1" else 2
    # Small constants guard both normalizations against division by zero.
    # BUGFIX: a zero gradient previously made delta_norms == 0, yielding
    # an inf factor and NaN output (0 * inf).
    grad_norm = torch.norm(data_grad.view(-1), p=p) + 1e-10
    grad = data_grad / grad_norm
    adv_images = image.detach() + 0.3 * grad
    delta = adv_images - image
    delta_norm = torch.norm(delta.view(-1), p=p) + 1e-10
    delta = delta * (epsilon / delta_norm)
    return torch.clamp(image + delta, min=0, max=1).detach()

def l0_attack(image, epsilon, data_grad):
    """L0-style attack: for the int(epsilon) positions with the largest
    (signed) gradient entries, add 5x the pixel's current intensity,
    then clamp back into [0, 1].

    NOTE(review): the step direction is image * mask (scaled pixel
    values), not the gradient itself — presumably intentional, but
    worth confirming against the paper/author intent.
    """
    k = int(epsilon)
    _, flat_idx = torch.topk(data_grad.view(-1), k)
    mask = torch.zeros_like(image)
    mask.view(-1)[flat_idx] = 1
    step = 5.0 * (image * mask)
    perturbed = image.detach() + step
    delta = perturbed - image
    return torch.clamp(image + delta, min=0, max=1).detach()

def test( model, device, test_loader, epsilon ):
    """Evaluate `model` under a one-step attack of strength `epsilon`.

    Returns (accuracy, adv_examples). Accuracy is measured over all
    batches; samples whose clean prediction is already wrong are skipped
    but still counted in the denominator (FGSM-tutorial convention).
    For NRPM models with epsilon > 0, also reports the average
    L1/L2/L-inf norms of per-layer embedding differences between the
    clean and adversarial forward passes.
    """
    correct = 0
    adv_examples = []

    # Per-layer embedding difference norms: input x plus z1..z4 (5 entries,
    # matching the NRPM forward's zs list).
    diff_norms_l1 = [[] for _ in range(5)]
    diff_norms_l2 = [[] for _ in range(5)]
    diff_norms_linf = [[] for _ in range(5)]

    for data, target in test_loader:
        data, target = data.to(device), target.to(device)
        data.requires_grad = True

        # Clean forward pass.
        # BUGFIX: the unconditional `output, zs = model(data)` crashed for
        # LPM, whose forward returns a single tensor.
        output = model(data)
        if args.method == 'NRPM':
            output, zs = output
        else:
            zs = None

        init_pred = output.max(1, keepdim=True)[1]
        if init_pred.item() != target.item():
            continue  # already misclassified; nothing to attack

        loss = F.nll_loss(output, target)
        model.zero_grad()
        loss.backward()
        data_grad = data.grad.data

        # Call FGSM Attack
        if epsilon != 0:
            if args.attack == 'Linf':
                perturbed_data = fgsm_attack(data, epsilon, data_grad)
            else:
                perturbed_data = lnorm_attack(data, epsilon, data_grad)
        else:
            perturbed_data = data

        # Re-classify the perturbed image
        output = model(perturbed_data)
        if args.method == 'NRPM':
            output, zs_adv = output
            # BUGFIX: the diff_norms_* accumulators were allocated and
            # printed but never populated, so the table was always zeros.
            if epsilon > 0 and zs is not None:
                for i, (clean, adv) in enumerate(zip(zs, zs_adv)):
                    diff = (clean - adv).detach().reshape(-1)
                    diff_norms_l1[i].append(torch.norm(diff, p=1).item())
                    diff_norms_l2[i].append(torch.norm(diff, p=2).item())
                    diff_norms_linf[i].append(diff.abs().max().item())

        final_pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
        if final_pred.item() == target.item():
            correct += 1
            if (epsilon == 0) and (len(adv_examples) < 5):
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append( (init_pred.item(), final_pred.item(), adv_ex) )
        else:
            if len(adv_examples) < 5:
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append( (init_pred.item(), final_pred.item(), adv_ex) )

    final_acc = correct/float(len(test_loader))
    print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, len(test_loader), final_acc))

    # Report the learned mixing weights of every robust layer.
    lmbd_vals_linear = [(torch.sigmoid(m.robust_sum.lmbd) if args.train_mode == 1 else m.robust_sum.lmbd).item() for m in model.modules() if isinstance(m, RobustLinear)]
    lmbd_vals_conv = [(torch.sigmoid(m.lmbd) if args.train_mode == 1 else m.lmbd).item() for m in model.modules() if isinstance(m, RobustConv2d)]
    print("lmbd (linear):", lmbd_vals_linear)
    print("lmbd (conv):", lmbd_vals_conv)

    if epsilon > 0:
        print("\n--- Embedding Difference (Clean vs. Adversarial) ---")
        print(f"Attack Epsilon: {epsilon}")
        print("Layer\t|\tL1 Norm\t\t|\tL2 Norm\t\t|\tL-inf Norm")
        print("-"*60)
        layer_names = ["x - x'", "z1 - z1'", "z2 - z2'", "z3 - z3'", "z4 - z4'"]
        for i in range(len(diff_norms_l1)):
            avg_l1 = np.mean(diff_norms_l1[i]) if diff_norms_l1[i] else 0
            avg_l2 = np.mean(diff_norms_l2[i]) if diff_norms_l2[i] else 0
            avg_linf = np.mean(diff_norms_linf[i]) if diff_norms_linf[i] else 0
            print(f"{layer_names[i]}\t|\t{avg_l1:8.2f}\t|\t{avg_l2:8.2f}\t|\t{avg_linf:8.2f}")
        print("-"*60)

    return final_acc, adv_examples

accuracies = []
examples = []

# Attack-strength sweep per attack type.
# NOTE(review): `epsilons` is not referenced anywhere else in this view —
# evaluation below uses args.epsilon directly. Also, an unrecognized
# --attack value would leave `epsilons` undefined.
if args.attack == "Linf":
    epsilons = [0, .05, .1, .15, .2, .25, .3]
elif args.attack == 'L2':
    epsilons = [0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
elif args.attack == "L1":
    epsilons = [0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0]
elif args.attack == "L0":
    epsilons = [0, 128, 256, 512]
    
# Shorthands used by load_params() below.
K, norm = args.K, args.norm
print("K, norm, lmbd: ", K, norm, args.lmbd)

def load_params(model):
    """Push the CLI hyper-parameters (K, norm, epsilon, lmbd) into every
    robust layer of `model`, in place.

    Conv layers take the --norm value and hold lmbd directly on the
    module; linear layers are pinned to the 'L21' norm (so their forward
    blends robust and plain outputs) and hold lmbd on their RobustSum.
    """
    n_conv = n_linear = 0
    for m in model.modules():
        if isinstance(m, RobustConv2d):
            m.robust_sum.K = K
            m.robust_sum.norm = args.norm
            m.robust_sum.epsilon = args.e
            m.lmbd.data.fill_(args.lmbd)
            n_conv += 1
        if isinstance(m, RobustLinear):
            m.robust_sum.K = K
            m.robust_sum.norm = "L21"
            m.robust_sum.epsilon = args.e
            m.robust_sum.lmbd.data.fill_(args.lmbd)
            n_linear += 1

    print(f"Replace {n_conv} conv layers and {n_linear} linear layers.")
    return model

# Apply the CLI hyper-parameters to the robust layers in place.
model = load_params(model)

def test_all(model_to_test):
    """Run clean and adversarial evaluation; return (natural_acc, adv_acc).

    BUGFIX: `test` returns (accuracy, adv_examples); previously the whole
    tuples were returned here, so log_results wrote tuple reprs (including
    numpy arrays) into results.csv. Only the accuracies are kept now.
    """
    # test natural
    natural_acc, _ = test(model_to_test, device, test_loader, epsilon=0)
    # test adv
    adv_acc, _ = test(model_to_test, device, test_loader, epsilon=args.epsilon)
    return natural_acc, adv_acc

def adv_train(adv=False, epochs=10):
    """Train the module-level `model` for `epochs` epochs, optionally on
    FGSM-perturbed inputs (adv=True), evaluating after each epoch and
    saving the final weights if --model_save_path is set.

    Paradigm 1 with NRPM freezes every parameter except the lmbd mixing
    weights; other paradigms (and LPM) train all parameters.
    """
    # Set requires_grad based on the paradigm BEFORE building the
    # optimizer, so Adam only tracks (and keeps state for) trainable
    # parameters. Previously the optimizer was built over all parameters
    # first and then some were frozen.
    if args.method == 'NRPM':
        if args.paradigm == 1: # only train lambda
            for name, para in model.named_parameters():
                if "lmbd" not in name:
                    para.requires_grad = False
        # For paradigm 2 and 3, all parameters are trainable by default for NRPM
    else: # LPM
        # For LPM, all parameters are always trainable
        for para in model.parameters():
            para.requires_grad = True

    optimizer = optim.Adam((p for p in model.parameters() if p.requires_grad), lr=args.lr)

    print("\n--- Starting Adversarial Training ---")
    print(f"Method: {args.method}, Paradigm: {args.paradigm}, Adv training: {adv}, Epsilon: {args.epsilon}")

    for epoch in range(1, epochs + 1):
        model.train()
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)

            if adv:
                # Extra forward/backward purely to obtain input gradients
                # for the attack. BUGFIX: previously this pass ran even
                # when adv=False, wasting a full forward+backward per batch.
                data.requires_grad = True
                output = model(data)
                if args.method == 'NRPM':
                    output = output[0]
                loss = F.nll_loss(output, target)
                model.zero_grad()
                loss.backward()
                perturbed_data = fgsm_attack(data, args.epsilon, data.grad.data)
            else:
                perturbed_data = data

            # Train on the (potentially perturbed) data
            optimizer.zero_grad()
            output = model(perturbed_data)
            if args.method == 'NRPM':
                output = output[0]
            loss = F.nll_loss(output, target)
            loss.backward()
            optimizer.step()

            if batch_idx % 100 == 0:
                print(f"Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)}] | Loss: {loss.item():.4f}")

        # After each epoch, run a test and log results
        print(f"\nEpoch {epoch} completed. Running tests...")
        model.eval()
        natural_acc, adv_acc = test_all(model)
        log_results(epoch, natural_acc, adv_acc)

    # After all epochs are done, save the final model
    print(f"\n--- Adversarial training finished ---")
    if args.model_save_path:
        print(f"Saving robust model to {args.model_save_path}")
        torch.save(model.state_dict(), args.model_save_path)

# Setup logging
# Per-method results directory; created on first run.
log_dir_name = f"mnist_result/figure4/{args.method}/"
if not os.path.exists(log_dir_name):
    os.makedirs(log_dir_name)

# This is a log file for console-like output, not the results csv
# (the name encodes the full hyper-parameter configuration plus timestamp).
filename = f"K{args.K}_norm{args.norm}_lambda{args.lmbd}_adv{args.adv_train}_eps{args.epsilon}_paradigm{args.paradigm}_{time_str}.log"

# Append one epoch's accuracies to a cumulative CSV in the log directory.
def log_results(epoch, natural_acc, adv_acc):
    """Append a (epoch, natural_acc, adv_acc) row to results.csv, writing
    the header line first if the file does not exist yet."""
    csv_path = os.path.join(log_dir_name, "results.csv")
    write_header = not os.path.exists(csv_path)
    with open(csv_path, "a") as out:
        if write_header:
            out.write("epoch,natural_accuracy,robust_accuracy\n")
        out.write(f"{epoch},{natural_acc},{adv_acc}\n")

if __name__ == '__main__':
    # Setup logging to file and console
    log_file_path = os.path.join(log_dir_name, filename)
    if not args.no_console_log:
        # Mirrors every stdout write into the log file as well.
        class Tee(object):
            def __init__(self, *files):
                self.files = files
            def write(self, obj):
                for f in self.files:
                    f.write(obj)
                    f.flush()
            def flush(self):
                for f in self.files:
                    f.flush()
        
        # Line-buffered; the handle is intentionally left open for the
        # lifetime of the process (released by the OS on exit).
        f = open(log_file_path, 'w', buffering=1)
        sys.stdout = Tee(sys.stdout, f)

    if args.train_mode == 1:
        # Clean up previous results if they exist for a fresh run
        results_csv_path = os.path.join(log_dir_name, "results.csv")
        if os.path.exists(results_csv_path):
            os.remove(results_csv_path)
        adv_train(adv=args.adv_train)
    else: # test mode
        test_all(model)



