import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm

from torchvision import transforms
import torchvision

from torch.utils.data import DataLoader
import math

from PIL import Image

# Largest integer level of the symmetric quantization grid
# (1 => weights are scaled so they round to {-1, 0, 1}).
quantized_max_int = 1
# NOTE(review): not referenced anywhere in this file — presumably consumed by an
# external layer-freezing schedule; confirm before removing.
num_epochs_to_freeze = 1

def e_x(num):
    """Return the natural exponential e**num."""
    value = math.exp(num)
    return value


def quantized_loss(weights, max_int=1):
    """Quantization error of *weights* on a symmetric integer grid.

    Scales the tensor so its largest magnitude maps to ``max_int``, rounds the
    scaled values to the nearest integer, and returns the absolute rounding
    error accumulated over the leading dimension (exactly what the original
    per-slice loop computed, vectorized).

    Args:
        weights: weight tensor, rank >= 1.
        max_int: largest integer level of the grid. Defaults to 1, the only
            value the module-level ``quantized_max_int`` ever held.

    Returns:
        Tensor of accumulated rounding errors (0-dim for 1-D input).
    """
    abs_max_value = torch.max(torch.abs(weights))
    if abs_max_value == 0:
        # Bug fix: an all-zero tensor previously divided by zero and produced
        # NaN; zero weights quantize exactly, so the error is zero.
        return torch.zeros_like(weights).sum(dim=0)
    weight_scale = max_int / abs_max_value
    err = torch.abs(torch.round(weight_scale * weights) - weight_scale * weights)
    return err.sum(dim=0)

def quan_loss(model, ratio, max_int=1):
    """Quantization penalty for the conv layer named ``conv1`` of *model*.

    Each output-channel kernel is scaled independently so that its largest
    magnitude maps to ``max_int``; the penalty is the sum, over input-channel
    slices, of the Frobenius norm of the rounding error, scaled by ``ratio``.

    Args:
        model: module searched via ``named_modules()`` for a Conv2d named 'conv1'.
        ratio: multiplier applied to the penalty.
        max_int: integer grid magnitude. Defaults to 1, the only value the
            module-level ``quantized_max_int`` ever held.

    Returns:
        Scalar penalty tensor (0 if no matching layer is found).
    """
    all_quantized_loss = 0
    for name, module in model.named_modules():
        if isinstance(module, nn.Conv2d) and name == 'conv1':
            # Penalize each output-channel kernel with its own scale.
            for group_weight in module.weight:
                abs_max_value = torch.max(torch.abs(group_weight))
                if abs_max_value == 0:
                    # Bug fix: zero kernels previously divided by zero (NaN);
                    # they quantize exactly, so contribute nothing.
                    continue
                weight_scale = max_int / abs_max_value
                delta = torch.round(weight_scale * group_weight) - weight_scale * group_weight
                # Per-input-channel Frobenius norms, matching the original
                # inner loop, computed in one vectorized pass.
                all_quantized_loss += torch.norm(
                    delta.reshape(delta.shape[0], -1), dim=1).sum() * ratio
    return all_quantized_loss


def quan_loss_layer1(model, ratio, max_int=1):
    """Quantization penalty over every Conv2d inside ``layer1`` of *model*.

    Same per-output-channel scheme as :func:`quan_loss`, applied to all conv
    layers whose qualified name starts with ``'layer1.'``.

    Bug fix: the original iterated ``named_modules()`` of every matching
    module, so a conv was counted once for itself AND once per matching
    ancestor container (e.g. via both 'layer1.0' and 'layer1.0.conv1'),
    inflating the penalty. Each conv is now counted exactly once.

    Args:
        model: module searched via ``named_modules()``.
        ratio: multiplier applied to the penalty.
        max_int: integer grid magnitude (defaults to 1, matching the
            module-level ``quantized_max_int``).

    Returns:
        Scalar penalty tensor (0 if no matching layer is found).
    """
    all_quantized_loss = 0
    for name, module in model.named_modules():
        # Match the conv layers directly instead of re-scanning containers.
        if name.startswith('layer1.') and isinstance(module, nn.Conv2d):
            for group_weight in module.weight:
                abs_max_value = torch.max(torch.abs(group_weight))
                if abs_max_value == 0:
                    # Zero kernels quantize exactly; also avoids a NaN from
                    # the original's division by zero.
                    continue
                weight_scale = max_int / abs_max_value
                delta = torch.round(weight_scale * group_weight) - weight_scale * group_weight
                # Per-input-channel Frobenius norms (vectorized inner loop).
                all_quantized_loss += torch.norm(
                    delta.reshape(delta.shape[0], -1), dim=1).sum() * ratio
    return all_quantized_loss

num_conv_groups = [64, 64, 128, 128, 256, 256, 512, 512]  # assumed output channels of each conv layer
# NOTE(review): these widths look like a ResNet-18/34-style stack, but the
# model built below is ResNet-50, and num_conv_groups is never read in this
# file — confirm before relying on it.
l1_alpha_per_group = [0.001, 0.001, 0.002, 0.002, 0.003, 0.003, 0.004, 0.004]

def group_lasso_regularization_differ(model, l1_alpha_per_group):
    """Per-group L1 (group-lasso) penalty on output channels 8..15 of every Conv2d.

    For each Conv2d in *model*, channels 8 through 15 are penalised, each with
    its own coefficient ``l1_alpha_per_group[i - 8]``.

    NOTE(review): only this fixed window of channels is penalised —
    presumably to drive a fixed slice of filters towards zero for pruning;
    confirm the intent.

    Args:
        model: module searched via ``named_modules()``.
        l1_alpha_per_group: at least 8 per-group L1 coefficients.

    Returns:
        Scalar penalty (0 if no eligible conv layers).
    """
    l1_loss = 0
    for name, module in model.named_modules():
        if isinstance(module, nn.Conv2d):
            weight = module.weight
            if weight.shape[0] < 16:
                # Robustness: the hard-coded 8..16 window would index out of
                # range on layers with fewer than 16 filters; skip them.
                continue
            for i in range(8, 16):
                group_weight = weight[i, :, :, :]
                l1_loss += l1_alpha_per_group[i - 8] * torch.norm(group_weight, 1)
    return l1_loss


def l2_regularization(model, l2_alpha):
    """Return ``l2_alpha`` times the summed 0.5*||W||^2 of all Conv2d weights.

    Consistency/bug fix: uses ``isinstance`` (like the other penalty helpers
    in this file) instead of ``type(...) is``, so Conv2d subclasses are
    included too.

    Args:
        model: module searched via ``modules()``.
        l2_alpha: regularisation coefficient.

    Returns:
        Scalar penalty (0 if there are no conv layers).
    """
    l2_loss = sum((module.weight ** 2).sum() / 2.0
                  for module in model.modules()
                  if isinstance(module, nn.Conv2d))
    return l2_alpha * l2_loss


# Module-level activation helpers. Neither is referenced in the visible code —
# presumably used interactively or by an external caller; confirm before removing.
softmax = torch.nn.Softmax(dim=1)
sigmoid = torch.nn.Sigmoid()

def resize_longest_side(image, max_length):
    """Resize a PIL image so its longer side equals *max_length*.

    Aspect ratio is preserved; the shorter side is truncated to an int.
    """
    width, height = image.size
    if width > height:
        target = (max_length, int(max_length * height / width))
    else:
        target = (int(max_length * width / height), max_length)
    return image.resize(target, Image.BILINEAR)

def pad_to_square(image):
    """Zero-pad a PIL image to a centred square sized by its longer dimension."""
    width, height = image.size
    side = max(width, height)
    pad_left = (side - width) // 2
    pad_top = (side - height) // 2
    # Right/bottom absorb the odd pixel when the deficit is not even.
    padding = (pad_left, pad_top, side - width - pad_left, side - height - pad_top)
    return transforms.functional.pad(image, padding, fill=0)

pic_size=384  # network input resolution: longest side, then padded to a square

# Evaluation preprocessing: aspect-preserving resize, centred zero-padding to
# a square, 3-channel grayscale, then ImageNet normalisation.
transform = transforms.Compose([
    transforms.Lambda(lambda img: resize_longest_side(img, pic_size)),  # Resize the longest side to 384
    transforms.Lambda(pad_to_square),  # Pad to square 384x384
    transforms.Grayscale(num_output_channels=3),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

# Training preprocessing: same pipeline plus RandomErasing augmentation.
# NOTE(review): RandomErasing runs BEFORE Normalize, so erased pixels are 0 in
# raw [0,1] space rather than the post-normalisation mean — confirm intended.
transformtrain = transforms.Compose([
    transforms.Lambda(lambda img: resize_longest_side(img, pic_size)),  # Resize the longest side to 384
    transforms.Lambda(pad_to_square),  # Pad to square 384x384
    transforms.Grayscale(num_output_channels=3),
    transforms.ToTensor(),
    torchvision.transforms.RandomErasing(p=0.2, scale=(0.02, 0.2), ratio=(0.5, 2), value=0, inplace=False),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def make_dir(path):
    """Create *path* (including parent directories) if it does not exist.

    Uses ``exist_ok=True`` instead of the original exists()+makedirs pair,
    which had a check-then-act race and could raise if the directory appeared
    between the two calls.
    """
    import os
    os.makedirs(path, exist_ok=True)

make_dir('models')  # checkpoints from the training loop below are saved here

batch_size = 8

# ImageFolder datasets (Stanford-Cars-style layout); training data gets the
# RandomErasing-augmented pipeline, validation the plain one.
train_set = torchvision.datasets.ImageFolder(root='../data/sdfcar/train', transform=transformtrain)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True,
                          num_workers=0)  # batch size trades optimisation quality against speed

val_dataset = torchvision.datasets.ImageFolder(root='../data/sdfcar/valid', transform=transform)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True,
                        num_workers=0)  # shuffle is unnecessary for validation but harmless

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# NOTE(review): the modern torchvision API expects a Weights enum or string
# here (e.g. weights=torchvision.models.ResNet50_Weights.DEFAULT);
# `weights=True` only works on some versions — confirm against the installed
# torchvision.
net = torchvision.models.resnet50(weights=True)
# net = torch.load("models/0-0.00000_59.482%_679.28232_82.872%_quan_conv1_1bitfreezen.pth")
num_ftrs = net.fc.in_features
net.fc = nn.Linear(num_ftrs, 196)  # replace the classifier head: 196 classes (original comment wrongly said 2)

criterion = nn.CrossEntropyLoss()
net = net.to(device)
optimizer = torch.optim.AdamW(lr=0.0001, params=net.parameters())
eposhs = 35  # total training epochs (sic: "epochs")

l1_alpha = 0.001  # L1 regularisation coefficient — not used in the loop below as written


# Learning-rate step-down schedule: epoch index -> new learning rate.
lr_schedule = {0: 0.00008, 6: 0.00005, 10: 0.00002, 16: 0.000005,
               20: 0.000002, 25: 0.0000007, 30: 0.0000004}

for epoch in range(eposhs):
    print(f'--------------------{epoch}--------------------')
    # Bug fix: net.eval() is called before validation below, so without this
    # the model trained with frozen BatchNorm/dropout from epoch 1 onwards.
    net.train()
    correct_train = 0
    sum_loss_train = 0
    total_train = 0
    quan_loss_record = 0  # quantization penalty currently disabled (see loss below)

    for inputs, labels in tqdm(train_loader):
        inputs = inputs.to(device)
        labels = labels.to(device)

        output = net(inputs)
        loss = criterion(output, labels)  # + quan_loss_record  (quantization loss disabled)
        sum_loss_train += loss.item()  # bug fix: training loss was never accumulated
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        _, predicted = torch.max(output.data, 1)
        total_train += labels.size(0)
        correct_train += (predicted == labels).sum().item()

    print("quan_loss: ", quan_loss_record)
    acc_train = correct_train / max(total_train, 1)  # bug fix: denominator was total+1, biasing accuracy low
    print('训练准确率是{:.3f}%:'.format(acc_train * 100))

    if epoch in lr_schedule:
        # NOTE(review): rebuilding the optimizer discards the AdamW moment
        # estimates at every step-down (this matches the original behaviour);
        # setting param_group['lr'] instead would preserve optimizer state —
        # confirm which is intended.
        optimizer = torch.optim.AdamW(lr=lr_schedule[epoch], params=net.parameters())

    net.eval()
    correct_val = 0
    sum_loss_val = 0
    total_val = 0
    with torch.no_grad():  # validation needs no gradients; saves memory and time
        for inputs, labels in tqdm(val_loader):
            inputs = inputs.to(device)
            labels = labels.to(device)
            # Bug fix: the forward pass was previously run twice per batch.
            output = net(inputs)
            loss = criterion(output, labels)
            sum_loss_val += loss.item()
            total_val += labels.size(0)
            _, predicted = torch.max(output.data, 1)
            correct_val += (predicted == labels).sum().item()

    acc_val = correct_val / max(total_val, 1)
    print('验证准确率是{:.3f}%:'.format(acc_val * 100))
    # Checkpoint name encodes epoch, train loss/acc, val loss/acc.
    torch.save(net, 'models/{}-{:.5f}_{:.3f}%_{:.5f}_{:.3f}%resnet50basegray.pth'.format(
        epoch, sum_loss_train, acc_train * 100, sum_loss_val, acc_val * 100))