import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm

from torchvision import transforms
import torchvision

from torch.utils.data import DataLoader
import math

# Largest integer of the symmetric quantization range (1 => 1-bit/binary weights).
quantized_max_int = 1
# Intended number of leading epochs with frozen layers (apparently unused in this chunk).
num_epochs_to_freeze = 1

def e_x(num):
    """Return the natural exponential of *num* (i.e. e ** num)."""
    result = math.exp(num)
    return result


def quantized_loss(weights, max_int=1):
    """Per-element quantization error of *weights* after symmetric scaling.

    The tensor is scaled so that its largest absolute value maps to
    *max_int*, and the distance of every scaled weight to its nearest
    integer is accumulated over the first (filter) dimension.

    Args:
        weights: tensor of shape (N, ...); the error is summed over dim 0.
        max_int: largest integer of the target quantized range
            (default 1, matching the module-level ``quantized_max_int``).

    Returns:
        Tensor of shape ``weights.shape[1:]`` holding the accumulated
        absolute rounding error; zeros when every weight is 0.
    """
    abs_max_value = torch.max(torch.abs(weights))
    # Guard: an all-zero tensor would give an infinite scale (NaN loss).
    if abs_max_value == 0:
        return torch.zeros_like(weights).sum(dim=0)
    scaled = (max_int / abs_max_value) * weights
    # Vectorized form of the original per-row Python loop.
    return torch.sum(torch.abs(torch.round(scaled) - scaled), dim=0)

def quan_loss(model, ratio, max_int=1):
    """Quantization penalty for the convolution layer named ``conv1``.

    For each output filter of that layer, the weights are scaled so their
    largest magnitude maps to *max_int*; the penalty is the sum of the
    Frobenius norms (one per input channel) of the distance between the
    scaled weights and their nearest integers, each weighted by *ratio*.

    Args:
        model: module whose submodule named exactly ``'conv1'`` is penalized.
        ratio: multiplier applied to every per-channel norm.
        max_int: largest integer of the target quantized range
            (default 1, matching the module-level ``quantized_max_int``).

    Returns:
        Scalar tensor, or 0 when no matching layer (or only zero filters) exists.
    """
    all_quantized_loss = 0
    for name, module in model.named_modules():
        if isinstance(module, nn.Conv2d) and name == 'conv1':
            weight = module.weight
            # One quantization scale per output filter.
            for i in range(weight.shape[0]):
                group_weight = weight[i, :, :, :]
                abs_max_value = torch.max(torch.abs(group_weight))
                # Guard: an all-zero filter would yield an infinite scale (NaN).
                if abs_max_value == 0:
                    continue
                scaled = (max_int / abs_max_value) * group_weight
                diff = torch.round(scaled) - scaled
                # Vectorized replacement of the per-channel Python loop:
                # Frobenius norm of each (kh, kw) slice, then summed.
                all_quantized_loss += torch.norm(diff.flatten(1), dim=1).sum() * ratio
    return all_quantized_loss


def quan_loss_layer1(model, ratio, max_int=1):
    """Quantization penalty over every Conv2d inside ``layer1``.

    Applies the same per-filter penalty as :func:`quan_loss` to every conv
    layer whose qualified name starts with ``'layer1.'``.

    BUG FIX: the original nested ``named_modules()`` scan visited each conv
    twice (once through its parent block and once as itself), double-counting
    the penalty; a single flat scan visits each conv exactly once.

    Args:
        model: module containing a ``layer1`` submodule (e.g. a ResNet).
        ratio: multiplier applied to every per-channel norm.
        max_int: largest integer of the target quantized range
            (default 1, matching the module-level ``quantized_max_int``).

    Returns:
        Scalar tensor, or 0 when no matching conv layer exists.
    """
    all_quantized_loss = 0
    for name, module in model.named_modules():
        if name.startswith('layer1.') and isinstance(module, nn.Conv2d):
            weight = module.weight
            # One quantization scale per output filter.
            for i in range(weight.shape[0]):
                group_weight = weight[i, :, :, :]
                abs_max_value = torch.max(torch.abs(group_weight))
                # Guard: an all-zero filter would yield an infinite scale (NaN).
                if abs_max_value == 0:
                    continue
                scaled = (max_int / abs_max_value) * group_weight
                diff = torch.round(scaled) - scaled
                # Frobenius norm per input channel, summed (vectorized loop).
                all_quantized_loss += torch.norm(diff.flatten(1), dim=1).sum() * ratio
    return all_quantized_loss

num_conv_groups = [64, 64, 128, 128, 256, 256, 512, 512]  # assumed output-channel count of each conv stage
# Per-stage L1 coefficients, index-aligned with num_conv_groups.
l1_alpha_per_group = [0.001, 0.001, 0.002, 0.002, 0.003, 0.003, 0.004, 0.004]

def group_lasso_regularization_differ(model, l1_alpha_per_group):
    """Group-lasso (L1) penalty on output filters 8-15 of every Conv2d layer.

    Each of the eight targeted filters gets its own coefficient from
    *l1_alpha_per_group* (indexed ``i - 8``).

    Robustness fix: layers with fewer than 16 output filters are skipped —
    the original hard-coded ``range(8, 16)`` raised IndexError on them.

    Args:
        model: module whose Conv2d layers are penalized.
        l1_alpha_per_group: sequence of at least 8 per-filter coefficients.

    Returns:
        Accumulated penalty tensor (0 when nothing matches).
    """
    l1_loss = 0
    for name, module in model.named_modules():
        if isinstance(module, nn.Conv2d):
            weight = module.weight
            if weight.shape[0] < 16:
                continue
            # Per-filter L1 norm, one coefficient per filter.
            for i in range(8, 16):
                l1_loss += l1_alpha_per_group[i - 8] * torch.norm(weight[i, :, :, :], 1)
    return l1_loss


def l2_regularization(model, l2_alpha):
    """Return ``l2_alpha * sum(||W||^2 / 2)`` over all Conv2d weights in *model*.

    Uses ``isinstance`` (not ``type(...) is``) so Conv2d subclasses are
    penalized as well. Returns 0 when the model has no conv layers.
    """
    l2_loss = sum((module.weight ** 2).sum() / 2.0
                  for module in model.modules()
                  if isinstance(module, nn.Conv2d))
    return l2_alpha * l2_loss


# Activation helpers (defined once; not referenced in the visible training code).
softmax = torch.nn.Softmax(dim=1)
sigmoid = torch.nn.Sigmoid()

# Evaluation-time preprocessing: resize to 512x512, expand grayscale to three
# channels, then normalize with ImageNet statistics.
transform = transforms.Compose([
    transforms.Resize((512, 512)),
    transforms.Grayscale(num_output_channels=3),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])


# Training-time preprocessing: same pipeline plus RandomErasing (30% chance)
# for augmentation, applied after ToTensor and before normalization.
transformtrain = transforms.Compose([
    transforms.Resize((512, 512)),
    transforms.Grayscale(num_output_channels=3),
    transforms.ToTensor(),
    torchvision.transforms.RandomErasing(p=0.3,scale=(0.02, 0.2),ratio=(0.5, 2), value=0,inplace=False),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

def make_dir(path):
    """Create *path* (including missing parents) if it does not already exist."""
    import os
    # exist_ok avoids the check-then-create race of the original exists() test.
    os.makedirs(path, exist_ok=True)


make_dir('models')  # checkpoints from the training loop are saved here

batch_size = 8  # samples per optimization step

# Datasets laid out as one folder per class (ImageFolder convention).
train_set = torchvision.datasets.ImageFolder(root='../data/sdfcar/train', transform=transformtrain)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True,
                          num_workers=0)  # batch size trades off optimization quality vs. speed

val_dataset = torchvision.datasets.ImageFolder(root='../data/sdfcar/valid', transform=transform)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True,
                        num_workers=0)  # shuffle is unnecessary for validation but harmless

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# NOTE(review): `weights=True` relies on torchvision's legacy bool handling;
# prefer `weights=torchvision.models.ResNet18_Weights.DEFAULT` — confirm the
# installed torchvision version accepts a bool here.
net = torchvision.models.resnet18(weights=True)
# net = torch.load("models/0-0.00000_59.482%_679.28232_82.872%_quan_conv1_1bitfreezen.pth")
num_ftrs = net.fc.in_features
net.fc = nn.Linear(num_ftrs, 196)  # replace the classifier head: 196 output classes

criterion = nn.CrossEntropyLoss()
net = net.to(device)
optimizer = torch.optim.AdamW(lr=0.0001, params=net.parameters())
eposhs = 35  # total training epochs ("eposhs" is a typo for "epochs", kept as-is)

l1_alpha = 0.001  # L1 regularization coefficient (currently unused in the loop below)

# for param in net.conv1.parameters():
#     param.requires_grad = False

# Main train/validate loop. Each epoch: one pass over train_loader with
# cross-entropy (the quantization penalty is currently disabled), then a
# full validation pass, then a checkpoint whose filename encodes the metrics.
for epoch in range(eposhs):
    print(f'--------------------{epoch}--------------------')
    # BUG FIX: net.eval() at the end of each epoch was never undone, so every
    # epoch after the first trained with frozen batch-norm/dropout behavior.
    net.train()
    correct_train = 0
    sum_loss_train = 0
    total_correct_train = 0
    quan_loss_record = 0  # quantization penalty (disabled; see comment below)

    for inputs, labels in tqdm(train_loader):
        inputs = inputs.to(device)
        labels = labels.to(device)

        output = net(inputs)
        # Optional quantization regularizer, disabled in this configuration:
        # quan_loss_record = quan_loss(net, 1)
        loss = criterion(output, labels)  # + quan_loss_record

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # BUG FIX: sum_loss_train was never accumulated, so checkpoints were
        # always named with a 0.00000 training loss.
        sum_loss_train = sum_loss_train + loss.item()
        _, predicted = torch.max(output.data, 1)
        total_correct_train = total_correct_train + labels.size(0)
        correct_train = correct_train + (predicted == labels).sum().item()

    print("quan_loss: ", quan_loss_record)
    # BUG FIX: the original divided by (n + 1), slightly deflating accuracy;
    # max(..., 1) keeps the division safe for an empty loader instead.
    acc_train = correct_train / max(total_correct_train, 1)

    net.eval()
    correct_val = 0
    sum_loss_val = 0
    total_correct_val = 0
    # BUG FIX: validation needs no gradients (the original even called
    # optimizer.zero_grad() here) and ran every forward pass twice.
    with torch.no_grad():
        for inputs, labels in tqdm(val_loader):
            inputs = inputs.to(device)
            labels = labels.to(device)
            output = net(inputs)
            loss = criterion(output, labels)
            sum_loss_val = sum_loss_val + loss.item()
            total_correct_val = total_correct_val + labels.size(0)
            _, predicted = torch.max(output.data, 1)
            correct_val = correct_val + (predicted == labels).sum().item()

    acc_val = correct_val / max(total_correct_val, 1)
    print('验证准确率是{:.3f}%:'.format(acc_val * 100))
    # Filename encodes epoch, train loss/accuracy, and val loss/accuracy.
    torch.save(net, 'models/{}-{:.5f}_{:.3f}%_{:.5f}_{:.3f}%_quan_conv1_1bitlayer1_l2.pth'.format(
        epoch, sum_loss_train, acc_train * 100, sum_loss_val, acc_val * 100))