import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm

from torchvision import transforms
import torchvision

from torch.utils.data import DataLoader
import math

# Hyper-parameters for a quantization/layer-freezing experiment — presumably
# related to the "quan_conv1_1bit..." checkpoint names used below, but neither
# constant is read anywhere in this file; TODO(review): confirm or remove.
quantized_max_int = 1
num_epochs_to_freeze = 1

def e_x(num):
    """Return the natural exponential e**num (thin wrapper over math.exp)."""
    value = math.exp(num)
    return value


# Shared activation modules: softmax over the class dimension (dim=1) and a
# plain sigmoid. (Neither is used in the visible part of this file.)
softmax = nn.Softmax(dim=1)
sigmoid = nn.Sigmoid()

# Deterministic preprocessing for evaluation: resize to 512x512, replicate the
# grey channel to 3 (ImageNet-style input), convert to tensor, and normalize
# with the standard ImageNet statistics.
_eval_steps = [
    transforms.Resize((512, 512)),
    transforms.Grayscale(num_output_channels=3),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
transform = transforms.Compose(_eval_steps)


# Training-time preprocessing: the same resize/grayscale/normalize pipeline,
# plus RandomErasing for augmentation.
# NOTE(review): RandomErasing fills with 0 *before* Normalize, so the erased
# patch ends up at (0 - mean) / std after normalization rather than zero —
# confirm this ordering is intentional.
_train_steps = [
    transforms.Resize((512, 512)),
    transforms.Grayscale(num_output_channels=3),
    transforms.ToTensor(),
    transforms.RandomErasing(p=0.3, scale=(0.02, 0.2), ratio=(0.5, 2), value=0, inplace=False),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
transformtrain = transforms.Compose(_train_steps)

def make_dir(path):
    """Create *path* (including missing parents) if it does not already exist.

    Uses ``os.makedirs(..., exist_ok=True)``, which is atomic with respect to
    the existence check — the original exists()-then-makedirs pair was
    race-prone and shadowed the builtin ``dir``.
    """
    import os
    os.makedirs(path, exist_ok=True)


# Checkpoints produced by the training loop below are written here.
make_dir('models')

batch_size = 8

# ImageFolder datasets (one sub-directory per class): the training split gets
# the augmenting pipeline, the validation split the deterministic one.
train_set = torchvision.datasets.ImageFolder(root='../data/sdfcar/train', transform=transformtrain)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True,
                          num_workers=0)

val_dataset = torchvision.datasets.ImageFolder(root='../data/sdfcar/valid', transform=transform)
# FIX: no reason to shuffle the validation set — a fixed order makes eval
# runs deterministic (metrics are unaffected either way).
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False,
                        num_workers=0)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# FIX: `weights=True` only works through torchvision's deprecated legacy
# "pretrained" shim (and emits a warning); pass the weights enum explicitly.
net = torchvision.models.resnet18(weights=torchvision.models.ResNet18_Weights.DEFAULT)
# net = torch.load("models/0-0.00000_59.482%_679.28232_82.872%_quan_conv1_1bitfreezen.pth")
num_ftrs = net.fc.in_features
# Replace the ImageNet head with a 196-way classifier (the original comment
# incorrectly said 2 classes).
net.fc = nn.Linear(num_ftrs, 196)

criterion = nn.CrossEntropyLoss()
net = net.to(device)
optimizer = torch.optim.AdamW(lr=0.0001, params=net.parameters())
eposhs = 35  # NOTE(review): typo for "epochs"; name kept because the loop below reads it

l1_alpha = 0.001  # L1 regularization coefficient — currently unused (the loss term below is commented out)

# Optionally freeze the first conv layer during training (disabled):
# for param in net.conv1.parameters():
#     param.requires_grad = False

for epoch in range(eposhs):
    print(f'--------------------{epoch}--------------------')

    # ---- training phase ----
    # BUG FIX: the model was switched to eval() for validation but never put
    # back into train() mode, so from epoch 1 onward BatchNorm/Dropout ran in
    # inference mode while training.
    net.train()
    correct_train = 0
    sum_loss_train = 0
    total_correct_train = 0

    for inputs, labels in tqdm(train_loader):
        inputs = inputs.to(device)
        labels = labels.to(device)
        output = net(inputs)
        loss = criterion(output, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # BUG FIX: the running training loss was never accumulated, so the
        # loss recorded in the checkpoint filename was always 0.
        sum_loss_train = sum_loss_train + loss.item()
        _, predicted = torch.max(output.data, 1)
        total_correct_train = total_correct_train + labels.size(0)
        correct_train = correct_train + (predicted == labels).sum().item()
    # BUG FIX: dividing by (total + 1) systematically under-reported accuracy;
    # max(..., 1) keeps a zero-division guard without biasing the result.
    acc_train = correct_train / max(total_correct_train, 1)

    # ---- validation phase ----
    net.eval()
    correct_val = 0
    sum_loss_val = 0
    total_correct_val = 0
    # no_grad: skip autograd bookkeeping during evaluation (the original also
    # called optimizer.zero_grad() here, which has no purpose without backward).
    with torch.no_grad():
        for inputs, labels in tqdm(val_loader):
            inputs = inputs.to(device)
            labels = labels.to(device)
            # FIX: the original ran the forward pass twice per batch.
            output = net(inputs)
            loss = criterion(output, labels)
            sum_loss_val = sum_loss_val + loss.item()
            total_correct_val = total_correct_val + labels.size(0)
            _, predicted = torch.max(output.data, 1)
            correct_val = correct_val + (predicted == labels).sum().item()

    acc_val = correct_val / max(total_correct_val, 1)
    print('验证准确率是{:.3f}%:'.format(acc_val*100) )
    # NOTE(review): this pickles the whole model object; saving net.state_dict()
    # would be more portable, but full-model saves are kept because the
    # commented-out loader at the top of the file expects them.
    torch.save(net,'models/{}-{:.5f}_{:.3f}%_{:.5f}_{:.3f}%_quan_conv1_1bitlayer1_l2.pth'.format(epoch,sum_loss_train,acc_train *100,sum_loss_val,acc_val*100))