from PIL import Image
import os
def produceImage(file_in, width, height, file_out):
    """Resize the image at *file_in* to (width, height) and save it to *file_out*.

    Args:
        file_in: path of the source image.
        width: target width in pixels.
        height: target height in pixels.
        file_out: path where the resized image is written.
    """
    # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is its
    # long-standing equivalent (same filter, same result).
    # The context manager closes the underlying file handle promptly.
    with Image.open(file_in) as image:
        resized_image = image.resize((width, height), Image.LANCZOS)
        resized_image.save(file_out)

import torchvision.models as models


import os
import json

import torch
import torch.nn as nn
from torchvision import transforms, datasets
import torch.optim as optim
from tqdm import tqdm
from collections import OrderedDict

class VGG_cls(nn.Module):
    """VGG16 backbone with an SE-style channel re-weighting block and a
    7-class classifier head.

    forward(x) returns (flattened_reweighted_features, class_scores).
    """

    def __init__(self):
        super(VGG_cls, self).__init__()
        # ImageNet-pretrained VGG16; only its convolutional `features`
        # sub-module is used in forward (the stock classifier is ignored).
        self.backbone = models.vgg16(pretrained=True)
        # Squeeze-and-excitation style bottleneck: 512 -> 128 -> 512
        # per-channel attention logits.
        self.SEblock = nn.Sequential(OrderedDict([
                ('SE1',   nn.Sequential(
                                        nn.Linear(512, 128),
                                        nn.ReLU())),
                ('SE2',   nn.Sequential(nn.Dropout(0.5),
                                        nn.Linear(128, 512)
                                        ))]))
        self.GAP = torch.nn.AdaptiveAvgPool2d(1)
        # NOTE: softmax over channels (not the usual per-channel sigmoid)
        # normalizes the 512 attention weights to sum to 1.
        self.softmax = torch.nn.Softmax(dim=1)
        self.classifier = nn.Sequential(OrderedDict([
                ('fc1',   nn.Sequential(
                                        nn.Linear(512 * 7 * 7, 512),
                                        nn.ReLU())),
                ('fc2',   nn.Sequential(nn.Dropout(0.5),
                                        nn.Linear(512, 7)
                                        ))]))

    def forward(self, x):
        """Run the backbone, re-weight channels, and classify.

        Args:
            x: input batch; for 224x224 RGB input the backbone produces
               a (N, 512, 7, 7) feature map — TODO confirm input size
               against the data pipeline.

        Returns:
            Tuple of (flattened SE-weighted features of shape
            (N, 512*7*7), class scores of shape (N, 7)).
        """
        backbone_feat = self.backbone.features(x)           # (N, 512, 7, 7)
        # BUG FIX: `.squeeze()` removed ALL size-1 dims, so a batch of
        # size 1 collapsed to shape (512,) and crashed the SE Linear.
        # flatten(1) always yields (N, 512), identical to the old result
        # whenever N > 1.
        feat_squeeze = self.GAP(backbone_feat).flatten(1)
        SE_weight = self.softmax(self.SEblock(feat_squeeze))
        # Broadcast the (N, 512) weights over the spatial dims.
        backbone_feat_SE = backbone_feat * SE_weight.unsqueeze(2).unsqueeze(2)
        backbone_feat_SE = backbone_feat_SE.view(-1, 512 * 7 * 7)
        score = self.classifier(backbone_feat_SE)
        return backbone_feat_SE, score
def main():
    """Train VGG_cls on ./dataset/training, validate on ./dataset/test, and
    save the state dict of the best-validation-accuracy model to ./vggNet.pth.
    """
    # Training is pinned to CPU. To use a GPU, replace this with:
    # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    device = torch.device("cpu")
    print("using {} device.".format(device))

    data_transform = {
        # transforms.RandomHorizontalFlip() could be added here for augmentation.
        "train": transforms.Compose([
                                     transforms.ToTensor(),
                                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),
        "validation": transforms.Compose([transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])}

    image_path = './dataset'
    train_dataset = datasets.ImageFolder(root=os.path.join(image_path, "training"),
                                         transform=data_transform["train"])
    train_num = len(train_dataset)

    # class-name -> index mapping (kept for debugging/inspection; unused below).
    exp_list = train_dataset.class_to_idx

    batch_size = 32
    # Single-process data loading. Raise this to
    # min(os.cpu_count(), batch_size, 8) once worker processes are safe
    # in the target environment.
    nw = 0
    print('Using {} dataloader workers every process'.format(nw))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size, shuffle=True,
                                               num_workers=nw)

    validate_dataset = datasets.ImageFolder(root=os.path.join(image_path, "test"),
                                            transform=data_transform["validation"])
    val_num = len(validate_dataset)
    validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                                  batch_size=batch_size, shuffle=False,
                                                  num_workers=nw)
    print("using {} images for training, {} images for validation.".format(train_num,
                                                                           val_num))

    net = VGG_cls()
    net.to(device)
    loss_function = nn.CrossEntropyLoss()  # cross-entropy loss
    optimizer = optim.Adam(net.parameters(), lr=0.0001)

    epochs = 60
    best_acc = 0.0
    save_path = './{}Net.pth'.format('vgg')
    train_steps = len(train_loader)
    for epoch in range(epochs):
        # ---- training ----
        net.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader)

        # confusion_matrix[predicted, true] — note the row/column order is
        # the transpose of the common [true, predicted] convention.
        confusion_matrix = torch.zeros((7, 7), dtype=torch.int)
        for step, data in enumerate(train_bar):
            images, labels = data
            optimizer.zero_grad()
            feat, outputs = net(images.to(device))

            # Detach before argmax so the bookkeeping stays off the graph.
            pred = torch.argmax(outputs.clone().detach().cpu(), dim=1)
            for cur_pred, cur_label in zip(pred, labels):
                confusion_matrix[cur_pred, cur_label] += 1

            loss = loss_function(outputs, labels.to(device))
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1,
                                                                     epochs,
                                                                     loss)
        # print(confusion_matrix)

        # ---- validation ----
        net.eval()
        acc = 0.0  # running count of correct predictions
        with torch.no_grad():  # no gradient tracking during validation
            val_bar = tqdm(validate_loader)
            for val_data in val_bar:
                val_images, val_labels = val_data
                feat, outputs = net(val_images.to(device))
                predict_y = torch.max(outputs, dim=1)[1]
                acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

        val_accurate = acc / val_num
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))

        # Keep only the best checkpoint seen so far.
        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)

    print('Finished Training')

# Run training only when executed as a script, not on import.
if __name__ == '__main__':
    main()