import torch
import torchvision.transforms as transforms
from PIL import Image
import torch.nn.functional as F
import glob
from network import vgg


def main():
    """Run VGG16 inference on the sunflower test images and print predictions.

    Loads the class-name list and a trained checkpoint, then classifies the
    first 15 images under the sunflowers test directory, printing the
    predicted class name, the raw output shape, and softmax probabilities.
    """
    with open('../../datasets/flowers/classes.txt', 'r') as text_cls:
        classes_str = text_cls.read()
    # Strip whitespace so a trailing newline in classes.txt does not
    # corrupt the last class name.
    classes = [c.strip() for c in classes_str.split(',')]  # e.g. ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']

    # Standard ImageNet preprocessing: VGG expects 224x224 normalized RGB input.
    transform = transforms.Compose([transforms.Resize((224, 224)),
                                    transforms.ToTensor(),
                                    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])

    model_name = "vgg16"
    net = vgg(model_name=model_name, num_classes=5)
    net.load_state_dict(torch.load('../../models/VggNet_best.pth', map_location='cpu')['state_dict'])

    net.eval()  # disable dropout/batch-norm updates for inference
    sunflowers_paths = glob.glob("../../datasets/flowers/test/sunflowers/*")
    for path in sunflowers_paths[:15]:
        # Force 3-channel RGB: grayscale or RGBA files (e.g. some PNGs) would
        # otherwise break the 3-channel Normalize transform. The context
        # manager also closes the underlying file handle promptly.
        with Image.open(path) as img:
            im = transform(img.convert('RGB'))  # [C, H, W]
        im = torch.unsqueeze(im, dim=0)  # [N, C, H, W]
        with torch.no_grad():
            outputs = net(im)
            # Index of the highest-scoring class in the single-item batch.
            predict = torch.max(outputs, dim=1)[1].numpy()
            print(classes[int(predict)], outputs.shape, F.softmax(outputs, dim=1))


# Script entry point: run inference only when executed directly, not on import.
if __name__ == '__main__':
    main()
