import sys
import cv2
import os
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from PIL import Image
from face_detector import face_detector
import matplotlib.pyplot as plt

# Class labels are the sub-directory names of the dataset root, in
# deterministic (sorted) order so indices match the trained model's outputs.
name = sorted(os.listdir("../lskdata/data"))
print(name)

# Inference-time preprocessing: resize to the model's expected input size,
# convert to a CHW float tensor, and normalize with the standard
# ImageNet channel statistics.
image_size = (224, 224)
data_transform = transforms.Compose([
    transforms.Resize(image_size),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])
#mypath = "../lskdata/data/mask"
#names = os.listdir(mypath)
#
#path_model = 'lsk-model/face_mask.pkl'  # load the saved model
#model = torch.load(path_model, map_location=torch.device('cpu'))
#model.eval()  # change the behavior of the model
#
#count = 0
#
#for i in names:
#    nowfile = os.path.join(mypath,i)
#    img = Image.open(nowfile).convert("RGB")
#    img = data_transform(img)
#    img = img.unsqueeze(0)
#    out = nn.Softmax(dim=1)(model(img))
#    valu = out.argmax(dim=1).item()
#    print(name[valu])
#    count +=1
#    if count>10:
#        exit(0)


# Live demo: grab frames from the default webcam, crop every detected face,
# and display each crop. Close the matplotlib window to continue to the
# next face/frame; the capture device is always released on exit.
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    sys.exit("Could not open webcam (device 0)")

try:
    while True:
        ok, img = cap.read()
        if not ok:
            # Camera unplugged or frame grab failed — stop cleanly instead
            # of processing a None frame.
            break

        faces = face_detector(img)
        for face in faces:
            x, y, w, h = face
            # Clamp the top-left corner: detectors may return negative
            # coordinates for faces partially outside the frame.
            x, y = max(x, 0), max(y, 0)

            img_face = img[y:y + h, x:x + w]

            # OpenCV frames are BGR; matplotlib expects RGB, so convert
            # before display to avoid swapped colors.
            plt.imshow(cv2.cvtColor(img_face, cv2.COLOR_BGR2RGB))
            # plt.show() blocks until the window is closed — no extra
            # busy-wait loop is needed here.
            plt.show()
finally:
    cap.release()

