import torch
from torchvision.transforms import transforms
import h5py
from PIL import Image
import time


def predictTime(class_image, defect_image, torch_weight, n_iters=200):
    """Benchmark the average CPU forward-pass latency of a saved model.

    Loads a full pickled model and two grayscale images, then times
    ``n_iters`` forward passes and prints/returns the mean seconds per pass.

    Args:
        class_image: Path to the reference ("class") image file.
        defect_image: Path to the defect image file.
        torch_weight: Path to a model saved with ``torch.save(model, ...)``.
        n_iters: Number of timed forward passes (default 200).

    Returns:
        float: Average wall-clock seconds per forward pass.
    """
    DEVICE = torch.device("cpu")

    # Preprocess: single-channel normalization stats match training.
    transform_test = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.40272543], std=[0.13901867])
        ])

    image_class = Image.open(class_image).convert("L")
    image_class = transform_test(image_class)
    image_class = torch.unsqueeze(image_class, dim=0).to(DEVICE)

    image_defect = Image.open(defect_image).convert("L")
    image_defect = transform_test(image_defect)
    image_defect = torch.unsqueeze(image_defect, dim=0).to(DEVICE)

    # NOTE(review): torch.load unpickles arbitrary code — only load trusted
    # checkpoints. map_location lets GPU-saved checkpoints load on CPU.
    model = torch.load(torch_weight, map_location=DEVICE)
    model = model.eval().to(DEVICE)

    # no_grad: inference only — avoids building autograd graphs, which
    # would both slow the loop and grow memory, distorting the timing.
    with torch.no_grad():
        start_time = time.time()
        for _ in range(n_iters):
            model(image_class, image_defect)
        end_time = time.time()

    avg_seconds = (end_time - start_time) / n_iters
    print(avg_seconds)
    return avg_seconds




def predictClassDefectImages(class_image, defect_image, torch_weight):
    """Classify a defect image (paired with its reference image) into one of
    16 known defect categories.

    Args:
        class_image: Path to the reference ("class") image file.
        defect_image: Path to the defect image file.
        torch_weight: Path to a model saved with ``torch.save(model, ...)``.

    Returns:
        str: Predicted defect category name (e.g. ``'Residue'``).
    """
    categories = ['ADI_particle_developed', 'Array_peeling', 'Cu_missing', 'Other_peeling', 'Partial_etch',
                  'Pattern_fail',
                  'PR_peeling', 'Seam', 'Reference', 'Surface_particle', 'Burried_particle', 'Cu_diffuse',
                  'Prelayer_defect_developed',
                  'Void', 'Residue', 'Scratch']

    # Map class index -> category name for decoding the argmax output.
    id_to_categories = dict(enumerate(categories))

    # Single device assignment (the original set cpu, then overwrote it).
    DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Preprocess: single-channel normalization stats match training.
    transform_test = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.40272543], std=[0.13901867])
        ])

    image_class = Image.open(class_image).convert("L")
    image_class = transform_test(image_class)
    image_class = torch.unsqueeze(image_class, dim=0).to(DEVICE)

    image_defect = Image.open(defect_image).convert("L")
    image_defect = transform_test(image_defect)
    image_defect = torch.unsqueeze(image_defect, dim=0).to(DEVICE)

    # NOTE(review): torch.load unpickles arbitrary code — only load trusted
    # checkpoints. map_location lets GPU-saved checkpoints load on CPU.
    model = torch.load(torch_weight, map_location=DEVICE)
    model = model.eval().to(DEVICE)

    # Inference only — no_grad skips autograd bookkeeping.
    with torch.no_grad():
        out = model(image_class, image_defect)
    print(f"model output:{out}")
    _, pred = torch.max(out.data, 1)
    print(f"defect id:{pred}")
    res = id_to_categories[pred.data.item()]  # defect name
    print(f"defect name:{res}")
    return res

if __name__ == '__main__':
    # Example inputs — adjust these paths before running.
    class_image = 'origin_class.jpg'
    defect_image = 'origin_defect.jpg'
    torch_weight = 'model_123_92.304.pth'
    # predictClassDefectImages(class_image, defect_image, torch_weight)
    predictTime(class_image, defect_image, torch_weight)
    # Sample output from a previous classification run:
    #   model output:tensor([[-12.1442, -17.5433,  -7.7298, -14.6615, -17.1379, -14.4158, -16.0302,
    #        -15.5619,  -6.7681, -12.1833,  -9.3172,  -2.4802, -15.1773, -10.8081,
    #          5.3558, -10.6745]], device='cuda:0', grad_fn=<AddmmBackward0>)
    #   defect id:tensor([14], device='cuda:0')
    #   defect name:Residue