import os
import torch
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import torchvision
from torchvision import transforms
from PIL import Image
import numpy as np
import datetime


# Hyper parameters
num_classes = 11

# Maps class letter -> integer label used by the model's final FC layer.
label_dict = {
    'A': 0,
    'B': 1,
    'C': 2,
    'D': 3,
    'E': 4,
    'F': 5,
    'G': 6,
    'H': 7,
    'I': 8,
    'J': 9,
    'K': 10,
}
# print(label_dict)

# Inverse mapping: integer label -> class letter, used to decode predictions.
label_dict_inv = {v: k for k, v in label_dict.items()}
# print(label_dict_inv)


# Use the GPU when available; model and inputs are moved to this device below.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Path to a trained checkpoint (a state_dict) to restore; set to None to skip loading.
LOAD_MODEL_PATH = 'runs_resnet18/2021-07-25_19_23_32_776890/checkpoints/best.pt'

# Network input resolution.
# NOTE(review): 64x64 is small for ResNet18 — confirm it matches the training setup.
IMG_W = 64  # input width fed to the network
IMG_H = 64  # input height fed to the network

# Inference-time preprocessing: resize to the network input size, convert to a
# CHW float tensor in [0, 1], then apply the standard ImageNet normalization.
valid_transform = transforms.Compose([
    transforms.Resize((IMG_H, IMG_W)),
    transforms.ToTensor(),
    # ImageNet per-channel means / stds (torchvision convention).
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

# model: ResNet18 backbone with its final FC layer swapped for an
# 11-way classification head (512 is ResNet18's penultimate feature width).
model = torchvision.models.resnet18(pretrained=False)
new_fc = torch.nn.Linear(in_features=512, out_features=num_classes, bias=True)
model.fc = new_fc
# print(model)
if LOAD_MODEL_PATH is not None:
    # Load weights onto the CPU first; the model is moved to `device` afterwards.
    model.load_state_dict(torch.load(LOAD_MODEL_PATH, map_location='cpu'))  # !!!
    print('loaded model:', LOAD_MODEL_PATH)
model.to(device)
model.eval()  # inference mode: disables dropout, freezes batch-norm statistics


# feeding
def do_11_classify(arr_img):
    """Classify an image array into one of the 11 letter classes.

    Args:
        arr_img: image as an (H, W, C) array-like; values are cast to uint8
            before the PIL conversion. NOTE(review): the __main__ demo feeds a
            cv2 image (BGR channel order) — confirm this matches how the model
            was trained.

    Returns:
        The predicted class letter (a key of ``label_dict``).
    """
    img = Image.fromarray(np.uint8(arr_img))
    # Preprocess and add the batch dimension: (1, C, H, W).
    batch = valid_transform(img).unsqueeze(0)
    with torch.no_grad():
        batch = batch.to(device)
        logits = model(batch)
        # Index of the highest logit is the predicted class id.
        pred_cls = torch.argmax(logits, dim=1)
        return label_dict_inv[int(pred_cls.item())]


def do_2_classify(arr_img):
    """Binary classification — not implemented yet; returns None."""


def get_detected_patch(src_img, x, y, w, h, a) -> np.ndarray:
    """Cut a rotated rectangle out of ``src_img`` and return it straightened.

    Args:
        src_img: source image (numpy array, e.g. as returned by cv2.imread).
        x, y: center coordinates of the rotated rectangle.
        w, h: width and height of the rectangle.
        a: rotation angle in degrees (OpenCV RotatedRect convention).

    Returns:
        The warped, axis-aligned patch of size (int(w), int(h)).
    """
    # BUGFIX: cv2 was only imported inside the __main__ guard, so this function
    # raised NameError when the file was imported as a module. Import locally
    # to keep the function self-contained.
    import cv2

    # cv2.boxPoints returns the corners in the order:
    # bottom-left, top-left, top-right, bottom-right.
    rect = ((x, y), (w, h), a)
    box = cv2.boxPoints(rect)
    # np.int0 was removed in NumPy 2.0; np.intp is the equivalent alias.
    box = np.asarray(box, dtype=np.intp)

    # Target size of the straightened patch.
    width, height = int(w), int(h)

    src_pts = box.astype("float32")
    # Destination corners after straightening, matching the box-point order above.
    dst_pts = np.array([[0, height - 1],
                        [0, 0],
                        [width - 1, 0],
                        [width - 1, height - 1]], dtype="float32")

    # Perspective transform mapping the rotated box onto an upright rectangle,
    # then warp the source image directly to obtain the straightened patch.
    M = cv2.getPerspectiveTransform(src_pts, dst_pts)
    warped = cv2.warpPerspective(src_img, M, (width, height))

    return warped



if __name__ == '__main__':
    import cv2

    # Smoke test: classify a single pre-cut validation image from disk.
    sample_path = './data/val_planes_cut/D_pic446_0.png'
    sample_img = cv2.imread(sample_path)
    # cv2.imshow('', sample_img)
    # cv2.waitKey(0)
    predicted = do_11_classify(sample_img)
    print(predicted)

    # Example pipeline with a detected rotated box (fill in real values):
    # patch = get_detected_patch(src_img, x, y, w, h, angle)
    # print(do_11_classify(patch))