import os
import glob
from classy_vision.generic.registry_utils import import_all_packages_from_directory
from classy_vision.generic.util import load_json, load_checkpoint, update_classy_model
from classy_vision.models import build_model
import numpy as np
import cv2
import torch
import torchvision.transforms as transforms
import torch.nn.functional as F
from torchvision.datasets.folder import is_image_file


def _load_model(config_file, checkpoint_file, device='cuda'):
    """Build a Classy Vision model from *config_file* and load *checkpoint_file*.

    Returns the model in eval mode, moved to *device*.
    Raises RuntimeError if the checkpoint state cannot be applied to the model.
    """
    config = load_json(config_file)
    checkpoint = load_checkpoint(checkpoint_file)

    model = build_model(config['model'])
    state_load_success = update_classy_model(
        model=model,
        model_state_dict=checkpoint['classy_state_dict']['base_model'],
        reset_heads=True,
        strict=True
    )
    # Bug fix: the original ignored this flag, so an incompatible checkpoint
    # would silently leave the model with freshly-initialized weights.
    if not state_load_success:
        raise RuntimeError('failed to load model state from %s' % checkpoint_file)

    model.eval()
    model.to(device)
    return model


def _iter_image_paths(img_dir):
    """Return the sorted list of image file paths matching the *img_dir* glob."""
    return sorted(x for x in glob.glob(img_dir, recursive=True) if is_image_file(x))


if __name__ == '__main__':
    import_all_packages_from_directory(os.getcwd())

    config_file = '/home/lixuan/shelf_grid_detection/image_orientation_recognition/configs/scene.json'
    checkpoint_file = '/home/lixuan/shelf_grid_detection/image_orientation_recognition/output_2021-07-01T09:27:41.132533/checkpoints/model_phase-998_end.torch'
    device = 'cuda'
    model = _load_model(config_file, checkpoint_file, device)

    # Resize to the network input size, then HWC uint8 BGR -> CHW float in [0, 1].
    # NOTE(review): no mean/std normalization is applied — presumably the model
    # was trained without it; confirm against the training transform.
    transform = transforms.Compose([
        lambda x: cv2.resize(x, (224, 224), interpolation=cv2.INTER_AREA),
        transforms.ToTensor(),
    ])

    img_dir = '/home/lixuan/workspace/project/zhejiang/classify/a/8/*'

    # Create the display window once; the original recreated it every iteration.
    cv2.namedWindow('img', cv2.WINDOW_NORMAL)
    try:
        for pid, path in enumerate(_iter_image_paths(img_dir)):
            img = cv2.imread(path)
            # Bug fix: cv2.imread returns None for unreadable/corrupt files;
            # the original would crash inside the transform.
            if img is None:
                print('skipping unreadable image: %s' % path)
                continue

            inp = torch.stack([transform(img)], dim=0).to(device)
            with torch.no_grad():
                out = F.softmax(model(inp), dim=1)

            # Predicted class index per image in the batch (batch size 1 here).
            probs = out.cpu().numpy()
            preds = np.argmax(probs, axis=1)
            print(preds)

            cv2.imshow('img', img)
            if cv2.waitKey(1) == 27:  # ESC quits the viewer loop
                break
    finally:
        # Bug fix: the original leaked the display window on exit.
        cv2.destroyAllWindows()
