# coding: utf-8
import time
from collections import OrderedDict
from data import *
from refinedetvgg16 import *
from layers.functions.Detect import Detect
from pylab import plt
from data import VOC_CLASSES as labelmap


def test_net(save_folder, net, priorboxes, detector, device, testset, transform, thresh):
    """Run ``net`` over every image in ``testset`` and append ground truth and
    detections to a text file in ``save_folder``.

    Args:
        save_folder: directory path (with trailing separator) for the output file.
        net: RefineDet model in eval mode; returns (arm_loc, arm_conf, odm_loc, odm_conf).
        priorboxes: prior (anchor) boxes tensor, already on ``device``.
        detector: post-processing module producing [1, num_classes, top_k, 5] detections.
        device: torch.device the input tensor is moved to.
        testset: dataset exposing pull_image / pull_anno.
        transform: preprocessing transform; transform(img)[0] is the HWC numpy array.
        thresh: confidence threshold — detections scoring below it are skipped.
    """
    # dump predictions and assoc. ground truth to text file for now
    filename = save_folder + 'test1.txt'
    num_images = len(testset)
    for img_idx in range(num_images):
        print('Testing image {:d}/{:d}....'.format(img_idx + 1, num_images))
        img = testset.pull_image(img_idx)
        img_id, annotation = testset.pull_anno(img_idx)
        # HWC uint8 -> CHW float tensor with a leading batch dim.
        # (Variable() is deprecated — tensors carry autograd state directly.)
        x = torch.from_numpy(transform(img)[0]).permute(2, 0, 1)
        x = x.unsqueeze(0).to(device)

        with open(filename, mode='a') as f:
            f.write('\nGROUND TRUTH FOR: ' + img_id + '\n')
            for box in annotation:
                f.write('label: ' + ' || '.join(str(b) for b in box) + '\n')

        with torch.no_grad():
            arm_loc, arm_conf, odm_loc, odm_conf = net(x)
            y = detector.forward(arm_loc, arm_conf, odm_loc, odm_conf, priorboxes)
        detections = y.data
        # scale each detection back up to the original image size (x1,y1,x2,y2)
        scale = torch.Tensor([img.shape[1], img.shape[0],
                              img.shape[1], img.shape[0]])
        pred_num = 0
        # BUG FIX: inner loop previously reused ``i``, shadowing the image index.
        for cls_idx in range(detections.size(1)):
            j = 0
            # BUG FIX: cutoff was hard-coded to 0.6, ignoring the ``thresh``
            # parameter; also bound j so a fully-populated class row cannot
            # index past detections.size(2).
            while j < detections.size(2) and detections[0, cls_idx, j, 0] >= thresh:
                if pred_num == 0:
                    with open(filename, mode='a') as f:
                        f.write('PREDICTIONS: ' + '\n')
                score = detections[0, cls_idx, j, 0]
                # NOTE(review): cls_idx-1 assumes index 0 is background and
                # wraps to the last class for cls_idx==0 — background scores
                # presumably never pass thresh; verify against Detect's layout.
                label_name = labelmap[cls_idx - 1]
                pt = (detections[0, cls_idx, j, 1:] * scale).cpu().numpy()
                coords = (pt[0], pt[1], pt[2], pt[3])
                pred_num += 1
                with open(filename, mode='a') as f:
                    f.write(str(pred_num) + ' label: ' + label_name + ' score: ' +
                            str(score) + ' ' + ' || '.join(str(c) for c in coords) + '\n')
                j += 1


def test_voc():
    """Load a trained RefineDet VGG16-320 checkpoint and evaluate it on the
    VOC2012 ``val`` split, writing detections via :func:`test_net`."""
    dataset_root = os.path.expanduser("~/dataset/VOCdevkit")

    save_folder = "./output/"
    # makedirs + exist_ok: idempotent, and creates parents if needed
    os.makedirs(save_folder, exist_ok=True)

    cfg = voc320
    visual_threshold = 0.6
    USE_CUDA = True
    device = torch.device("cuda:0" if torch.cuda.is_available() and USE_CUDA else "cpu")

    checkpoint_path = "./models/refinedet_vgg16_320_20181205_final.pkl"

    net = RefineDetVGG16("test", cfg["min_dim"], cfg["num_classes"])

    # prior (anchor) boxes — generated once, moved to the inference device
    net_priorbox = PriorBox(cfg)
    with torch.no_grad():
        priorboxes = net_priorbox.forward()
    priorboxes = priorboxes.to(device)

    # detection post-processing (confidence filtering + NMS)
    detector = Detect(21, 0, 200, 0.01, 0.45, cfg['variance'], 0.01)
    detector.to(device)
    detector.eval()

    # BUG FIX: map_location lets a GPU-saved checkpoint load on a CPU-only
    # host — without it torch.load raises when CUDA is unavailable.
    checkpoint = torch.load(checkpoint_path, map_location=device)
    # strip the 'module.' prefix that nn.DataParallel prepends to keys
    weights = OrderedDict()
    for k, v in checkpoint["weights"].items():
        weights[k[7:]] = v

    net.load_state_dict(weights)
    net.to(device)
    net.eval()

    print('Finished loading model!')
    # load data
    testset = VOCDetection(dataset_root, [('2012', 'val')], None, VOCAnnotationTransform())

    # evaluation
    test_net(save_folder, net, priorboxes, detector, device, testset,
             BaseTransform(net.size, (104, 117, 123)),
             visual_threshold)

# Script entry point: run the full VOC evaluation when executed directly.
if __name__ == '__main__':
    test_voc()
