import json

import torch
import torchvision.transforms

from backbone.darknet53 import YoloBody
from backbone.d import YOLOModel
from utils.dataset import COCO
from utils.object_detection import bbox_position_cast, non_max_suppress
from utils.train import init_weights
from framework.yolov3 import YOLOLoss, YOLOInfer
import torch.utils.data

if __name__ == '__main__':
    # Preprocessing pipeline: convert PIL image -> tensor, then resize to the
    # 416x416 network input resolution.
    transform = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Resize((416, 416)),
    ])
    device = torch.device('cpu')

    # Banana-detection data stored in COCO layout (images dir + annotations.json).
    validate_dataset = COCO('./out/data/datasets/banana-detection/train', 'images', 'annotations.json',
                            transform=transform)

    # Single-class (c=1) YOLO backbone; load the pretrained checkpoint.
    model = YoloBody([3, 3, 3], 1)
    model.load_state_dict(torch.load('./out/models/train_e30.pth', map_location=device))

    # Anchor (w, h) pairs in input-image pixels, one group per detection scale,
    # ordered coarse -> fine to match the model's output heads.
    max_anchors = torch.tensor([[116, 90], [156, 198], [373, 326]], device=device)
    medium_anchors = torch.tensor([[30, 61], [62, 45], [59, 119]], device=device)
    min_anchors = torch.tensor([[10, 13], [16, 30], [33, 23]], device=device)
    anchors = [max_anchors, medium_anchors, min_anchors]

    # NOTE(review): the values passed as `strides` look like grid sizes
    # (13/26/52 for a 416 input), not strides (32/16/8) — confirm against the
    # YOLOInfer/YOLOLoss API before renaming anything.
    yolo_infer = YOLOInfer(anchors, c=1, strides=[13, 26, 52],
                           image_size=torch.Size([416, 416]), device=device)
    yolo_loss = YOLOLoss(anchors, c=1, strides=[13, 26, 52],
                         image_size=torch.Size([416, 416]), device=device)
    coco_results = []
    model.eval()


    def collate_fn(batch):
        """Collate (image, label) pairs: stack images into one batch tensor,
        keep the labels as a plain Python list (they may be ragged)."""
        images, labels = zip(*batch)
        return torch.stack(images), list(labels)


    # Run inference over the validation set and collect COCO-format detections.
    # Evaluation only — disable autograd so no gradient graph is built.
    with torch.no_grad():
        # shuffle=False so the batch index `b` recorded as image_id is a
        # stable, reproducible identifier across runs.
        loader = torch.utils.data.DataLoader(validate_dataset, batch_size=1,
                                             shuffle=False, collate_fn=collate_fn)
        for b, (x, y) in enumerate(loader):
            x = x.to(device)
            y_p = model(x)
            print(yolo_loss(y_p, y))

            boxes, confidences, classes = yolo_infer(y_p)

            # Keep detections above the confidence threshold.
            # squeeze(-1) rather than squeeze(): with a single detection a
            # (1, 1) tensor would collapse to 0-dim and break the boolean
            # indexing below. (Assumes confidences is (N, 1) — TODO confirm
            # against YOLOInfer.)
            obj_ids = (confidences > 0.5).squeeze(-1)
            obj_boxes = boxes[obj_ids]
            obj_confidences = confidences[obj_ids]
            obj_classes = classes[obj_ids]
            ids = non_max_suppress(obj_boxes, obj_confidences.squeeze(-1), 0.5, device=device)
            # Each row: [x, y, x2/y2 or w/h (per bbox convention), conf, class].
            results = torch.cat([obj_boxes[ids], obj_confidences[ids], obj_classes[ids]], dim=1).tolist()
            # BUG FIX: a stray `break` used to precede this loop, making the
            # append unreachable so results.json was always empty.
            for r in results:
                coco_results.append({'image_id': b, 'category_id': int(r[5]),
                                     'bbox': r[0:4], 'score': r[4]})

    with open('results.json', 'w') as f:
        json.dump(coco_results, f)

# /Users/zmh/DataFiles/github/deep_learning/venv/bin/python /Users/zmh/DataFiles/github/deep_learning/main.py
# tensor(0) tensor(1) tensor(6)
# tensor([[ 0.2461,  0.9805, -0.6045, -0.3767,  1.0000,  1.0000],
#         [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
#         [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000]])
# tensor(1) tensor(3) tensor(12)
# tensor([[0., 0., 0., 0., 0., 0.],
#         [0., 0., 0., 0., 0., 0.],
#         [0., 0., 0., 0., 0., 0.]])
# tensor(2) tensor(7) tensor(24)
# tensor([[0., 0., 0., 0., 0., 0.],
#         [0., 0., 0., 0., 0., 0.],
#         [0., 0., 0., 0., 0., 0.]])
# 2909.8212890625
#
# Process finished with exit code 0
