import torch
import torch.nn as nn
import math
import numpy as np
import cv2


from vortex.utils.bbox import non_max_suppression
from vortex.network.head.yolov3 import YoloV3Head, YoloDetect, YoloLoss
from vortex.network.backbone import get_backbone_model

import vortex.data.transforms as VT
import vortex.data.transforms.image as VIT
from torchvision import transforms as FT


# Default YOLOv3 anchor boxes, shape (3 scales, 3 anchors per scale, 2).
# Each pair is an anchor (width, height) in pixels of the network input
# (presumably the default 416x416 — the standard COCO anchors from the
# YOLOv3 paper). Groups are ordered coarsest feature map first, i.e. the
# largest anchors come first; indexing matches the head's output order.
DEFAULT_ANCHORS = np.array([[[116, 90], [156, 198], [373, 326]],
                            [[30, 61], [62, 45], [59, 119]],
                            [[10, 13], [16, 30], [33, 23]]])


class YoloV3(nn.Module):
    """YOLOv3 object detector: backbone feature extractor + multi-scale head.

    The backbone produces one feature map per detection scale; the head maps
    each to raw YOLO predictions. Per-scale ``YoloDetect`` modules decode
    predictions for inference and per-scale ``YoloLoss`` modules compute the
    training loss.

    Args:
        num_classes: number of object classes.
        image_size: (width, height) of the network input, default (416, 416).
        anchors: nested sequence of shape (num_scales, anchors_per_scale, 2);
            defaults to ``DEFAULT_ANCHORS``.
        backbone: backbone model name passed to ``get_backbone_model``.
        backbone_weights: optional pretrained weights for the backbone.
    """

    def __init__(self, num_classes, image_size=(416, 416), anchors=None,
                 backbone='darknet53', backbone_weights=None):
        super(YoloV3, self).__init__()

        self.num_classes = num_classes
        self.conf_thresh = 0.5   # minimum objectness confidence kept by NMS
        self.nms_thresh = 0.45   # IoU threshold used to suppress duplicates
        self.image_size = image_size

        self.backbone = get_backbone_model(backbone, backbone_weights)
        if anchors is None:
            anchors = DEFAULT_ANCHORS
        num_feature_layers = len(anchors)  # it should be 3
        num_anchors = [len(a) for a in anchors]
        self.head = YoloV3Head(num_classes, num_anchors)

        # Use nn.ModuleList instead of a plain Python list so the per-scale
        # modules are registered as submodules: .to(device), .parameters()
        # and state_dict() now include them.  NOTE(review): assumes
        # YoloDetect and YoloLoss are nn.Module subclasses — confirm; also
        # note state_dict keys change if they carry buffers/parameters.
        self.detect_fns = nn.ModuleList(
            YoloDetect(anchors[i], num_classes, image_size)
            for i in range(num_feature_layers))
        self.loss_fns = nn.ModuleList(
            YoloLoss(anchors[i], num_classes, image_size)
            for i in range(num_feature_layers))

    def get_data_transform(self, split):
        """Return the joint image+target transform pipeline for a dataset split.

        Training splits get photometric/geometric augmentation; all splits
        are resized to ``self.image_size``, converted to tensors and
        normalized.
        """
        assert split in ['train', 'trainval', 'test', 'val']
        transforms = []

        if split in ['train', 'trainval']:
            transforms.append(VT.RandomPhotometric())
            transforms.append(VT.RandomExpand())
            transforms.append(VT.RandomScaledCrop())
            transforms.append(VT.RandomHorizontalFlip())

        transforms.append(VT.Resize(size=self.image_size))
        transforms.append(VT.ToTensor())
        transforms.append(VT.Normalize())

        return VT.Compose(transforms)

    def get_inference_transform(self):
        """Return an image-only transform for inference.

        Takes a single image as input; must stay compatible with
        ``get_data_transform`` (same resize and ImageNet normalization).
        """
        resize = VIT.ImageResize(self.image_size)
        to_tensor = FT.ToTensor()
        normalize = FT.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        return FT.Compose([resize, to_tensor, normalize])

    def detect(self, inputs):
        """Run inference on a batch and decode the first image's detections.

        Args:
            inputs: batched image tensor already preprocessed by
                ``get_inference_transform``.

        Returns:
            (boxes, labels, scores) for the FIRST image of the batch only:
            boxes as [x1, y1, x2, y2] normalized to [0, 1] by the input
            size, integer class labels, and objectness * class confidence
            scores.
        """
        with torch.no_grad():
            outputs = self.backbone(inputs)
            outputs = self.head(outputs)
            # Decode every scale (not a hard-coded 3) and concatenate along
            # the detection dimension before NMS.
            output = torch.cat(
                [fn(out) for fn, out in zip(self.detect_fns, outputs)], 1)

        batch_detections = non_max_suppression(
            output, self.num_classes,
            conf_thres=self.conf_thresh, nms_thres=self.nms_thresh)

        boxes = []
        scores = []
        labels = []
        detections = batch_detections[0]
        # NOTE(review): NMS implementations commonly return None for an
        # image with no surviving detections — guard instead of crashing
        # on .cpu(); confirm against vortex.utils.bbox.non_max_suppression.
        if detections is None:
            return boxes, labels, scores

        for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections.cpu().numpy():
            boxes.append([x1 / self.image_size[0], y1 / self.image_size[1],
                          x2 / self.image_size[0], y2 / self.image_size[1]])
            scores.append(conf * cls_conf)
            labels.append(int(cls_pred))
        return boxes, labels, scores

    def forward(self, inputs, boxes, labels):
        """Compute the total training loss summed over all detection scales."""
        outputs = self.backbone(inputs)
        outputs = self.head(outputs)
        # One loss term per scale; iterate the registered modules rather
        # than assuming exactly 3 scales.
        return sum(loss_fn(out, boxes, labels)
                   for loss_fn, out in zip(self.loss_fns, outputs))


