import torch
import torchvision
import torch.utils.data
from object_detect import ObjectDetect
from train_with_torch import Train, Accumulator
from dataset import BananaDataset
import matplotlib.pyplot as plot


class SSDObjectDetect(ObjectDetect):
    """SSD helpers: anchor/ground-truth target assignment and flattening of
    multi-scale prediction maps.

    The matching primitives (``anchor_true_map``, ``anchor_true_offset``,
    ``multi_box``, ``iou``, ``show_bboxes``) are inherited from
    ``ObjectDetect``, which is defined outside this file; their exact
    semantics are assumed below and should be confirmed against that class.
    """

    def target_bbox(self, anchors_bboxes, labels_bboxes):
        """Build per-image training targets for every anchor.

        Args:
            anchors_bboxes: (num_anchors, 4) anchor boxes.
            labels_bboxes: (batch, num_labels, 5) ground truth; column 0 is
                the class id and columns 1:5 the box coordinates (coordinate
                convention defined by ObjectDetect — not visible here).

        Returns:
            Tuple ``(offsets, classes, masks)``:
            offsets — (batch, num_anchors, 4) regression targets;
            classes — (batch, num_anchors) class labels, 0 = background;
            masks — (batch, num_anchors, 4) boolean mask selecting anchors
            that were assigned to a ground-truth box.
        """
        offsets, classes, masks = [], [], []
        n = anchors_bboxes.shape[0]
        for i in range(labels_bboxes.shape[0]):
            label_bboxes = labels_bboxes[i, :, :]
            # Presumably maps each anchor to the index of its assigned
            # ground-truth box, with a negative value for unassigned anchors
            # (inherited helper — TODO confirm against ObjectDetect).
            anchor_true_output_map = self.anchor_true_map(anchors_bboxes, label_bboxes[:, 1:])
            class_labels = torch.zeros(n, dtype=torch.long, device=self.device)
            true_bboxes = torch.zeros((n, 4), dtype=torch.float32, device=self.device)
            # Indices of anchors that received an assignment.
            index = torch.squeeze(torch.nonzero(anchor_true_output_map >= 0))
            assigned_indexes = anchor_true_output_map[index]
            # Shift class ids by +1 so that label 0 can mean "background".
            class_labels[index] = label_bboxes[assigned_indexes, 0].long() + 1
            true_bboxes[index] = label_bboxes[assigned_indexes, 1:]

            offset = self.anchor_true_offset(anchors_bboxes, true_bboxes, None)
            offsets.append(offset)
            classes.append(class_labels)
            # Repeat the per-anchor assignment flag across the 4 box coords
            # so the mask can gate the offset regression loss elementwise.
            masks.append((anchor_true_output_map >= 0).reshape(-1, 1).repeat(1, 4))

        offsets = torch.stack(offsets)
        classes = torch.stack(classes)
        masks = torch.stack(masks)
        return offsets, classes, masks

    def concat_predicts(self, predicts):
        """Flatten each (N, C, H, W) prediction map to (N, H*W*C) — channels
        last so per-anchor values stay contiguous — and concatenate all
        scales along dim 1."""
        a = []
        for p in predicts:
            a.append(torch.flatten(p.permute(0, 2, 3, 1), start_dim=1))
        return torch.cat(a, dim=1)


class EnlargeViewBlock(torch.nn.Module):
    """Two 3x3 Conv-BN-ReLU layers followed by a 2x2 max-pool.

    Preserves spatial size through the convolutions (padding 1) and then
    halves height and width, enlarging the receptive field of later stages.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        layers = []
        channels = in_channels
        # Two identical conv units; only the first changes the channel count.
        for _ in range(2):
            layers.append(torch.nn.Conv2d(channels, out_channels, kernel_size=(3, 3), padding=(1, 1)))
            layers.append(torch.nn.BatchNorm2d(out_channels))
            layers.append(torch.nn.ReLU())
            channels = out_channels
        layers.append(torch.nn.MaxPool2d((2, 2), (2, 2)))
        self.sequential = torch.nn.Sequential(*layers)

    def forward(self, x):
        """Run the conv-pool stack; output is (N, out_channels, H/2, W/2)."""
        return self.sequential(x)


class BaseBlock(torch.nn.Module):
    """Backbone stem: three EnlargeViewBlock stages.

    Channels grow in_channels -> 2*in_channels -> 4*in_channels while the
    spatial resolution is halved at every stage (8x total downsampling).
    """

    def __init__(self, begin_channels, in_channels):
        super().__init__()
        stages = [
            EnlargeViewBlock(begin_channels, in_channels),
            EnlargeViewBlock(in_channels, in_channels * 2),
            EnlargeViewBlock(in_channels * 2, in_channels * 4),
        ]
        self.sequential = torch.nn.Sequential(*stages)

    def forward(self, x):
        """Apply the three downsampling stages in order."""
        return self.sequential(x)


class TinySSD(torch.nn.Module):
    """Small single-shot detector with five prediction scales.

    NOTE(review): ``forward`` reads the module-level global
    ``ssd_object_detection`` (created in ``__main__``) for anchor generation
    and prediction flattening — this class is not usable without it.
    """

    def __init__(self, q, sizes, ratios):
        """Args:
            q: number of foreground classes (predictors output q + 1 scores
               per anchor, presumably including background — TODO confirm).
            sizes: per-scale anchor size lists (5 entries, one per stage).
            ratios: per-scale anchor aspect-ratio lists (5 entries).
        """
        super().__init__()
        self.q = q
        # Anchors per spatial position, as used by multi_box-style generators.
        self.a = len(sizes[0]) + len(ratios[0]) - 1
        self.sizes = sizes
        self.ratios = ratios
        # Five feature stages; forward() zips them 1:1 with sizes/ratios and
        # the five predictors below.
        self.sequential = torch.nn.Sequential(
            BaseBlock(3, 32),
            EnlargeViewBlock(128, 128),
            EnlargeViewBlock(128, 128),
            EnlargeViewBlock(128, 128),
            torch.nn.AdaptiveMaxPool2d((1, 1))
        )

        # Sequential is used here only as a module container: the convs are
        # iterated individually in forward(), never called as a chain.
        self.class_predictors = torch.nn.Sequential(
            torch.nn.Conv2d(128, self.a * (self.q + 1), kernel_size=(3, 3), padding=(1, 1)),
            torch.nn.Conv2d(128, self.a * (self.q + 1), kernel_size=(3, 3), padding=(1, 1)),
            torch.nn.Conv2d(128, self.a * (self.q + 1), kernel_size=(3, 3), padding=(1, 1)),
            torch.nn.Conv2d(128, self.a * (self.q + 1), kernel_size=(3, 3), padding=(1, 1)),
            torch.nn.Conv2d(128, self.a * (self.q + 1), kernel_size=(3, 3), padding=(1, 1)),

        )
        self.offset_predictors = torch.nn.Sequential(
            torch.nn.Conv2d(128, self.a * 4, kernel_size=(3, 3), padding=(1, 1)),
            torch.nn.Conv2d(128, self.a * 4, kernel_size=(3, 3), padding=(1, 1)),
            torch.nn.Conv2d(128, self.a * 4, kernel_size=(3, 3), padding=(1, 1)),
            torch.nn.Conv2d(128, self.a * 4, kernel_size=(3, 3), padding=(1, 1)),
            torch.nn.Conv2d(128, self.a * 4, kernel_size=(3, 3), padding=(1, 1))
        )

    def forward(self, x):
        """Return [anchors, class scores, box offsets] gathered over all
        five scales.

        Returns:
            list of three tensors: anchors concatenated over scales;
            class predictions reshaped to (batch, total_anchors, q + 1);
            offset predictions reshaped to (batch, total_anchors, 4).
        """
        class_predicts_array = []
        offset_predicts_array = []
        anchors_array = []
        # Each stage downsamples x, then predicts classes/offsets and
        # generates anchors for that stage's feature-map resolution.
        for s, size, ratio, class_predict, offset_predictor in zip(self.sequential, self.sizes, self.ratios,
                                                                   self.class_predictors, self.offset_predictors):
            x = s(x)
            anchor = ssd_object_detection.multi_box(x.shape[2], x.shape[3], sizes=size, ratios=ratio)
            class_predict = class_predict(x)
            offset_predict = offset_predictor(x)
            class_predicts_array.append(class_predict)
            offset_predicts_array.append(offset_predict)
            anchors_array.append(anchor)
        class_predicts_array = ssd_object_detection.concat_predicts(class_predicts_array)
        offset_predicts_array = ssd_object_detection.concat_predicts(offset_predicts_array)
        return [torch.cat(anchors_array, dim=0), class_predicts_array.reshape(class_predicts_array.shape[0], -1,
                                                                              self.q + 1),
                offset_predicts_array.reshape(
                    offset_predicts_array.shape[0], -1, 4)]


class SSDTrain(Train):
    """Training loop specialisation for TinySSD.

    Relies on the base ``Train`` class (not visible here) to drive epochs
    and call ``loss`` / ``train_accuracy``, and on the module-level global
    ``ssd_object_detection`` for target assignment and IoU.
    """

    def __init__(self, sequential, optimizer, scheduler, train_iter, epochs, device, accumulator):
        super().__init__(sequential, optimizer, scheduler, train_iter, epochs, device, accumulator)

    def loss(self, x, y, y_pred):
        """Combined classification + box-regression loss, averaged over the
        batch.

        ``y_pred`` is the 3-element list from TinySSD.forward:
        anchors, class predictions, offset predictions.
        Side effect: stores the offset targets and masks on ``self`` so
        ``train_accuracy`` can reuse them for the same batch.
        """
        a, cp, op = y_pred[0], y_pred[1], y_pred[2]
        o, c, masks = ssd_object_detection.target_bbox(torch.squeeze(a, 0), y.unsqueeze(1))
        self.o = o
        self.masks = masks
        # Fresh loss modules each call; stateless, so this is correct but
        # could be hoisted to __init__.
        l1 = torch.nn.CrossEntropyLoss(reduction='none')
        l2 = torch.nn.L1Loss(reduction='none')
        return self.offset_class_loss(l1, l2, cp, c, op, o, masks).mean()

    def offset_class_loss(self, class_loss, offset_loss, class_predicts, class_labels, offset_predicts, offset_labels,
                          masks):
        """Per-sample loss: mean cross-entropy over anchors plus mean masked
        L1 offset error. Returns a (batch_size,) tensor."""
        batch_size, num_classes = class_predicts.shape[0], class_predicts.shape[2]
        # class_predicts shape: (batch_size, num_anchors, num_classes)

        # Multiplying by the boolean mask zeroes the offset loss for anchors
        # that were not assigned to any ground-truth box.
        l = class_loss(class_predicts.reshape(-1, num_classes), class_labels.reshape(-1)).reshape(batch_size, -1).mean(
            dim=1) + offset_loss(

            offset_predicts * masks, offset_labels * masks).mean(dim=2).mean(dim=1)
        return l

    def train_accuracy(self, x, y, y_pred):
        """Rough mAP-style score for the current batch, with matplotlib
        visualisation of boxes whose IoU exceeds 0.5.

        NOTE(review): boolean-mask indexing ``self.o[self.masks]`` flattens
        the (batch, anchors, 4) tensor to 1-D, so after ``unsqueeze(1)``
        each "bbox" row is a single coordinate, not a 4-vector — confirm
        this is intended.
        """
        a, cp, op = y_pred[0], y_pred[1], y_pred[2]
        target_true_bboxes = self.o[self.masks].unsqueeze(1)
        target_predict_bboxes = op[self.masks].unsqueeze(1)
        mAP = []

        for i in range(target_true_bboxes.shape[0]):
            tb = target_true_bboxes[i]
            pb = target_predict_bboxes[i]
            iou_table = ssd_object_detection.iou(tb, pb, mode='union')
            show_indices = iou_table > 0.5
            pb_show = pb[show_indices]
            tb_show = tb[show_indices]
            if tb_show.shape[0] > 0:
                # Visualise true (red) vs predicted (blue) boxes — a side
                # effect inside an "accuracy" method; slows training.
                fig = plot.imshow(x[i].to('cpu').permute(1, 2, 0).long())
                ssd_object_detection.show_bboxes(fig, tb_show.to('cpu'), x.shape[3], x.shape[2], 'red')
                ssd_object_detection.show_bboxes(fig, pb_show.to('cpu'), x.shape[3], x.shape[2], 'blue')
                plot.show()
            tc = torch.sort(iou_table).indices
            ap = 0.0
            # NOTE(review): ``tp`` and ``fp`` are boolean tensors, so
            # ``tp / (tp + fp)`` performs bool arithmetic rather than
            # counting matches (the usual precision = TP/(TP+FP) would use
            # .sum()); likewise ``recall``. This AP accumulation does not
            # match the standard definition — verify intent before relying
            # on the reported number.
            for c in range(tc.shape[0]):
                bboxes_indices = tc[0:c + 1]
                iou_table_s = iou_table[bboxes_indices]
                tp = iou_table_s > 0.5
                fp = iou_table_s <= 0.5
                precision = tp / (tp + fp)
                recall = tp / tc.shape[0]
                ap += recall * precision
            mAP.append(ap)

        return torch.tensor(mAP).mean()


@torch.no_grad()
def init_weights(m):
    """Xavier-uniform initialise the weights of Conv2d modules.

    Intended for ``module.apply(init_weights)``; modules of any other type
    are left untouched.
    """
    # isinstance is the idiomatic type check (and covers Conv2d subclasses,
    # unlike the exact type(m) == ... comparison).
    if isinstance(m, torch.nn.Conv2d):
        torch.nn.init.xavier_uniform_(m.weight)


def label_position_resize(y):
    """Normalise the box coordinates of a label vector by the image size.

    Element 0 (the class id) is left unchanged; the remaining elements are
    divided by the module-level ``origin_image_size`` set in ``__main__``.
    Mutates *y* in place and returns it (so it composes in a transform
    pipeline).
    """
    normalised = y[1:] / origin_image_size
    y[1:] = normalised
    return y


if __name__ == '__main__':
    # Side length (pixels) used by label_position_resize to normalise box
    # coordinates into [0, 1]; assumes square training images of this size.
    origin_image_size = 256
    target_transformer = torchvision.transforms.Compose([
        label_position_resize
    ])
    banana_train_dataset = BananaDataset(root='./data/banana-detection/bananas_train', image_dir='images',
                                         annotations_file='label.csv', target_transform=target_transformer)
    # One anchor-size pair and one ratio triple per feature-map scale —
    # five entries, matching the five stages iterated in TinySSD.forward.
    sizes = [[0.2, 0.272], [0.37, 0.447], [0.54, 0.619], [0.71, 0.79],
             [0.88, 0.961]]
    ratios = [[1, 2, 0.5]] * 5
    # Requires a CUDA device; will fail on CPU-only machines.
    device = torch.device('cuda:0')
    # q=1: predictors emit q + 1 = 2 scores per anchor (presumably
    # banana + background — confirm against the target_bbox label shift).
    sequential = TinySSD(q=1, sizes=sizes, ratios=ratios)
    sequential.to(device)
    sequential.apply(init_weights)
    optimizer = torch.optim.SGD(sequential.parameters(), lr=0.2, weight_decay=5e-4)
    # Module-level global consumed directly by TinySSD.forward and SSDTrain.
    ssd_object_detection = SSDObjectDetect(device=device)
    # gamma=1.0 keeps the learning rate constant — this scheduler is a no-op.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 30, gamma=1.0)
    accumulator = Accumulator()
    ssd_train = SSDTrain(sequential, optimizer, scheduler,
                         torch.utils.data.DataLoader(banana_train_dataset, batch_size=32, shuffle=True),
                         epochs=30, device=device, accumulator=accumulator
                         )

    ssd_train.train()
