#%%
import time
import os
import torch
import itertools
import math
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from tqdm.notebook import tqdm


# Called with no arguments: resets tensor print options to their defaults
# (effectively a no-op unless options were changed earlier in the session).
torch.set_printoptions()


def show_images(imgs, num_rows, num_cols, scale=2):
    """Display a num_rows x num_cols grid of images.

    Args:
        imgs: sequence of displayable images (PIL images or HxWxC arrays),
            at least num_rows * num_cols long.
        num_rows, num_cols: grid layout.
        scale: size in inches of each grid cell.

    Returns:
        The 2-D numpy array of matplotlib Axes.

    Fixes: figsize is (width, height), so width must track columns and
    height rows (was swapped); squeeze=False keeps `axes` 2-D even when a
    grid dimension is 1 so the axes[i][j] indexing cannot crash; the
    tight_layout call was dropped because it conflicts with the
    constrained_layout engine requested in subplots().
    """
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = plt.subplots(
        num_rows, num_cols, figsize=figsize, squeeze=False,
        constrained_layout=True
    )
    for i in range(num_rows):
        for j in range(num_cols):
            axes[i][j].imshow(imgs[i * num_cols + j])
            axes[i][j].get_xaxis().set_visible(False)
            axes[i][j].get_yaxis().set_visible(False)
    return axes


def apply(img, aug, num_rows=2, num_cols=2, scale=5):
    """Run augmentation `aug` on `img` num_rows * num_cols times and
    display the resulting samples in a grid."""
    samples = []
    for _ in range(num_rows * num_cols):
        samples.append(aug(img))
    show_images(samples, num_rows, num_cols, scale)


def GenerateAnchors(
    feature_map, sizes=[0.75, 0.5, 0.25], ratios=[1, 2, 0.5]
):
    """Generate anchor boxes (xmin, ymin, xmax, ymax, as fractions of the
    image) anchored at every cell of `feature_map`'s spatial grid.

    (size, ratio) combinations pair sizes[0] with every ratio and each
    remaining size with ratios[0], i.e. len(sizes) + len(ratios) - 1
    anchors per location.

    Returns:
        Float tensor of shape (H * W * num_combinations, 4).
    """
    combos = [(sizes[0], r) for r in ratios]
    combos += [(s, ratios[0]) for s in sizes[1:]]

    # Box extents per combination.
    # NOTE(review): width/height use s*r and s/r (not the usual s*sqrt(r),
    # s/sqrt(r)); preserved as-is.
    ext_w = np.array([s * r for s, r in combos])
    ext_h = np.array([s / r for s, r in combos])
    base = np.stack([-ext_w, -ext_h, ext_w, ext_h], axis=1) / 2

    h, w = feature_map.shape[-2:]
    grid_y, grid_x = np.meshgrid(
        np.arange(h) / h, np.arange(w) / w, indexing="ij"
    )
    centers = np.stack(
        [grid_x.ravel(), grid_y.ravel(), grid_x.ravel(), grid_y.ravel()],
        axis=1,
    )
    # Broadcast every base anchor onto every grid position.
    all_anchors = centers[:, None, :] + base[None, :, :]
    return torch.tensor(all_anchors, dtype=torch.float).view(-1, 4)


def bbox_to_rect(bbox, color="r"):
    """Convert an (xmin, ymin, xmax, ymax) box into an unfilled
    matplotlib Rectangle patch drawn in `color`."""
    x_min, y_min, x_max, y_max = bbox[0], bbox[1], bbox[2], bbox[3]
    width = x_max - x_min
    height = y_max - y_min
    return plt.Rectangle(
        (x_min, y_min), width, height, fill=False, color=color, lw=3
    )


def show_boxes(axes, bboxes, labels=None, colors=None):
    """Draw bounding boxes (and optional short text labels) on `axes`.

    bboxes: iterable of (xmin, ymin, xmax, ymax).
    labels: optional list; labels[i] annotates bboxes[i].
    colors: optional color or list of colors, cycled over the boxes.
    """

    def _as_list(value, fallback=None):
        # Normalise a scalar / None argument into a list.
        if value is None:
            return fallback
        if isinstance(value, list):
            return value
        return [value]

    labels = _as_list(labels)
    palette = _as_list(colors, ["r", "c", "b", "w", "m"])
    for idx, box in enumerate(bboxes):
        box_color = palette[idx % len(palette)]
        patch = bbox_to_rect(box, box_color)
        axes.add_patch(patch)
        if labels and len(labels) > idx:
            # White text on coloured boxes; black text on white boxes.
            txt_color = "k" if box_color == "w" else "w"
            axes.text(
                patch.xy[0],
                patch.xy[1],
                str(labels[idx])[:3],
                va="center",
                ha="center",
                fontsize=6,
                color=txt_color,
                bbox=dict(facecolor=box_color, lw=0),
            )


def intersection(bboxes1, bboxes2):
    """Pairwise intersection areas between two sets of corner-format
    (xmin, ymin, xmax, ymax) boxes.

    Returns a (len(bboxes1), len(bboxes2)) tensor.
    """
    # Broadcast to all pairs: the overlap's top-left corner is the
    # elementwise max of the two top-left corners, its bottom-right the
    # elementwise min of the two bottom-right corners.
    top_left = torch.max(bboxes1[:, None, :2], bboxes2[None, :, :2])
    bottom_right = torch.min(bboxes1[:, None, 2:], bboxes2[None, :, 2:])
    # Negative extents mean no overlap -> clamp to zero area.
    extents = (bottom_right - top_left).clamp(min=0)
    return extents[..., 0] * extents[..., 1]


def jaccard(bboxes1, bboxes2):
    """Pairwise IoU (Jaccard index) between two sets of corner-format boxes.

    Returns a (len(bboxes1), len(bboxes2)) tensor.
    """
    widths1 = bboxes1[:, 2] - bboxes1[:, 0]
    heights1 = bboxes1[:, 3] - bboxes1[:, 1]
    widths2 = bboxes2[:, 2] - bboxes2[:, 0]
    heights2 = bboxes2[:, 3] - bboxes2[:, 1]
    areas1 = widths1 * heights1
    areas2 = widths2 * heights2
    # Pairwise intersection areas (inlined from `intersection`).
    top_left = torch.max(bboxes1[:, None, :2], bboxes2[None, :, :2])
    bottom_right = torch.min(bboxes1[:, None, 2:], bboxes2[None, :, 2:])
    wh = (bottom_right - top_left).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    union = areas1[:, None] + areas2[None, :] - inter
    return inter / union


def assign_anchor(bbox, anchor, jaccard_threshold=0.5):
    """Assign each anchor a ground-truth box index (-1 for background).

    Args:
        bbox: (num_bbox, 4) ground-truth boxes, corner format.
        anchor: (num_anchor, 4) anchor boxes, corner format.
        jaccard_threshold: minimum IoU for the second-pass assignment.

    Returns:
        LongTensor of shape (num_anchor,) indexing into `bbox`, with -1
        for anchors left unassigned.

    Fixes over the previous version:
      * Pass 1 now takes the global argmax of the IoU matrix and retires
        both the matched row (gt box) and column (anchor), so two ground
        truth boxes can no longer claim the same anchor (the old loop only
        wiped the already-consumed row).
      * Pass 2 previously assigned through chained fancy indexing
        (`a[mask][idx] = ...`), which writes into a temporary copy and was
        a silent no-op; it now writes through computed flat indices.
    """
    num_anchor, num_bbox = anchor.shape[0], bbox.shape[0]
    jaccard_matrix = jaccard(bbox, anchor)  # (num_bbox, num_anchor)
    assigned_idx = np.full(num_anchor, -1, dtype=np.int64)

    # Pass 1: give every ground-truth box its best still-available anchor.
    jaccard_cp = jaccard_matrix.numpy().copy()
    for _ in range(num_bbox):
        i, j = np.unravel_index(np.argmax(jaccard_cp), jaccard_cp.shape)
        assigned_idx[j] = i
        jaccard_cp[i, :] = float("-inf")
        jaccard_cp[:, j] = float("-inf")

    # Pass 2: every still-unassigned anchor takes its best gt box when the
    # IoU clears the threshold.
    unassigned = np.flatnonzero(assigned_idx == -1)
    if unassigned.size:
        ious = jaccard_matrix.numpy()[:, unassigned]  # (num_bbox, k)
        best_bbox = ious.argmax(axis=0)
        keep = ious[best_bbox, np.arange(unassigned.size)] > jaccard_threshold
        assigned_idx[unassigned[keep]] = best_bbox[keep]

    return torch.tensor(assigned_idx, dtype=torch.long)


def xy_to_cxcy(xy):
    """Convert corner-format boxes (xmin, ymin, xmax, ymax) to center
    format (cx, cy, w, h). `xy` is an (N, 4) tensor."""
    mins = xy[:, :2]
    maxs = xy[:, 2:]
    centers = (mins + maxs) / 2
    sizes = maxs - mins
    return torch.cat([centers, sizes], dim=1)


def BatchAnchorTarget(anchors, label):
    """Compute per-anchor regression targets and class labels for a batch.

    Args:
        anchors: (num_anchor, 4) corner-format anchors shared by the batch.
        label: (batch, num_gt, 5) tensor; each row is
            (class, xmin, ymin, xmax, ymax).

    Returns:
        [batch_offset, batch_mask, batch_cls_labels] where
        batch_offset:     (batch, num_anchor * 4) regression targets,
        batch_mask:       (batch, num_anchor * 4), 1.0 where the anchor is
                          matched to a gt box and 0.0 for background,
        batch_cls_labels: (batch, num_anchor) class column of the matched
                          gt box, -1 for background.

    Fix: offset_xy previously subtracted the anchor's width/height
    (center_anc[:, 2:]) from the gt center instead of the anchor's center
    (center_anc[:, :2]); it now uses the standard SSD encoding.
    """
    center_anc = xy_to_cxcy(anchors)

    def _AnchorTargetOne(anchors, label):
        assigned_idx = assign_anchor(label[:, 1:], anchors)
        # 1.0 for matched anchors, repeated across the 4 box coordinates.
        bbox_mask = (
            (assigned_idx > -1).float().unsqueeze(-1)
        ).repeat(1, 4)
        # Prepend a background row so assigned_idx == -1 maps to it.
        anc_labels = torch.cat(
            [torch.tensor([[-1, 0, 0, 0, 0]]), label]
        )[assigned_idx + 1]
        center_assigned_bb = xy_to_cxcy(anc_labels[:, 1:])
        # SSD offset encoding:
        #   xy: 10 * (gt_center - anchor_center) / anchor_wh
        offset_xy = (
            10.0
            * (center_assigned_bb[:, :2] - center_anc[:, :2])
            / center_anc[:, 2:]
        )
        #   wh: 5 * log(gt_wh / anchor_wh); eps guards log(0) on background.
        offset_wh = 5.0 * torch.log(
            1e-6 + center_assigned_bb[:, 2:] / center_anc[:, 2:]
        )
        offset = torch.cat([offset_xy, offset_wh], dim=1) * bbox_mask
        return offset.view(-1), bbox_mask.view(-1), anc_labels[:, 0]

    batch_offset, batch_mask, batch_cls_labels = [], [], []
    for b in range(label.shape[0]):
        offset, bbox_mask, cls_labels = _AnchorTargetOne(
            anchors, label[b]
        )
        batch_offset.append(offset)
        batch_mask.append(bbox_mask)
        batch_cls_labels.append(cls_labels)
    # NOTE(review): a background class label of -1 is not a valid target
    # for nn.CrossEntropyLoss downstream — labels likely need shifting so
    # background == 0; verify against the training loop.
    return [
        torch.stack(batch_offset),
        torch.stack(batch_mask),
        torch.stack(batch_cls_labels),
    ]


def non_max_suppression(anc_info_lis, nms_threshold=0.5):
    """Greedy NMS over rows of (cls, score, xmin, ymin, xmax, ymax).

    Repeatedly keeps the highest-score row and discards remaining rows
    whose IoU with it exceeds `nms_threshold`.

    Returns:
        The kept rows stacked into a tensor, in descending score order.

    Cleanup: removed the stray debug print that fired whenever the
    candidate list was exhausted, plus dead commented-out code.
    """
    # Ascending sort by score (column 1); the best candidate is always the
    # last remaining row.
    remaining = torch.stack(
        sorted(anc_info_lis, key=lambda item: item[1])
    )
    kept = []
    while remaining.shape[0] != 0:
        best = remaining[-1]
        kept.append(best)
        remaining = remaining[:-1]
        if remaining.shape[0] == 0:
            break
        iou = jaccard(best[2:].unsqueeze(0), remaining[:, 2:])
        # Keep only rows overlapping the chosen box by <= threshold.
        remaining = remaining[
            ~(iou.T > nms_threshold).repeat(1, 6)
        ].reshape(-1, 6)
    return torch.stack(kept)


def GenerateMajorAnchors(
    fmap_w, fmap_h, sizes=[0.75, 0.5, 0.25], ratios=[1, 2, 0.5]
):
    """Anchors for an fmap_w x fmap_h feature map, shifted by half a cell
    so each anchor is centred on its grid cell rather than its top-left
    corner.

    Fix: `ratios` was accepted but never forwarded to GenerateAnchors, so
    the default ratios were silently used for every call; it is now passed
    through.
    """
    offset_x, offset_y = 1.0 / fmap_w, 1.0 / fmap_h
    anchors = GenerateAnchors(
        torch.zeros(fmap_h, fmap_w), sizes=sizes, ratios=ratios
    ) + torch.tensor(
        [offset_x / 2, offset_y / 2, offset_x / 2, offset_y / 2]
    )
    return anchors


class Car2dDataset(torch.utils.data.Dataset):
    """KITTI-style 2-D car detection dataset.

    Images are loaded from `data_dir` by zero-padded index
    ("000000.png", ...), converted to RGB and resized to `image_size`.
    """

    # Directory containing the training images.
    data_dir = "data_object_image_2/training/image_2"
    # Every image is resized to this (width, height).
    image_size = (256, 256)

    def __init__(self, training, label, transforms):
        # `training` selects which transform pipeline is applied.
        self.training = training
        self.label = label
        self.transforms = transforms

    def __len__(self):
        return len(self.label)

    def __getitem__(self, index):
        # File names are the index left-padded with zeros to 6 digits.
        file_name = f"{index:06d}.png"
        pil_img = Image.open(os.path.join(self.data_dir, file_name))
        pil_img = pil_img.convert("RGB").resize(self.image_size)
        target = self.label[index]
        tensor_img = self.transforms[self.training](pil_img)
        return tensor_img, target


def cls_predictor(input_channels, num_anchors, num_classes):
    """3x3 same-size conv producing, per spatial cell, one score for each
    (anchor, class-or-background) combination."""
    out_channels = num_anchors * (num_classes + 1)
    return nn.Conv2d(input_channels, out_channels, kernel_size=3,
                     stride=1, padding=1)


def bbox_predictor(input_channels, num_anchors):
    """3x3 same-size conv producing 4 box-offset values per anchor per
    spatial cell."""
    return nn.Conv2d(input_channels, num_anchors * 4, kernel_size=3,
                     stride=1, padding=1)


def flatten_pred(pred):
    """Flatten an (N, C, H, W) prediction map to (N, H*W*C), moving
    channels last so each location's predictions stay contiguous."""
    batch = pred.size(0)
    channels_last = pred.permute(0, 2, 3, 1)
    return channels_last.reshape(batch, -1)


def concat_preds(preds):
    """Flatten each multi-scale prediction map to (N, -1) with channels
    last (inlined from `flatten_pred`) and concatenate along dim 1."""
    flat = [
        p.permute(0, 2, 3, 1).reshape(p.size(0), -1) for p in preds
    ]
    return torch.cat(flat, dim=1)


def down_sample_blk(in_channels, num_channels):
    """Two conv-BN-ReLU stages followed by a 2x2 max-pool: halves the
    spatial resolution and maps in_channels -> num_channels."""
    layers = []
    channels = in_channels
    for _ in range(2):
        layers.extend([
            nn.Conv2d(channels, num_channels, 3, 1, 1),
            nn.BatchNorm2d(num_channels),
            nn.ReLU(),
        ])
        channels = num_channels
    layers.append(nn.MaxPool2d(2, 2))
    return nn.Sequential(*layers)


def base_net():
    """Backbone: three stacked down-sampling blocks, 3 -> 16 -> 32 -> 64
    channels, reducing spatial size by a factor of 8."""
    channel_plan = [3, 16, 32, 64]
    stages = [
        down_sample_blk(c_in, c_out)
        for c_in, c_out in zip(channel_plan, channel_plan[1:])
    ]
    return nn.Sequential(*stages)


def create_anchors(f_map_sizes, steps, sizes):
    """Build (cx, cy, w, h) anchors, normalised by a 256-pixel image, for
    a list of square feature maps.

    Per cell: one box of size sizes[i+1], one of sizes[i], and a pair of
    aspect-ratio variants of sizes[i] for each ratio in aspect_ratios[i].

    NOTE(review): aspect_ratios holds a single entry, so any call with
    more than one feature map would raise IndexError; this function is
    currently unused (its only call site is commented out).
    """
    scale = 256.0
    steps = [s / scale for s in steps]
    sizes = [s / scale for s in sizes]
    aspect_ratios = ((2,),)
    boxes = []
    for i, fmsize in enumerate(f_map_sizes):
        step = steps[i]
        for h, w in itertools.product(range(fmsize), repeat=2):
            cx, cy = (w + 0.5) * step, (h + 0.5) * step
            big, base = sizes[i + 1], sizes[i]
            boxes.append((cx, cy, big, big))
            boxes.append((cx, cy, base, base))
            for ar in aspect_ratios[i]:
                root = math.sqrt(ar)
                boxes.append((cx, cy, base * root, base / root))
                boxes.append((cx, cy, base / root, base * root))
    return torch.Tensor(boxes)


def blk_forward(X, blk, size, ratio, cls_predictor, bbox_predictor):
    """Run one SSD stage: apply `blk`, generate this scale's anchors, and
    compute class and box predictions from the stage's feature map.

    Returns (feature_map, anchors, cls_preds, bbox_preds).
    """
    feature_map = blk(X)
    # NOTE(review): shape[2] is height and shape[3] width, while
    # GenerateMajorAnchors names its first two parameters (w, h) — the
    # original argument order is preserved; harmless for square maps.
    anchors = GenerateMajorAnchors(
        feature_map.shape[2], feature_map.shape[3], size, ratio
    )
    return (
        feature_map,
        anchors,
        cls_predictor(feature_map),
        bbox_predictor(feature_map),
    )


class TinySSD(nn.Module):
    """Five-stage tiny SSD detector.

    Stages: base_net (3 -> 64 channels, /8 resolution), three 128-channel
    down-sampling blocks, and a global average pool. Each stage gets its
    own class- and box-prediction heads.

    Relies on the module-level `sizes`, `ratios` and `num_anchors`
    definitions for anchor configuration.

    Fix: forward() previously reshaped class predictions with a
    hard-coded per-image anchor count (5444, valid only for 256x256
    inputs); the reshape is now derived from the batch dimension.
    """

    def __init__(self, in_channels, num_classes):
        super(TinySSD, self).__init__()
        self.num_classes = num_classes

        self.blk = nn.ModuleList(
            [
                base_net(),
                down_sample_blk(64, 128),
                down_sample_blk(128, 128),
                down_sample_blk(128, 128),
                nn.AdaptiveAvgPool2d((1, 1)),
            ]
        )
        self.cls = nn.ModuleList(
            [
                cls_predictor(64, num_anchors, num_classes),
                cls_predictor(128, num_anchors, num_classes),
                cls_predictor(128, num_anchors, num_classes),
                cls_predictor(128, num_anchors, num_classes),
                cls_predictor(128, num_anchors, num_classes),
            ]
        )
        self.bbox = nn.ModuleList(
            [
                bbox_predictor(64, num_anchors),
                bbox_predictor(128, num_anchors),
                bbox_predictor(128, num_anchors),
                bbox_predictor(128, num_anchors),
                bbox_predictor(128, num_anchors),
            ]
        )

    def forward(self, X):
        """Return (anchors, cls_preds, bbox_preds) over all five stages.

        anchors:    (total_anchors, 4)
        cls_preds:  (batch, total_anchors, num_classes + 1)
        bbox_preds: (batch, total_anchors * 4)
        """
        batch_size = X.shape[0]
        anchors, cls_preds, bbox_preds = [], [], []
        for i in range(5):
            X, anchor, cls_pred, bbox_pred = blk_forward(
                X,
                self.blk[i],
                sizes[i],
                ratios[i],
                self.cls[i],
                self.bbox[i],
            )
            anchors.append(anchor)
            cls_preds.append(cls_pred)
            bbox_preds.append(bbox_pred)
        return (
            torch.cat(anchors),
            concat_preds(cls_preds).reshape(
                (batch_size, -1, self.num_classes + 1)
            ),
            concat_preds(bbox_preds),
        )


# Per-stage anchor sizes for the five TinySSD feature maps.
# NOTE(review): these values are fractions of the image scaled by 256
# (i.e. pixels), but GenerateMajorAnchors/GenerateAnchors treat sizes as
# fractions of the image — confirm the intended units before training.
sizes = [
    [0.2 * 256, 0.272 * 256],
    [0.37 * 256, 0.447 * 256],
    [0.54 * 256, 0.619 * 256],
    [0.71 * 256, 0.79 * 256],
    [0.88 * 256, 0.961 * 256],
]
# Same aspect ratios at every stage.
ratios = [[1, 2, 0.5]] * 5
# Anchors per spatial location: sizes[0] pairs with every ratio, the
# remaining sizes pair with ratios[0] only.
num_anchors = len(sizes[0]) + len(ratios[0]) - 1


def init_weights(m):
    """Xavier-initialise the weights of Linear and Conv2d modules;
    intended for use with `nn.Module.apply`."""
    # isinstance instead of `type(m) ==` so subclasses are covered too.
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        torch.nn.init.xavier_uniform_(m.weight)


# Single foreground class (car) on 3-channel RGB input.
net = TinySSD(3, num_classes=1)
net.apply(init_weights)

# Pre-computed ground-truth labels, one entry per image.
training_label = torch.load("training_label.pt")

# Transform pipelines keyed by the dataset's `training` flag.
transforms = {
    True: torchvision.transforms.Compose(
        [
            # torchvision.transforms.CenterCrop((360, 1200)),
            torchvision.transforms.ToTensor(),
        ]
    ),
    False: torchvision.transforms.ToTensor(),
}
batch_size = 32
dataset = Car2dDataset(True, training_label, transforms)
data_size = len(dataset)
# NOTE(review): the 20% split goes to train_set and the remaining 80% to
# valid_set — likely swapped; confirm the intended proportions.
train_set, valid_set = torch.utils.data.random_split(
    dataset, [int(data_size * 0.2), data_size - int(data_size * 0.2)]
)
# NOTE(review): this sets an attribute on the Subset wrapper, not on the
# underlying Car2dDataset, so the eval transform is never actually used.
valid_set.training = False
train_iter = torch.utils.data.DataLoader(
    train_set, batch_size, shuffle=True
)
valid_iter = torch.utils.data.DataLoader(valid_set, batch_size)
learning_rate = 1e-3
weight_decay = 5e-4
optimizer = optim.SGD(
    net.parameters(), lr=learning_rate, weight_decay=weight_decay
)

#%%
# Classification loss over per-anchor class scores, plus L1 loss over the
# masked box offsets.
cls_loss = torch.nn.CrossEntropyLoss()
bbox_loss = torch.nn.L1Loss()


def calc_loss(
    cls_preds, cls_labels, bbox_preds, bbox_labels, bbox_masks
):
    """Joint detection loss: cross-entropy on anchor classes plus L1 on
    the masked box offsets.

    Args:
        cls_preds:  (batch, num_anchor, num_classes + 1) raw scores.
        cls_labels: (batch, num_anchor) integer class targets.
        bbox_preds/bbox_labels/bbox_masks: (batch, num_anchor * 4).

    Fix: CrossEntropyLoss expects the class dimension at dim 1, so the
    previous direct call on (batch, anchors, classes) raised a shape
    error; predictions and targets are now flattened to (N, C) / (N,).
    NOTE(review): background anchors carry label -1 from
    BatchAnchorTarget, which cross-entropy rejects — confirm that labels
    are shifted so background == 0 before training.
    """
    num_cls = cls_preds.shape[-1]
    cls_l = F.cross_entropy(
        cls_preds.reshape(-1, num_cls), cls_labels.reshape(-1).long()
    )
    # The mask zeroes both sides for background anchors, so they add no
    # box-regression loss.
    bbox_l = F.l1_loss(
        bbox_preds * bbox_masks, bbox_labels * bbox_masks
    )
    return cls_l + bbox_l


def cls_eval(cls_preds, cls_labels):
    """Number of anchors whose argmax class prediction matches the label."""
    predicted = cls_preds.argmax(axis=-1)
    return int((predicted == cls_labels).sum())


def bbox_eval(bbox_preds, bbox_labels, bbox_masks):
    """Sum of absolute offset errors over the anchors selected by the
    mask."""
    masked_err = (bbox_labels - bbox_preds) * bbox_masks
    return masked_err.abs().sum().item()


# Training loop.
# Fixes: `start` was never defined (NameError in the log line), the class
# counter `n` was never incremented (ZeroDivisionError at epoch 5), and
# `m += bbox_labels.size` added a bound method to an int (TypeError) —
# numel() is used for both counters now.
start = time.time()
for epoch in range(20):
    acc_sum, mae_sum, n, m = 0, 0, 0, 0
    for X, Y in tqdm(train_iter, leave=False):
        anchors, cls_preds, bbox_preds = net(X)
        bbox_labels, bbox_masks, cls_labels = BatchAnchorTarget(
            anchors, Y
        )
        l = calc_loss(
            cls_preds, cls_labels, bbox_preds, bbox_labels, bbox_masks
        )
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
        acc_sum += cls_eval(cls_preds, cls_labels)
        n += cls_labels.numel()
        mae_sum += bbox_eval(bbox_preds, bbox_labels, bbox_masks)
        m += bbox_labels.numel()
    if (epoch + 1) % 5 == 0:
        print(
            "epoch %2d, class err %.2e, bbox mae %.2e, time %.1f sec"
            % (
                epoch + 1,
                1 - acc_sum / n,
                mae_sum / m,
                time.time() - start,
            )
        )
#%%
"""***********************"""
# Sanity-check cell: load one raw image and report its native size.
img = Image.open("data_object_image_2/training/image_2/000052.png")
w, h = img.size
print(w, h)

plt.figure(figsize=(12, 6))
# fig = plt.imshow(img)

# Hand-crafted anchors (pixel coordinates) for exercising the
# target-assignment and NMS helpers below.
anchors = torch.tensor(
    [
        [200, 100, 400, 200],
        [210, 110, 410, 210],
        [250, 100, 450, 200],
        [200, 150, 400, 250],
        [300, 100, 500, 200],
        [500, 100, 700, 200],
    ]
)
# One batch with two labelled boxes: (class, xmin, ymin, xmax, ymax).
labels = torch.tensor(
    [
        [
            [0, 250, 150, 450, 250],
            [1, 550, 150, 750, 250],
        ]
    ]
)
bbox = torch.tensor([[250, 150, 450, 250], [550, 150, 750, 250]])

# Exercise anchor-target assignment on the hand-made example above.
batch_offset, batch_mask, batch_cls_labels = BatchAnchorTarget(
    anchors, labels
)
# Exercise NMS: prepend synthetic (class, score) columns so each row has
# the expected (cls, score, x1, y1, x2, y2) layout.
# NOTE(review): `anchors` is int64 while the prepended columns are
# float32 — torch.cat may reject the mixed dtypes; confirm at runtime.
nms_anchor_info = non_max_suppression(
    torch.cat(
        [
            torch.tensor(
                [
                    [1 + i / 10, 2 + i / 10]
                    for i in range(len(anchors))
                ]
            ),
            anchors,
        ],
        axis=1,
    )
)
# show_boxes(fig.axes, anchors * torch.tensor([w, h, w, h]))

# Fractional anchors on a 3x3 grid with a single size.
anchors = GenerateMajorAnchors(3, 3, sizes=[0.2])


# Visualise one training batch with its (rescaled) ground-truth boxes.
# Fix: `dataloader` was never defined anywhere in the file — the loader
# created above is `train_iter`. The batch is also fetched once instead
# of twice (the two next(iter(...)) calls drew different shuffled
# batches, so the single image shown did not belong to the grid below).
batch = next(iter(train_iter))
plt.imshow(batch[0][0].permute(1, 2, 0))
imgs = batch[0].permute(0, 2, 3, 1)
bboxes = batch[1][:, :, 1:]
plt.figure(figsize=(12, 6))
axes = show_images(imgs, 2, 2).flatten()
for ax, bbox in zip(axes, bboxes):
    # NOTE(review): the /4 rescale assumes labels are in ~1024-px
    # coordinates for a 256-px display — confirm the label units.
    show_boxes(ax, bbox / 4)

# Smoke-test the network on a dummy batch and report output shapes.
X = torch.zeros((32, 3, 256, 256))
anchors, cls_preds, bbox_preds = net(X)

print("output anchors:", anchors.shape)
print("output class preds:", cls_preds.shape)
print("output bbox preds:", bbox_preds.shape)
#%%
# Scratch cell: REPL-style probes whose results are discarded; no effect
# on the rest of the script.
torch.cuda.is_available()

nn.Flatten