# -*- coding: utf-8 -*-

import os
import time
import numpy as np
import torch
import torch.nn.functional as F
from tensorboardX import SummaryWriter

from backbone.resnet import resnet50
from region_proposal_network import RegionProposalNetwork, compute_rpn_loss
from voc_data_loader import get_voc_data_loader
from voc_dataset import VOCDataset
from faster_rcnn_config import Config

import matplotlib.pyplot as plt
import matplotlib.patches as patches
from utils.rpn.loc2bbox import loc2bbox
from utils.rpn.generate_anchor_base import generate_anchor_base
from utils.rpn.enumerate_shifted_anchor import enumerate_shifted_anchor
from utils.rpn.backbone_network_output_size import get_backbone_network_output_size


class FasterRCNN(object):
    """Bundles a backbone feature extractor and a Region Proposal Network.

    On construction both modules are moved to the GPU and a single Adam
    optimizer is created over both modules' parameters.

    Args:
        backbone: feature-extraction network (e.g. ``resnet50()``); moved to CUDA.
        rpn: ``RegionProposalNetwork`` instance; moved to CUDA.
    """

    def __init__(self, backbone, rpn):
        self.backbone = backbone
        self.rpn = rpn
        self.optimizer = None

        self.backbone.cuda()
        self.rpn.cuda()

        self._optimizer_init()

    def _optimizer_init(self):
        """Create the shared Adam optimizer once; later calls are no-ops."""
        if not self.optimizer:
            params = [{'params': self.backbone.parameters(), 'lr': 1e-3},
                      {'params': self.rpn.parameters(), 'lr': 1e-3}]
            self.optimizer = torch.optim.Adam(params=params,
                                              weight_decay=1e-5)

    def _optimizer_update(self):
        """Decay every parameter group's learning rate by a factor of 10."""
        for param_group in self.optimizer.param_groups:
            param_group['lr'] *= 0.1

    def train(self, data_loader, epoch_start=0, epochs=50, resume_file_path=None):
        """Train backbone + RPN, logging to TensorBoard and checkpointing.

        Args:
            data_loader: yields ``(images, object_labels, object_locations)``
                batches; ``images`` is a list of CHW tensors (variable sizes),
                so each image is forwarded individually with batch dim 1.
            epoch_start: first epoch index (used only for display/naming).
            epochs: number of epochs to run from ``epoch_start``.
            resume_file_path: optional checkpoint produced by this method;
                restores backbone, RPN and optimizer state.
        """
        if resume_file_path:
            resume_file = torch.load(resume_file_path)
            self.backbone.load_state_dict(resume_file['model_backbone'])
            self.rpn.load_state_dict(resume_file['model_rpn'])
            self.optimizer.load_state_dict(resume_file['optimizer'])
        summary_folder = os.path.join(Config.TRAIN_PROCESS_SAVE_FOLDER, 'train_logs', 'rpn')
        summary_writer = SummaryWriter(log_dir=summary_folder)
        # Create the checkpoint directory up front: torch.save() does not
        # create missing directories and would fail at the first save.
        save_folder = os.path.join(Config.TRAIN_PROCESS_SAVE_FOLDER, 'models', 'rpn')
        os.makedirs(save_folder, exist_ok=True)
        global_step = 0
        for epoch in range(epoch_start, epoch_start + epochs):
            for step, data in enumerate(data_loader):
                start = time.time()

                images, object_labels, object_locations = data
                images = [image.cuda() for image in images]
                object_labels = object_labels.cuda()
                object_locations = object_locations.cuda()

                # Images may have different sizes, so forward one at a time.
                predict_labels, predict_locations = list(), list()
                for image in images:
                    p_labels, p_locations = self.rpn(self.backbone(
                        torch.unsqueeze(image, dim=0)))
                    predict_labels.append(p_labels)
                    predict_locations.append(p_locations)
                predict_labels = torch.cat(predict_labels, dim=0)
                predict_locations = torch.cat(predict_locations, dim=0)
                rpn_loss, location_loss, classification_loss = compute_rpn_loss(
                    predict_labels, predict_locations, object_labels, object_locations)

                self.optimizer.zero_grad()
                rpn_loss.backward()
                self.optimizer.step()

                end = time.time()

                global_step += 1
                if global_step % 10 == 0:
                    summary_writer.add_scalar(
                        'classification_loss', classification_loss.item(), global_step)
                    summary_writer.add_scalar(
                        'location_loss', location_loss.item(), global_step)
                    summary_writer.add_scalar(
                        'rpn_loss', rpn_loss.item(), global_step)
                    print(F'epoch: {epoch + 1}, step: {step + 1}, time: {end - start:.2f} '
                          F'classification loss: {classification_loss.item():.4f} '
                          F'location loss: {location_loss.item():.4f} '
                          F'rpn loss: {rpn_loss.item():.4f}')

            # Checkpoint every 5 epochs.
            if (epoch + 1) % 5 == 0:
                save_path = os.path.join(save_folder, F'{epoch + 1:>05d}.pth')
                state = {'model_backbone': self.backbone.state_dict(),
                         'model_rpn': self.rpn.state_dict(),
                         'optimizer': self.optimizer.state_dict()}
                torch.save(state, save_path)

    def predict(self, image):
        """Run a single CHW image through backbone + RPN.

        Returns:
            Tuple ``(p_labels, p_locations)`` — raw RPN objectness scores
            and location regressions (gradients are NOT disabled here;
            wrap calls in ``torch.no_grad()`` for inference).
        """
        p_labels, p_locations = self.rpn(self.backbone(torch.unsqueeze(image, dim=0)))
        return p_labels, p_locations


def recover_image(tensor_image):
    """Convert a normalized CHW float tensor back to an HWC uint8 image.

    Works on a copy of the data: ``Tensor.numpy()`` shares storage with the
    tensor, so the previous in-place ``*= 255`` silently scaled the caller's
    tensor as well (corrupting the image later fed to the network).

    Args:
        tensor_image: CPU tensor of shape (C, H, W); values presumably in
            [0, 1] — values outside that range wrap on the uint8 cast.

    Returns:
        ``numpy.ndarray`` of shape (H, W, C) with dtype ``uint8``.
    """
    # Copy before scaling so the input tensor is left untouched.
    image = tensor_image.numpy().copy()
    image = np.transpose(image, axes=(1, 2, 0))
    image *= 255
    return image.astype(dtype=np.uint8, copy=False)


def test():
    """Visual sanity check of a trained RPN on one VOC sample.

    Loads a checkpoint, predicts on the first image of the loader and plots
    one randomly chosen positive anchor (red), its predicted box (green) and
    the ground-truth box decoded from the target offsets (blue).
    """
    import random

    check_point_file_path = r'D:\python_project_data\faster_rcnn\models\rpn\00050.pth'
    check_point_file = torch.load(check_point_file_path)
    backbone_net = resnet50()
    rpn_net = RegionProposalNetwork()
    backbone_net.load_state_dict(check_point_file['model_backbone'])
    rpn_net.load_state_dict(check_point_file['model_rpn'])

    faster_rcnn = FasterRCNN(backbone=backbone_net, rpn=rpn_net)
    # Inference mode: freeze BatchNorm running stats / disable dropout.
    backbone_net.eval()
    rpn_net.eval()

    anchor_base = generate_anchor_base()
    data_loader = get_voc_data_loader(batch_size=1)
    for step, data in enumerate(data_loader):
        images, object_labels, object_locations = data
        image = images[0]

        show_image = recover_image(image)
        input_image_size = (image.size(1), image.size(2))
        feature_map_size = get_backbone_network_output_size(input_image_size)
        anchors = enumerate_shifted_anchor(anchor_base,
                                           feat_stride=16,
                                           height=feature_map_size[0],
                                           width=feature_map_size[1])

        image = image.cuda()
        # No gradients needed at inference; avoids building the autograd graph.
        with torch.no_grad():
            p_labels, p_locations = faster_rcnn.predict(image)
        p_labels = F.softmax(p_labels, dim=1)
        p_locations = p_locations.detach().cpu().numpy()
        p_labels = p_labels.detach().cpu().numpy()[:, 1]

        print(p_locations[0:4, :])

        # Decode regression offsets into absolute (y1, x1, y2, x2) boxes.
        p_locations = loc2bbox(anchors, p_locations)

        # draw positive anchor
        object_labels = object_labels.numpy()
        object_locations = object_locations.numpy()
        object_locations = loc2bbox(anchors, object_locations)

        pos_anchors = anchors[object_labels == 1]
        pos_p_locations = p_locations[object_labels == 1]
        pos_p_labels = p_labels[object_labels == 1]
        object_locations = object_locations[object_labels == 1]
        print(F'pos anchors: {pos_anchors.shape}, '
              F'pos_p_locations: {pos_p_locations.shape}, '
              F'pos_p_labels: {pos_p_labels.shape} '
              F'object_locations: {object_locations.shape}')
        fig, axes = plt.subplots(1, 1)
        axes.imshow(show_image)
        # Draw one random positive anchor plus its predicted and GT boxes.
        index = random.choice(range(len(pos_anchors)))
        y1, x1, y2, x2 = pos_anchors[index]
        rect = patches.Rectangle(
            (x1, y1), (x2 - x1), (y2 - y1), linewidth=2, edgecolor='r', facecolor='none')
        axes.add_patch(rect)
        y1, x1, y2, x2 = pos_p_locations[index]
        rect = patches.Rectangle(
            (x1, y1), (x2 - x1), (y2 - y1), linewidth=2, edgecolor='g', facecolor='none')
        axes.add_patch(rect)
        y1, x1, y2, x2 = object_locations[index]
        rect = patches.Rectangle(
            (x1, y1), (x2 - x1), (y2 - y1), linewidth=2, edgecolor='b', facecolor='none')
        axes.add_patch(rect)
        plt.show()

        break


def train():
    """Build a fresh backbone + RPN pair and run RPN training on VOC data."""
    backbone_net = resnet50()
    rpn_net = RegionProposalNetwork()
    voc_loader = get_voc_data_loader(batch_size=1)

    model = FasterRCNN(backbone=backbone_net, rpn=rpn_net)
    model.train(data_loader=voc_loader)


def main():
    """Dispatch to the RPN visual check; switch to train() manually to retrain."""
    # train()
    test()


# Run main() only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
