#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
# File       : MAPNet.py
# Author     ：CodeCat
# version    ：python 3.7
# Software   ：Pycharm
"""
import sys
from functools import partial, partialmethod

import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
from sklearn.neighbors import KDTree
from sklearn.preprocessing import normalize

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models

sys.path.append("E:/workspace/SOMatch")

class ResNet(nn.Module):
    """ResNet-50 backbone whose deeper stages use dilated convolutions so
    the output stride is 8 (default) or 16 instead of 32.

    NOTE(review): relies on a project-local ``resnet50`` factory (expected
    on the SOMatch path) exposing a deep-stem layout (conv1..conv3) that
    torchvision's resnet50 does not have — confirm before running standalone.
    """

    # Wrapped-model attributes re-exposed on this module, in forward order.
    _STEM_ATTRS = (
        'conv1', 'bn1', 'relu1',
        'conv2', 'bn2', 'relu2',
        'conv3', 'bn3', 'relu3',
        'maxpool', 'layer1', 'layer2', 'layer3', 'layer4',
    )

    def __init__(self, dilate_scale=8, pretrained=True):
        super(ResNet, self).__init__()
        model = resnet50(pretrained)

        if dilate_scale == 8:
            # 8x downsampling: remove the strides of layer3 and layer4.
            model.layer3.apply(partial(self._nostride_dilate, dilate=2))
            model.layer4.apply(partial(self._nostride_dilate, dilate=4))
        elif dilate_scale == 16:
            # 16x downsampling: only layer4 loses its stride.
            model.layer4.apply(partial(self._nostride_dilate, dilate=2))

        # Register each backbone piece as a submodule of this wrapper so the
        # state_dict keys stay identical to direct attribute assignment.
        for attr in self._STEM_ATTRS:
            setattr(self, attr, getattr(model, attr))

    @staticmethod
    def _nostride_dilate(m, dilate):
        """Turn a stride-2 conv into stride-1 and dilate 3x3 kernels so the
        receptive field is preserved without further downsampling."""
        if 'Conv' not in m.__class__.__name__:
            return
        if m.stride == (2, 2):
            m.stride = (1, 1)
            if m.kernel_size == (3, 3):
                m.dilation = (dilate // 2, dilate // 2)
                m.padding = (dilate // 2, dilate // 2)
        elif m.kernel_size == (3, 3):
            m.dilation = (dilate, dilate)
            m.padding = (dilate, dilate)

    def forward(self, x):
        """Return (layer3 output as auxiliary features, layer4 output)."""
        for stage in (self.conv1, self.bn1, self.relu1,
                      self.conv2, self.bn2, self.relu2,
                      self.conv3, self.bn3, self.relu3,
                      self.maxpool, self.layer1, self.layer2):
            x = stage(x)
        x_aux = self.layer3(x)
        x = self.layer4(x_aux)
        return x_aux, x


class MobileNetV2(nn.Module):
    """MobileNetV2 backbone whose final stages are dilated so the overall
    downsampling factor is 8 (default) or 16 instead of 32.

    NOTE(review): depends on a project-local ``mobilenetv2`` factory on the
    SOMatch path (torchvision is not used here) — confirm availability.
    """

    def __init__(self, downsample_factor=8, pretrained=True):
        super(MobileNetV2, self).__init__()

        model = mobilenetv2(pretrained)
        # Drop the final block; keep only the feature extractor.
        self.features = model.features[:-1]

        self.total_idx = len(self.features)
        # Indices of the feature blocks that halve the spatial resolution.
        self.down_idx = [2, 4, 7, 14]

        if downsample_factor == 8:
            # Undo the last two stride-2 stages via dilation 2 then 4.
            self._dilate_range(self.down_idx[-2], self.down_idx[-1], 2)
            self._dilate_range(self.down_idx[-1], self.total_idx, 4)
        else:
            # Only the last stride-2 stage is undone (16x downsampling).
            self._dilate_range(self.down_idx[-1], self.total_idx, 2)

    def _dilate_range(self, start, stop, dilate):
        """Apply the no-stride/dilate rewrite to features[start:stop]."""
        for idx in range(start, stop):
            self.features[idx].apply(partial(self._nostride_dilate, dilate=dilate))

    @staticmethod
    def _nostride_dilate(m, dilate):
        """Replace stride-2 convs with stride-1 and dilate 3x3 kernels so
        the receptive field is kept while resolution is preserved."""
        if 'Conv' not in m.__class__.__name__:
            return
        if m.stride == (2, 2):
            m.stride = (1, 1)
            if m.kernel_size == (3, 3):
                m.dilation = (dilate // 2, dilate // 2)
                m.padding = (dilate // 2, dilate // 2)
        elif m.kernel_size == (3, 3):
            m.dilation = (dilate, dilate)
            m.padding = (dilate, dilate)

    def forward(self, x):
        """Return (block-13 output as auxiliary features, final features)."""
        # Split after block 13 to expose a mid-level feature map.
        x_aux = self.features[:14](x)
        x = self.features[14:](x_aux)
        return x_aux, x


def weights_init(net, init_type='normal', init_gain=0.02):
    """Initialise every conv / batch-norm layer of *net* in place.

    net: module whose conv and BatchNorm2d weights are (re-)initialised.
    init_type: one of 'normal', 'xavier', 'kaiming', 'orthogonal'.
    init_gain: std (normal) or gain (xavier/orthogonal) for the initialiser.
    Raises NotImplementedError from the visitor for an unknown init_type.
    """
    def init_func(m):
        name = m.__class__.__name__
        if hasattr(m, 'weight') and 'Conv' in name:
            if init_type == 'normal':
                torch.nn.init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                torch.nn.init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                torch.nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                torch.nn.init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
        elif 'BatchNorm2d' in name:
            # GAN-style BN init: weight ~ N(1, 0.02), bias = 0.
            torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
            torch.nn.init.constant_(m.bias.data, 0.0)

    print('initialize network with %s type' % init_type)
    net.apply(init_func)


class _SPAPModule(nn.Module):
    def __init__(self, in_channels, pool_sizes, norm_layer):
        super(_SPAPModule, self).__init__()
        out_channels = in_channels // len(pool_sizes)

        self.stages = nn.ModuleList(
            [self._make_stages(in_channels, out_channels, pool_size, norm_layer) for pool_size in pool_sizes]
        )

        self.bottleneck = nn.Sequential(
            # nn.Conv2d(in_channels + (out_channels * len(pool_sizes)), out_channels, kernel_size=3, padding=1, bias=False),
            nn.Conv2d(out_channels * len(pool_sizes), out_channels * len(pool_sizes), kernel_size=1, bias=False),
            norm_layer(out_channels * len(pool_sizes)),
            nn.ReLU(inplace=True) #,
            # nn.Dropout2d(0.1)
        )

    @staticmethod
    def _make_stages(in_channels, out_channels, bin_size, norm_layer):
        prior = nn.AdaptiveAvgPool2d(output_size=bin_size)
        conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
        bn = norm_layer(out_channels)
        relu = nn.ReLU(inplace=True)
        return nn.Sequential(prior, conv, bn, relu)

    def forward(self, features):
        h, w = features.size()[2], features.size()[3]
        # pyramids = [features]
        pyramids = []
        pyramids.extend(
            [F.interpolate(stage(features), size=(h, w), mode='bilinear', align_corners=True) for stage in self.stages])
        output = self.bottleneck(torch.cat(pyramids, dim=1))
        return output


class _SFAttention(nn.Module):
    def __init__(self, in_channel):
        super().__init__()
        # self.attention = nn.Sequential(
        #     nn.BatchNorm2d(in_channel),
        #     nn.Conv2d(in_channel, in_channel, kernel_size=1, bias=False),
        #     nn.Conv2d(in_channel, 1, kernel_size=1, bias=False),
        #     nn.Softplus()
        # )
        self.attention = nn.Sequential(
            nn.Conv2d(in_channel, in_channel, kernel_size=1, bias=False),
            nn.BatchNorm2d(in_channel),
            nn.Softplus(),
            nn.Conv2d(in_channel, 1, kernel_size=1, bias=False),
            nn.BatchNorm2d(1),
            nn.Softplus()
        )

    def forward(self, features):
        attention = self.attention(features)
        return attention


class MAPNet(nn.Module):
    """Matching-attention network.

    A single-channel ResNet-50 backbone feeds a spatial pyramid
    (_SPAPModule) whose output is weighted by a spatial attention head
    (_SFAttention); the attention-pooled descriptor is classified by a
    linear layer.
    """

    def __init__(self, num_classes=100, pretrained=True):
        super(MAPNet, self).__init__()

        backbone_channels = 2048  # resnet50 layer4 output width
        self.backbone = models.resnet50(pretrained=pretrained)
        # Accept single-channel (grey / SAR) input instead of RGB.
        self.backbone.conv1 = nn.Conv2d(1, 64, kernel_size=7, padding=3, stride=2, bias=False)
        # Neutralise the classification tail; forward() stops at layer4.
        self.backbone.avgpool = nn.Sequential()
        self.backbone.fc = nn.Sequential()
        self.pyramid = _SPAPModule(backbone_channels, pool_sizes=(1, 2, 4), norm_layer=nn.BatchNorm2d)

        # 2048 // 3 * 3 == 2046: channel count after the pyramid bottleneck.
        pyramid_channel = backbone_channels // 3 * 3
        self.attention = _SFAttention(pyramid_channel)
        self.fc = nn.Linear(pyramid_channel, num_classes)

    def forward(self, x):
        """Return (class scores, pooled descriptor, feature map, attention map)."""
        b = self.backbone
        # Run the backbone stages explicitly so avgpool / fc are bypassed.
        for stage in (b.conv1, b.bn1, b.relu, b.maxpool,
                      b.layer1, b.layer2, b.layer3, b.layer4):
            x = stage(x)
        feature = self.pyramid(x)
        atten = self.attention(feature)
        # Attention-weighted global pooling over the spatial dimensions.
        patch_feature = (feature * atten).sum(dim=(2, 3))
        scores = self.fc(patch_feature)
        return scores, patch_feature, feature, atten


class MapnetTester:
    """Matches SAR template images against optical search images.

    Runs each modality through its trained MAPNet-style model, keeps the
    highest-attention feature locations, pairs their descriptors with a
    KD-tree nearest-neighbour search, verifies the pairs with RANSAC
    homography estimation, and writes match visualisations to disk.
    """

    def __init__(self, sar_net: torch.nn.Module, opt_net: torch.nn.Module, *, dimension=100, list_file="") -> None:
        # Both networks must return (scores, pooled, feature_map, attention_map).
        self.sar_net = sar_net
        self.opt_net = opt_net
        # PCA output dimensionality used to whiten the descriptors.
        self.dim = dimension
        self.pca = PCA(n_components=self.dim)
        # Number of top-attention locations kept per image.
        self.select_num = 200
        # Upper bound on nearest-neighbour pairs passed to RANSAC.
        self.pair2match = 200

        pass


    def test(self, templete_img, search_img) -> None:
        """Match each template/search image pair and save drawn matches.

        Both arguments are NHWC float arrays in [0, 1] with equal batch
        sizes.  NOTE(review): assumes CUDA is available, a feature stride of
        32 px, and (per the debug prints below) a batch size of 4 — confirm
        all three against the models in use.
        """
        # TODO: read the data (NHWC -> NCHW, move to GPU)
        templete_tensor = np.transpose(templete_img, [0, 3, 1, 2])
        search_tensor = np.transpose(search_img, [0, 3, 1, 2])
        templete_tensor = torch.tensor(templete_tensor)
        search_tensor = torch.tensor(search_tensor)
        templete_tensor = templete_tensor.to("cuda")
        search_tensor = search_tensor.to("cuda")

        # TODO: extract features
        with torch.no_grad():
            _, _, templete_feature, templete_atten = self.sar_net(templete_tensor)
            _, _, search_feature, search_atten = self.opt_net(search_tensor)

        # NOTE: weight each feature map by its attention map (added after
        # the original pipeline).
        templete_feature = templete_feature * templete_atten
        search_feature = search_feature * search_atten

        templete_feature = templete_feature.cpu()
        templete_atten = templete_atten.cpu()
        search_feature = search_feature.cpu()
        search_atten = search_atten.cpu()

        # TODO: select features — rank spatial locations by attention value.
        templete_atten = templete_atten.view(templete_atten.size()[0], -1)
        search_atten = search_atten.view(search_atten.size()[0], -1)
        _, templete_ids = torch.sort(templete_atten, 1, descending=True)
        _, search_ids = torch.sort(search_atten, 1, descending=True)
        # Debug prints: the explicit [0, 1, 2, 3] assumes batch size 4.
        print("d", search_ids.shape)
        print("d1", search_atten[[0, 1, 2, 3], search_ids[:, self.select_num]])

        templete_ids = templete_ids.numpy()
        search_ids = search_ids.numpy()
        templete_feature = templete_feature.numpy()
        search_feature = search_feature.numpy()

        # Map feature-grid (row, col) indices to pixel coordinates.
        # NOTE(review): assumes a 32-px feature stride — confirm against the
        # backbone's downsampling factor.
        templete_shape = templete_feature.shape[-2:]
        search_shape = search_feature.shape[-2:]
        templete_x, templete_y = np.fromfunction(
            lambda row, col: (32*col, 32*row), templete_shape, 
            dtype=int
        )
        search_x, search_y = np.fromfunction(
            lambda row, col: (32*col, 32*row), search_shape, 
            dtype=int
        )
        templete_x = templete_x.reshape(-1)
        templete_y = templete_y.reshape(-1)
        search_x = search_x.reshape(-1)
        search_y = search_y.reshape(-1)

        # Clamp the selection count to the number of available locations.
        templete_select_num = min(self.select_num, templete_x.shape[0])
        search_select_num = min(self.select_num, search_x.shape[0])
        print(templete_select_num, search_select_num)
        templete_select_ids = templete_ids[:, :templete_select_num]
        search_select_ids = search_ids[:, :search_select_num]
        print("a", search_select_ids.shape)

        # Flatten (B, C, H, W) -> (B, H*W, C) so rows are per-location descriptors.
        t_batch, t_ch = templete_feature.shape[0], templete_feature.shape[1]
        templete_feature = templete_feature.reshape(t_batch, t_ch, -1)
        s_batch, s_ch = search_feature.shape[0], search_feature.shape[1]
        search_feature = search_feature.reshape(s_batch, s_ch, -1)
        templete_select_feature = np.transpose(
            templete_feature, (0, 2, 1)
        )
        search_select_feature = np.transpose(
            search_feature, (0, 2, 1)
        )
        # Fancy-index the selected rows per batch element (batch index grid
        # broadcast against the per-image selection indices).
        tmp = np.arange(t_batch).reshape(t_batch, 1)
        ids_tmp = np.broadcast_to(tmp, (t_batch, templete_select_ids.shape[1]))
        templete_select_feature = templete_select_feature[ids_tmp, templete_select_ids]
        ids_tmp = np.broadcast_to(tmp, (s_batch, search_select_ids.shape[1]))
        search_select_feature = search_select_feature[ids_tmp, search_select_ids]
        print("b", search_select_feature.shape)
        templete_select_x = templete_x[templete_select_ids]
        templete_select_y = templete_y[templete_select_ids]
        search_select_x = search_x[search_select_ids]
        search_select_y = search_y[search_select_ids]

        # Process each template/search pair in the batch independently.
        for ids in range(s_batch):
            tx = templete_select_x[ids]
            ty = templete_select_y[ids]
            sx = search_select_x[ids]
            sy = search_select_y[ids]
            t_featrue = templete_select_feature[ids]
            s_featrue = search_select_feature[ids]
            # Offset where search descriptors start in the concatenation below.
            s_start_ids = t_featrue.shape[0]

            t_pt = np.stack((tx, ty), axis=1)
            s_pt = np.stack((sx, sy), axis=1)

            # TODO: preprocess descriptors (joint L2 + PCA whitening)
            print(t_featrue.shape, s_featrue.shape)
            mixed_featrue = np.concatenate((t_featrue, s_featrue), axis=0)
            mixed_featrue = self._preprocess(mixed_featrue)
            t_featrue = mixed_featrue[0:s_start_ids]
            s_featrue = mixed_featrue[s_start_ids:]

            # TODO: encoding

            # TODO: nearest-neighbour matching — for each template
            # descriptor find its closest search descriptor, then keep the
            # pair2match pairs with the smallest distances.
            tree = KDTree(s_featrue, leaf_size=10)
            dist, ind = tree.query(t_featrue)
            pairs_num = min(self.pair2match, dist.shape[0])
            print("dist.shape:", dist.shape)
            t_ids = dist[:, 0].argsort()[:pairs_num]
            # dist = dist[t_ids]
            # s_ids = ind[t_ids, 0]
            # t_pt = t_pt[t_ids]
            # s_pt = s_pt[s_ids]
            dist = dist[t_ids]
            s_pt = s_pt[ind[:, 0]]
            s_pt = s_pt[t_ids]
            t_pt = t_pt[t_ids]

            # tree = KDTree(s_featrue, leaf_size=10)
            # dist, ind = tree.query(t_featrue, k=2)
            # print(dist)
            # dist = np.array(dist)
            # ind = np.array(ind)
            # pairs_num = min(self.pair2match, dist.shape[0])
            # t_ids = dist[:, 0].argsort()[:pairs_num]
            # dist = dist[t_ids]
            # s_ids = ind[t_ids, 0]
            # t_pt = t_pt[t_ids]
            # s_pt = s_pt[s_ids]


            # TODO: RANSAC — estimate a homography and get the inlier mask.
            print(t_pt.shape)
            ransacReprojThreshold = 4
            H, mask =cv.findHomography(
                t_pt, s_pt, cv.RANSAC,ransacReprojThreshold
            )

            # TODO: logger

            # TODO: record the results — draw inlier matches and save them.
            match_mask = mask.ravel().tolist()
            ptt = [cv.KeyPoint(xy[0], xy[1], 1) for xy in t_pt]
            pts = [cv.KeyPoint(xy[0], xy[1], 1) for xy in s_pt]
            matches1to2 = [cv.DMatch(i, i, d) for i, d in zip(range(len(pts)), dist)]
            # Images arrive in [0, 1]; scale to 8-bit for OpenCV drawing.
            t_img = (255*templete_img[ids]).astype(np.uint8)
            s_img = (255*search_img[ids]).astype(np.uint8)
            img_match = cv.drawMatches(
                t_img, ptt, 
                s_img, pts, 
                matches1to2, None, 
                matchesMask=match_mask, 
                flags = 2, 
                singlePointColor = None, 
                # matchColor = (0,255,0)
            )
            cv.imwrite(f"E:/datasets/client-data/match_test/match_{ids}.png", img_match)


    def _preprocess(self, features : np.ndarray) -> np.ndarray:
        """L2-normalise, PCA-whiten, then L2-normalise the descriptors.

        NOTE(review): fit_transform refits the PCA on every call, i.e. on
        the mixed template+search set of the current pair — confirm this is
        intended rather than a projection learned once offline.
        """
        features = normalize(features, norm='l2')
        pca_features = self.pca.fit_transform(features)
        pca_features = normalize(pca_features, norm='l2')

        return pca_features


# test training the mapnet
# NOTE(review): the "__main__1" guard can never equal __name__, so this
# training smoke test is intentionally disabled; rename to "__main__" to run.
if __name__ == "__main__1":
    import torch
    import torchvision
    import torch.nn.functional as F
    import matplotlib.pyplot as plt
    import numpy as np
    from datasets.patch_pair_dataset import Sen12PatchPairDataset

    # Namespace-style config object consumed by Sen12PatchPairDataset.
    class attributes:
        def __init__(self):
            self.cache_dir = "cache/sen12/train"
            self.cache_size = 50000
            self.base_dir = "E:/datasets/sen1-2"
            self.list_file = "E:/workspace/SOMatch/tmp/json/sen12_list/overlap_subset.json"
            self.augment = True
            self.normalize = {}
            self.single_domain = ""

    # Debug helper: undo the [-1, 1] normalisation and display a grid.
    def imshow(img):
        img = img / 2 + 0.5     # unnormalize
        npimg = img.numpy()
        plt.imshow(np.transpose(npimg, (1, 2, 0)))
        plt.show()

    lr = 0.0001
    epoch = 10
    device = "cuda"
    loss_fn = F.cross_entropy
    config = attributes()
    dataset = Sen12PatchPairDataset(config)
    batch_size = 20
    trainloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, drop_last=True)

    # Two independent MAPNets: FtsA for SAR patches, FtsB for optical.
    # num_classes equals the dataset size (one class per patch pair).
    FtsA = MAPNet(num_classes=len(dataset))
    FtsB = MAPNet(num_classes=len(dataset))
    # One optimizer over the trainable parameters of both networks.
    model_params = []
    for model in [FtsA, FtsB]:
        model_params.extend( list(filter(lambda p: p.requires_grad, model.parameters())) )
    optimizer = torch.optim.SGD(model_params, lr=lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 5, gamma=0.1, verbose=True)
    FtsA.train()
    FtsB.train()

    FtsA.to(device)
    FtsB.to(device)

    for i in range(epoch):
        for j, (img_sar_tensor, img_opt_tensor, class_id) in enumerate(trainloader):
            # img_sar_tensor, img_opt_tensor, class_id = dataIter.next()
            # images = torch.cat((img_sar_tensor, img_opt_tensor), dim=0)

            # imshow(torchvision.utils.make_grid(images))

            img_sar_tensor = img_sar_tensor.to(device)
            img_opt_tensor = img_opt_tensor.to(device)
            class_id = class_id.to(device)

            # Each network classifies its own modality against the shared id.
            sar_scores, _, _, sar_atten = FtsA(img_sar_tensor)
            opt_scores, _, _, opt_atten  = FtsB(img_opt_tensor)
            optimizer.zero_grad()
            loss = loss_fn(sar_scores, class_id)
            loss += loss_fn(opt_scores, class_id)
            loss.backward()
            optimizer.step()

            print("Epoch: {} Iter: {} Loss: {:.5f}".format(i, j, loss.item()/(2)))

        scheduler.step()

# test the test of mapnet
# Loads two trained MAPNet checkpoints and runs the matching pipeline on one
# optical/SAR image pair.  NOTE(review): assumes CUDA is available.
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    device = "cuda"

    FtsA = MAPNet(num_classes=100)
    FtsB = MAPNet(num_classes=100)
    # Raw strings: un-escaped backslashes in Windows paths are fragile —
    # a path component starting with t/n/r would silently corrupt the path
    # (and plain strings raise SyntaxWarning on Python >= 3.12).
    FtsA.load_state_dict(torch.load(
        r"E:\workspace\SOMatch\weights\mapnet_single_patch_sp\s_Fts1_7200.pth"
    ))
    FtsB.load_state_dict(torch.load(
        r"E:\workspace\SOMatch\weights\mapnet_single_patch_sp\s_Fts1_7200.pth"
    ))
    tester = MapnetTester(FtsA, FtsB)

    FtsA.to(device)
    FtsB.to(device)

    opt_path = r"E:\datasets\client-data\match_test\match\opt.png"
    sar_path = r"E:\datasets\client-data\match_test\match\sar.png"

    # Optical image: collapse RGB to grey.  mean(axis=2) always yields a
    # 2-D array, so the old `if len(shape) == 2` check was dead code.
    opt_img = plt.imread(opt_path)
    opt_img = opt_img.mean(axis=2)
    opt_img = np.expand_dims(opt_img, axis=2)
    # SAR image: median-filter speckle, then keep a single channel (HWC, C=1).
    sar_img = plt.imread(sar_path)
    sar_img = cv.medianBlur(sar_img, ksize=3)
    if len(sar_img.shape) == 2:
        sar_img = np.expand_dims(sar_img, axis=2)
    else:
        sar_img = sar_img[:, :, 0]
        sar_img = np.expand_dims(sar_img, axis=2)
    # Tile each image 4x along the batch axis — the tester's debug prints
    # index batch entries 0..3 explicitly.
    opt_img = np.stack((opt_img, opt_img, opt_img, opt_img), axis=0)
    sar_img = np.stack((sar_img, sar_img, sar_img, sar_img), axis=0)

    tester.test(sar_img, opt_img)
