"""
This file (model_zoo.py) is designed for:
    
Copyright (c) 2022, Yongjie Duan. All rights reserved.
"""
import os
import sys

# os.chdir(sys.path[0])
import os.path as osp
import numpy as np
from glob import glob
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from units import *
import cv2
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

class OrientationNet(nn.Module):
    """Estimate the local ridge-orientation field of a (fingerprint) image.

    Classical gradient-based method: Sobel gradients, windowed summation of
    the squared-gradient tensor, then Gaussian smoothing of the doubled-angle
    field before halving back to an orientation.

    Args:
        stride: step of the sliding summation window; the output orientation
            map is roughly input-resolution / stride.
        window_size: side length of the square summation window.
    """

    def __init__(self, stride=8, window_size=17):
        super(OrientationNet, self).__init__()
        # Fixed (non-trainable) Sobel kernels for x/y image gradients.
        sobelx = torch.tensor([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=torch.float32).view(1, 1, 3, 3)
        sobely = torch.tensor([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype=torch.float32).view(1, 1, 3, 3)
        self.sobelx = nn.Parameter(sobelx, requires_grad=False)
        self.sobely = nn.Parameter(sobely, requires_grad=False)
        self.stride = stride
        self.window_size = window_size
        # All-ones kernel: convolving with it sums values over the window.
        E = torch.tensor(np.ones([1, 1, window_size, window_size]), dtype=torch.float32)
        self.E = nn.Parameter(E, requires_grad=False)
        # 5x5 Gaussian used to smooth the doubled-angle orientation field.
        # NOTE: attribute keeps the original "guassian" spelling so previously
        # saved state_dicts remain loadable.
        guassian_kernel = self._gaussian_kernel_2d(5, 1.5)
        self.guassian_kernel = nn.Parameter(guassian_kernel, requires_grad=False)

    def _gaussian_kernel_2d(self, window_size, sigma):
        """Return a normalized (window_size, window_size) Gaussian kernel."""
        gauss = torch.Tensor(
            [math.exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)]
        )
        gauss = gauss / gauss.sum()
        # Outer product of the 1-D Gaussian with itself gives the 2-D kernel.
        gauss_2d = torch.outer(gauss, gauss)
        return gauss_2d

    def get_orientation(self, img):
        """Compute the orientation field of `img`.

        Args:
            img: float tensor, assumed shape (N, 1, H, W) — single-channel,
                since the Sobel kernels have one input channel.

        Returns:
            Tensor of shape (N, 1, H', W') with angles in radians in
            (-pi/2, pi/2]; H'/W' shrink according to `window_size`/`stride`.
        """
        dx = F.conv2d(img, self.sobelx, padding=1)
        dy = F.conv2d(img, self.sobely, padding=1)
        # Element-wise products: entries of the squared-gradient tensor.
        dx2 = dx * dx
        dy2 = dy * dy
        dxdy = dx * dy
        # Sum each product over a sliding window (conv with all-ones kernel).
        dx2 = F.conv2d(dx2, self.E, stride=self.stride, padding=0)
        dy2 = F.conv2d(dy2, self.E, stride=self.stride, padding=0)
        dxdy = F.conv2d(dxdy, self.E, stride=self.stride, padding=0)
        # Doubled-angle representation; +pi shifts atan2's output into [0, 2*pi).
        dx2_dy2 = dx2 - dy2
        theta = torch.atan2(2 * dxdy, dx2_dy2) + np.pi
        # Smooth via the cos/sin components of the doubled angle (avoids
        # wrap-around artifacts), then halve to recover the orientation.
        phi_x = F.conv2d(torch.cos(theta), self.guassian_kernel.view(1, 1, 5, 5), padding=2)
        phi_y = F.conv2d(torch.sin(theta), self.guassian_kernel.view(1, 1, 5, 5), padding=2)
        theta = torch.atan2(phi_y, phi_x) / 2
        return theta

    def draw_orientation_with_arrow(self, theta):
        """Save a quiver plot of `theta` to 'orientation.png'.

        Args:
            theta: orientation tensor of shape (N, 1, H', W'); only the first
                sample/channel is drawn.

        Returns:
            The (H', W') numpy array of angles that was plotted.
        """
        theta = theta.cpu().detach().numpy()
        theta = theta[0, 0, :, :]
        plt.figure(figsize=(10, 10))
        # Image coordinates: the y axis grows downward.
        plt.gca().invert_yaxis()
        # One arrow per cell, pointing along the local orientation.
        plt.quiver(np.cos(theta), np.sin(theta), color='r', angles='xy', scale_units='xy', scale=1)
        plt.savefig('orientation.png')
        plt.close()
        return theta
   
        
        

class FingerNet(nn.Module):
    """FingerNet: joint fingerprint orientation, segmentation, enhancement
    and minutiae extraction.

    Pipeline (see ``forward``): image normalization -> VGG-style feature
    extraction -> three dilated (ASPP-like) branches whose orientation
    (90-bin) and segmentation logits are summed -> fixed-initialized Gabor
    filter bank steered by the predicted orientation for enhancement ->
    minutiae heads (orientation, x/y offsets, score).
    """

    def __init__(self):
        super(FingerNet, self).__init__()
        self.img_norm = NormalizeModule(m0=0, var0=1)
        # feature extraction VGG
        self.conv1 = nn.Sequential(ConvBnPRelu(1, 64, 3), ConvBnPRelu(64, 64, 3), nn.MaxPool2d(2, 2))
        self.conv2 = nn.Sequential(ConvBnPRelu(64, 128, 3), ConvBnPRelu(128, 128, 3), nn.MaxPool2d(2, 2))
        self.conv3 = nn.Sequential(
            ConvBnPRelu(128, 256, 3), ConvBnPRelu(256, 256, 3), ConvBnPRelu(256, 256, 3), nn.MaxPool2d(2, 2)
        )

        # multi-scale ASPP: three parallel branches with dilation 1/4/8,
        # each with its own orientation (90 classes) and segmentation head
        self.conv4_1 = ConvBnPRelu(256, 256, 3, padding=1, dilation=1)
        self.ori1 = nn.Sequential(ConvBnPRelu(256, 128, 1, stride=1, padding=0), nn.Conv2d(128, 90, 1, stride=1, padding=0))
        self.seg1 = nn.Sequential(ConvBnPRelu(256, 128, 1, stride=1, padding=0), nn.Conv2d(128, 1, 1, stride=1, padding=0))

        self.conv4_2 = ConvBnPRelu(256, 256, 3, padding=4, dilation=4)
        self.ori2 = nn.Sequential(ConvBnPRelu(256, 128, 1, stride=1, padding=0), nn.Conv2d(128, 90, 1, stride=1, padding=0))
        self.seg2 = nn.Sequential(ConvBnPRelu(256, 128, 1, stride=1, padding=0), nn.Conv2d(128, 1, 1, stride=1, padding=0))

        self.conv4_3 = ConvBnPRelu(256, 256, 3, padding=8, dilation=8)
        self.ori3 = nn.Sequential(ConvBnPRelu(256, 128, 1, stride=1, padding=0), nn.Conv2d(128, 90, 1, stride=1, padding=0))
        self.seg3 = nn.Sequential(ConvBnPRelu(256, 128, 1, stride=1, padding=0), nn.Conv2d(128, 1, 1, stride=1, padding=0))

        # enhance part: conv layers initialized with a Gabor bank
        # (real/imaginary parts), trainable from that starting point
        gabor_cos, gabor_sin = gabor_bank(enh_ksize=25, ori_stride=2, Lambda=8)

        self.enh_img_real = nn.Conv2d(gabor_cos.size(1), gabor_cos.size(0), kernel_size=(25, 25), padding=12)
        self.enh_img_real.weight = nn.Parameter(gabor_cos, requires_grad=True)
        self.enh_img_real.bias = nn.Parameter(torch.zeros(gabor_cos.size(0)), requires_grad=True)

        self.enh_img_imag = nn.Conv2d(gabor_sin.size(1), gabor_sin.size(0), kernel_size=(25, 25), padding=12)
        self.enh_img_imag.weight = nn.Parameter(gabor_sin, requires_grad=True)
        self.enh_img_imag.bias = nn.Parameter(torch.zeros(gabor_sin.size(0)), requires_grad=True)

        # mnt part: minutiae heads on (enhanced image, segmentation) input
        self.mnt_conv1 = nn.Sequential(ConvBnPRelu(2, 64, 9, padding=4), nn.MaxPool2d(2, 2))
        self.mnt_conv2 = nn.Sequential(ConvBnPRelu(64, 128, 5, padding=2), nn.MaxPool2d(2, 2))
        self.mnt_conv3 = nn.Sequential(ConvBnPRelu(128, 256, 3, padding=1), nn.MaxPool2d(2, 2))
        self.mnt_o = nn.Sequential(ConvBnPRelu(256 + 90, 256, 1, padding=0), nn.Conv2d(256, 180, 1, padding=0))
        self.mnt_w = nn.Sequential(ConvBnPRelu(256, 256, 1, padding=0), nn.Conv2d(256, 8, 1, padding=0))
        self.mnt_h = nn.Sequential(ConvBnPRelu(256, 256, 1, padding=0), nn.Conv2d(256, 8, 1, padding=0))
        self.mnt_s = nn.Sequential(ConvBnPRelu(256, 256, 1, padding=0), nn.Conv2d(256, 1, 1, padding=0))

    def forward(self, input):
        """Run the full pipeline.

        Args:
            input: single-channel image tensor, assumed (N, 1, H, W) with
                H and W divisible by 8 — TODO confirm against callers.

        Returns:
            dict with keys "enh" (orientation-selected real Gabor response),
            "ori"/"seg" (sigmoid maps at 1/8 resolution), and
            "mnt_o"/"mnt_w"/"mnt_h"/"mnt_s" (minutiae heads, sigmoid).
        """
        img_norm = self.img_norm(input)

        # feature extraction VGG (three 2x poolings -> 1/8 resolution)
        conv1 = self.conv1(img_norm)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)

        # multi-scale ASPP
        conv4_1 = self.conv4_1(conv3)
        ori1 = self.ori1(conv4_1)
        seg1 = self.seg1(conv4_1)

        conv4_2 = self.conv4_2(conv3)
        ori2 = self.ori2(conv4_2)
        seg2 = self.seg2(conv4_2)

        conv4_3 = self.conv4_3(conv3)
        ori3 = self.ori3(conv4_3)
        seg3 = self.seg3(conv4_3)

        # logits of the three branches are summed before the sigmoid
        ori_out = torch.sigmoid(ori1 + ori2 + ori3)
        seg_out = torch.sigmoid(seg1 + seg2 + seg3)

        # enhance part: respond with the whole Gabor bank, then select the
        # channel matching the dominant predicted orientation per location
        enh_real = self.enh_img_real(input)
        enh_imag = self.enh_img_imag(input)
        ori_peak = orientation_highest_peak(ori_out)
        ori_peak = select_max_orientation(ori_peak)
        ori_up = F.interpolate(ori_peak, scale_factor=8, mode="nearest")
        # NOTE(review): softsign maps (0,1) to (0,0.5), not to {0,1} — if a
        # hard mask was intended this may want torch.round instead; confirm.
        seg_round = F.softsign(seg_out)
        seg_up = F.interpolate(seg_round, scale_factor=8, mode="nearest")
        enh_real = (enh_real * ori_up).sum(1, keepdim=True)
        enh_imag = (enh_imag * ori_up).sum(1, keepdim=True)
        enh_img = torch.atan2(enh_imag, enh_real)
        enh_seg_img = torch.cat((enh_img, seg_up), dim=1)

        # mnt part
        mnt_conv1 = self.mnt_conv1(enh_seg_img)
        mnt_conv2 = self.mnt_conv2(mnt_conv1)
        mnt_conv3 = self.mnt_conv3(mnt_conv2)

        # minutiae orientation head also sees the predicted orientation map
        mnt_o = torch.sigmoid(self.mnt_o(torch.cat((mnt_conv3, ori_out), dim=1)))
        mnt_w = torch.sigmoid(self.mnt_w(mnt_conv3))
        mnt_h = torch.sigmoid(self.mnt_h(mnt_conv3))
        mnt_s = torch.sigmoid(self.mnt_s(mnt_conv3))

        return {
            "enh": enh_real,
            "ori": ori_out,
            "seg": seg_out,
            "mnt_o": mnt_o,
            "mnt_w": mnt_w,
            "mnt_h": mnt_h,
            "mnt_s": mnt_s,
        }

if __name__ == "__main__":
    model = OrientationNet()
    img = cv2.imread('l0.png',0)
    input = torch.from_numpy(img).unsqueeze(0).unsqueeze(0).float()
    output = model.get_orientation(input)
    theta = model.draw_orientation_with_arrow(output)
    cv2.imwrite('l0_ori.png',theta)