#-*- coding:utf-8 -*-
# author:贤宁
# datetime:2021/11/10 9:50
# software: PyCharm

# Custom convolutional recurrent (ConvGRU) network modules used in this project.
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as functional
from typing import Union, List, Tuple
import torch.nn.functional as F

# 2D convolutional GRU cell.
class ConvGRU2DCell(nn.Module):
    """A single convolutional GRU cell operating on 2D feature maps.

    The input-to-hidden and hidden-to-hidden transforms are convolutions.
    The weights for the three GRU gates (update z, reset r, candidate n) are
    stacked along the output-channel dimension, giving ``3 * hidden_channels``
    output channels per convolution.
    """

    def __init__(self, input_channels, hidden_channels, kernel_size, stride=1, padding=0, recurrent_kernel_size=3):
        """
        :param input_channels: number of channels of the input tensor
        :param hidden_channels: number of channels of the hidden state
        :param kernel_size: kernel size of the input convolution (int or pair)
        :param stride: stride of the input convolution (int or pair)
        :param padding: padding of the input convolution (int or pair)
        :param recurrent_kernel_size: kernel size of the recurrent convolution;
            its padding is derived so the hidden spatial size is preserved
            (assumes an odd kernel size)
        """
        super(ConvGRU2DCell, self).__init__()
        # Normalize scalar hyper-parameters to (height, width) pairs.
        if isinstance(recurrent_kernel_size, int):
            recurrent_kernel_size = (recurrent_kernel_size,) * 2
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size,) * 2
        if isinstance(stride, int):
            stride = (stride,) * 2
        if isinstance(padding, int):
            padding = (padding,) * 2

        self.kernel_size = kernel_size
        self.stride = stride
        self.h_channels = hidden_channels
        self.padding_ih = padding
        # "Same" padding for the recurrent convolution so the hidden state
        # keeps its spatial size from step to step.
        self.padding_hh = (
            recurrent_kernel_size[0] // 2,
            recurrent_kernel_size[1] // 2,
        )
        # torch.empty: the values are immediately overwritten by
        # reset_parameters(), so filling with ones would be wasted work.
        self.weight_ih = nn.Parameter(
            torch.empty(
                hidden_channels * 3,
                input_channels,
                kernel_size[0],
                kernel_size[1],
            ),
            requires_grad=True,
        )
        # BUGFIX: the recurrent convolution is applied to the *hidden* state,
        # so its in-channel dimension must be hidden_channels. The original
        # used input_channels, which made forward() crash whenever
        # input_channels != hidden_channels.
        self.weight_hh = nn.Parameter(
            torch.empty(
                hidden_channels * 3,
                hidden_channels,
                recurrent_kernel_size[0],
                recurrent_kernel_size[1],
            ),
            requires_grad=True,
        )
        self.bias_ih = nn.Parameter(torch.zeros(hidden_channels * 3), requires_grad=True)
        self.bias_hh = nn.Parameter(torch.zeros(hidden_channels * 3), requires_grad=True)
        # ----------------------------------------------------------------------
        self.reset_parameters()

    def reset_parameters(self):
        """Orthogonal recurrent weights, Xavier input weights, zero biases."""
        init.orthogonal_(self.weight_hh)
        init.xavier_uniform_(self.weight_ih)
        init.zeros_(self.bias_hh)
        init.zeros_(self.bias_ih)

    def forward(self, input, hx=None):
        """Run one GRU step.

        :param input: tensor of shape (batch, input_channels, H, W)
        :param hx: previous hidden state (batch, hidden_channels, H', W');
            a zero state matching the conv output size is created when None
        :return: new hidden state of shape (batch, hidden_channels, H', W')
        """
        # Standard convolution output-size formula for the input convolution.
        output_size = (
            (input.size(-2) - self.kernel_size[0] + 2 * self.padding_ih[0]) // self.stride[0] + 1,
            (input.size(-1) - self.kernel_size[1] + 2 * self.padding_ih[1]) // self.stride[1] + 1,
        )
        # Handle the case of no hidden state provided.
        if hx is None:
            hx = torch.zeros(input.size(0), self.h_channels, *output_size, device=input.device)
        # Run the optimized (jit-scripted) ConvGRU cell. Debug prints removed:
        # they ran on every time step of every forward pass.
        return _opt_convgrucell_2d(
            input,
            hx,
            self.h_channels,
            self.weight_ih,
            self.weight_hh,
            self.bias_ih,
            self.bias_hh,
            self.stride,
            self.padding_ih,
            self.padding_hh,
        )

class ConvGRU(nn.Module):
    """Unrolls a :class:`ConvGRU2DCell` over an input sequence.

    Expects input of shape (batch, time, channels, H, W) and returns the
    per-step hidden states stacked along a new leading dimension, i.e. the
    output is time-major: (time, batch, hidden_channels, H', W').
    """

    def __init__(self,
        input_channels,
        hidden_channels,
        kernel_size,
        stride,  # typically 1
        padding,  # typically 0
        recurrent_kernel_size=3):

        super(ConvGRU, self).__init__()
        # Single recurrent cell shared across all time steps.
        self.rnn_cell = ConvGRU2DCell(
            input_channels, hidden_channels, kernel_size, stride, padding, recurrent_kernel_size
        )

    def forward(self, input, hidden_state=None):
        # Step the cell along the time axis (dim 1), threading the hidden
        # state from one step to the next; a None initial state lets the
        # cell create zeros.
        states = []
        for t in range(input.size(1)):
            hidden_state = self.rnn_cell(input[:, t], hidden_state)
            states.append(hidden_state)
        # Stack along a NEW dim 0 -> time-major output.
        return torch.stack(states, 0)


# Jit-scripted (optimized) cell helper functions.
@torch.jit.script

def _opt_cell_end(hidden, ih_1, hh_1, ih_2, hh_2, ih_3, hh_3):
    z = torch.sigmoid(ih_1 + hh_1)
    r = torch.sigmoid(ih_2 + hh_2)
    n = torch.tanh(ih_3 + r * hh_3)
    out = (1 - z) * n + z * hidden
    return out


@torch.jit.script
def _opt_convgrucell_1d(
    inputs,
    hidden,
    channels: int,
    w_ih,
    w_hh,
    b_ih,
    b_hh,
    stride: int,
    pad1: int,
    pad2: int,
):
    """One ConvGRU step on 1D signals.

    Both convolutions produce 3 * channels output channels holding the
    stacked z / r / n gate pre-activations, which are split apart and
    combined by _opt_cell_end.
    """
    gates_i = functional.conv1d(inputs, w_ih, bias=b_ih, stride=stride, padding=pad1)
    # Recurrent conv always uses stride 1 so the hidden length is preserved.
    gates_h = functional.conv1d(hidden, w_hh, bias=b_hh, stride=1, padding=pad2)
    # Split the stacked gate channels into three equal chunks (z, r, n).
    parts_i = torch.chunk(gates_i, 3, 1)
    parts_h = torch.chunk(gates_h, 3, 1)
    return _opt_cell_end(
        hidden,
        parts_i[0],  # ih1 (z)
        parts_h[0],  # hh1 (z)
        parts_i[1],  # ih2 (r)
        parts_h[1],  # hh2 (r)
        parts_i[2],  # ih3 (n)
        parts_h[2],  # hh3 (n)
    )


@torch.jit.script
def _opt_convgrucell_2d(
    inputs,
    hidden,
    channels: int,
    w_ih,
    w_hh,
    b_ih,
    b_hh,
    stride: List[int],
    pad1: List[int],
    pad2: List[int],
):
    """One ConvGRU step on 2D feature maps.

    :param inputs: input tensor (batch, in_channels, H, W)
    :param hidden: previous hidden state (batch, channels, H', W')
    :param channels: hidden channel count; both convolutions output the
        three gate pre-activations stacked as 3 * channels along dim 1,
        in the order z, r, n
    :param stride / pad1: stride and padding of the input convolution
    :param pad2: padding of the recurrent convolution (stride fixed at 1 so
        the hidden spatial size is preserved)
    :return: new hidden state with the same shape as ``hidden``
    """
    ih_output = functional.conv2d(inputs, w_ih, bias=b_ih, stride=stride, padding=pad1)
    hh_output = functional.conv2d(hidden, w_hh, bias=b_hh, stride=1, padding=pad2)
    # Debug prints removed: they executed on every time step of every
    # forward pass, flooding stdout and slowing the scripted hot path.
    output = _opt_cell_end(
        hidden,
        torch.narrow(ih_output, 1, 0, channels),             # ih1 (z)
        torch.narrow(hh_output, 1, 0, channels),             # hh1 (z)
        torch.narrow(ih_output, 1, channels, channels),      # ih2 (r)
        torch.narrow(hh_output, 1, channels, channels),      # hh2 (r)
        torch.narrow(ih_output, 1, 2 * channels, channels),  # ih3 (n)
        torch.narrow(hh_output, 1, 2 * channels, channels),  # hh3 (n)
    )
    return output