# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import paddle
import paddle.nn as nn
from paddle import ParamAttr
import paddle.nn.functional as F
import numpy as np

from paddle.nn.initializer import TruncatedNormal, Constant
from paddle import ParamAttr
import paddle.nn.functional as F  
import math
trunc_normal_ = TruncatedNormal(std=.02)
zeros_ = Constant(value=0.)
ones_ = Constant(value=1.)

from .rec_att_head import AttentionGRUCell

def drop_path(x, drop_prob=0., training=False):
    """Apply Stochastic Depth: randomly zero whole samples with probability
    `drop_prob`, rescaling survivors by 1/keep_prob so the expectation is
    unchanged. A no-op at eval time or when `drop_prob` is 0.

    Note: often (mis)named 'Drop Connect', which is a different technique;
    see https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956
    """
    if not training or drop_prob == 0.:
        return x
    keep_prob = paddle.to_tensor(1 - drop_prob, dtype=x.dtype)
    # One Bernoulli draw per sample: shape (N, 1, 1, ...) broadcasts over x.
    mask_shape = (paddle.shape(x)[0], ) + (1, ) * (x.ndim - 1)
    binary_mask = paddle.floor(keep_prob + paddle.rand(mask_shape, dtype=x.dtype))
    return x.divide(keep_prob) * binary_mask

class DropPath(nn.Layer):
    """Drop paths (Stochastic Depth) per sample (when applied in main path
    of residual blocks). Thin layer wrapper around `drop_path` that picks up
    train/eval mode from `self.training`.
    """

    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        # Normalize None to 0.: the default None would otherwise reach
        # `drop_path`, where `1 - None` raises TypeError during training.
        self.drop_prob = 0. if drop_prob is None else drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)

def get_para_bias_attr(l2_decay, k):
    """Build a matching [weight_attr, bias_attr] pair of ParamAttr.

    With l2_decay > 0: attach L2 regularization and a Uniform(-s, s)
    initializer where s = 1/sqrt(k) (fan-in scaling). Otherwise both
    regularizer and initializer are left as framework defaults (None).
    """
    if l2_decay > 0:
        reg = paddle.regularizer.L2Decay(l2_decay)
        stdv = 1.0 / math.sqrt(k * 1.0)
        init = nn.initializer.Uniform(-stdv, stdv)
    else:
        reg = None
        init = None
    return [
        ParamAttr(regularizer=reg, initializer=init),
        ParamAttr(regularizer=reg, initializer=init),
    ]

class TableAttentionHead(nn.Layer):
    """Table-structure recognition head.

    Autoregressively decodes structure tokens with an attention GRU cell,
    then regresses per-step cell locations from the concatenation of the
    decoder hidden states and a linearly resampled copy of the visual
    features.
    """

    def __init__(self,
                 in_channels,
                 hidden_size,
                 in_max_len=488,
                 max_text_length=800,
                 out_channels=30,
                 loc_reg_num=4,
                 **kwargs):
        """
        @param in_channels: list of backbone channel counts; only the last is used
        @param hidden_size: GRU hidden size
        @param in_max_len: input-size switch selecting the width of the
            location-feature projection (640 -> 400, 800 -> 625, else 256)
        @param max_text_length: number of decode steps minus one
        @param out_channels: number of structure token classes
        @param loc_reg_num: number of regressed coordinates per step (e.g. 4)
        """
        super(TableAttentionHead, self).__init__()
        self.input_size = in_channels[-1]
        self.hidden_size = hidden_size
        self.out_channels = out_channels
        self.max_text_length = max_text_length

        self.structure_attention_cell = AttentionGRUCell(
            self.input_size, hidden_size, self.out_channels, use_gru=False)
        self.structure_generator = nn.Linear(hidden_size, self.out_channels)
        self.in_max_len = in_max_len

        # The flattened spatial length of the feature map depends on the
        # input size; each case maps it to max_text_length + 1 steps.
        if self.in_max_len == 640:
            self.loc_fea_trans = nn.Linear(400, self.max_text_length + 1)
        elif self.in_max_len == 800:
            self.loc_fea_trans = nn.Linear(625, self.max_text_length + 1)
        else:
            self.loc_fea_trans = nn.Linear(256, self.max_text_length + 1)
        self.loc_generator = nn.Linear(self.input_size + hidden_size, loc_reg_num)

    def _char_to_onehot(self, input_char, onehot_dim):
        # One-hot encode previous-step token ids for the GRU cell input.
        input_ont_hot = F.one_hot(input_char, onehot_dim)
        return input_ont_hot

    def forward(self, inputs, targets=None):
        """Decode structure probabilities and location predictions.

        @param inputs: list of feature maps; the last one is used
        @param targets: in training mode, targets[0] holds the ground-truth
            structure token ids used for teacher forcing
        @return: dict with 'structure_probs' (softmaxed only at inference)
            and 'loc_preds' (sigmoid-activated coordinates)
        """
        # if and else branch are both needed when you want to assign a variable
        # if you modify the var in just one branch, then the modification will not work.
        fea = inputs[-1]
        last_shape = int(np.prod(fea.shape[2:]))  # gry added
        fea = paddle.reshape(fea, [fea.shape[0], fea.shape[1], last_shape])
        fea = fea.transpose([0, 2, 1])  # (NTC)(batch, width, channels)
        batch_size = fea.shape[0]

        hidden = paddle.zeros((batch_size, self.hidden_size))
        output_hiddens = paddle.zeros(
            (batch_size, self.max_text_length + 1, self.hidden_size))
        if self.training and targets is not None:
            # Teacher forcing: feed ground-truth token of each step.
            structure = targets[0]
            for i in range(self.max_text_length + 1):
                elem_onehots = self._char_to_onehot(
                    structure[:, i], onehot_dim=self.out_channels)
                (outputs, hidden), alpha = self.structure_attention_cell(
                    hidden, fea, elem_onehots)
                output_hiddens[:, i, :] = outputs
            structure_probs = self.structure_generator(output_hiddens)
            # Resample visual features to one slice per decode step.
            loc_fea = fea.transpose([0, 2, 1])
            loc_fea = self.loc_fea_trans(loc_fea)
            loc_fea = loc_fea.transpose([0, 2, 1])
            loc_concat = paddle.concat([output_hiddens, loc_fea], axis=2)
            loc_preds = self.loc_generator(loc_concat)
            loc_preds = F.sigmoid(loc_preds)
        else:
            # Greedy autoregressive decoding: feed back argmax of last step.
            temp_elem = paddle.zeros(shape=[batch_size], dtype="int32")
            structure_probs = None
            loc_preds = None
            elem_onehots = None
            outputs = None
            alpha = None
            # Tensor loop bound — presumably required for static-graph
            # export; TODO confirm.
            max_text_length = paddle.to_tensor(self.max_text_length)
            for i in range(max_text_length + 1):
                elem_onehots = self._char_to_onehot(
                    temp_elem, onehot_dim=self.out_channels)
                (outputs, hidden), alpha = self.structure_attention_cell(
                    hidden, fea, elem_onehots)
                output_hiddens[:, i, :] = outputs
                structure_probs_step = self.structure_generator(outputs)
                temp_elem = structure_probs_step.argmax(axis=1, dtype="int32")

            structure_probs = self.structure_generator(output_hiddens)
            structure_probs = F.softmax(structure_probs)
            loc_fea = fea.transpose([0, 2, 1])
            loc_fea = self.loc_fea_trans(loc_fea)
            loc_fea = loc_fea.transpose([0, 2, 1])
            loc_concat = paddle.concat([output_hiddens, loc_fea], axis=2)
            loc_preds = self.loc_generator(loc_concat)
            loc_preds = F.sigmoid(loc_preds)
        return {'structure_probs': structure_probs, 'loc_preds': loc_preds}

class Block(nn.Layer):
    """ConvNeXt-style 1D residual block.

    depthwise Conv1D -> LayerNorm -> Linear (4x expand) -> GELU ->
    Linear (project back) -> layer scale (gamma) -> drop path -> residual.
    Operates on (N, C, L) tensors; the MLP part runs in (N, L, C) layout.
    """

    def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6):
        super().__init__()
        # Kernel 121 with padding 60 preserves the sequence length.
        self.dwconv_1 = nn.Conv1D(dim, dim, 121, padding=60, groups=dim)  # depthwise conv
        self.norm = nn.LayerNorm(dim, epsilon=1e-6)
        self.pwconv1 = nn.Linear(dim, 4 * dim)
        self.act = nn.GELU()
        self.pwconv2 = nn.Linear(4 * dim, dim)
        # Per-channel layer-scale parameter, initialized small.
        self.gamma = self.create_parameter(
            shape=[dim],
            default_initializer=Constant(value=layer_scale_init_value))
        if drop_path > 0.:
            self.drop_path = DropPath(drop_path)
        else:
            self.drop_path = nn.Identity()
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal weights, zero biases for conv/linear sublayers.
        if isinstance(m, (nn.Conv1D, nn.Linear)):
            trunc_normal_(m.weight)
            if m.bias is not None:
                zeros_(m.bias)

    def forward(self, x):
        residual = x
        y = self.dwconv_1(x)
        y = y.transpose([0, 2, 1])  # (N, C, L) -> (N, L, C)
        y = self.norm(y)
        y = self.pwconv2(self.act(self.pwconv1(y)))
        y = self.gamma * y
        y = y.transpose([0, 2, 1])  # (N, L, C) -> (N, C, L)
        return residual + self.drop_path(y)
    
class D_Block(nn.Layer):
    """Decoder block: two stacked ConvNeXt-style 1D stages with extra inputs.

    Stage 1 adds a query tensor `q` to `x` before the depthwise conv; stage 2
    mixes in encoder features `y` (projected from length 256 to 501 by
    `fuse_conv`). Operates on (N, C, L) tensors; MLP parts run in (N, L, C).
    """

    def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6):
        super().__init__()
        self.dwconv1_1 = nn.Conv1D(dim, dim,  121, padding=  60, groups=dim)  # depthwise conv, length-preserving
        self.norm1 = nn.LayerNorm(dim, epsilon=1e-6)
        self.pwconv1 = nn.Linear(dim, 4 * dim)
        self.act1 = nn.GELU()
        self.pwconv2 = nn.Linear(4 * dim, dim)
        # Projects encoder features along the length axis: 256 -> 501.
        self.fuse_conv = nn.Linear(256, 501)
        self.dwconv2_1 = nn.Conv1D(dim, dim,  121, padding=  60, groups=dim)  # depthwise conv, length-preserving
        self.norm2 = nn.LayerNorm(dim, epsilon=1e-6)
        self.pwconv3 = nn.Linear(dim, 4 * dim)
        self.act2 = nn.GELU()
        self.pwconv4 = nn.Linear(4 * dim, dim)
        # Per-channel layer-scale parameters, one per stage.
        self.gamma1 = self.create_parameter(
                shape=[dim],
                default_initializer=Constant(value=layer_scale_init_value))
        self.gamma2 = self.create_parameter(
            shape=[dim],
            default_initializer=Constant(value=layer_scale_init_value))
        self.drop_path = DropPath(
            drop_path) if drop_path > 0. else nn.Identity()
        self.apply(self._init_weights)
        
    def _init_weights(self, m):
        # Truncated-normal weights, zero biases for conv/linear sublayers.
        if isinstance(m, (nn.Conv1D, nn.Linear)):
            trunc_normal_(m.weight)
            if m.bias is not None:
                zeros_(m.bias)
        
    def forward(self, x, q, y):
        """
        @param x: decoder state, (N, C, L)
        @param q: query embedding added to x, same shape as x
        @param y: encoder features, (N, C, 256) — projected to length 501
        """
        # Stage 1: query-conditioned ConvNeXt stage with residual.
        input = x 
        x = x + q
        x = self.dwconv1_1(x)
        x = x.transpose([0, 2, 1])  # (N, C, L) -> (N, L, C)
        x = self.norm1(x)
        x = self.pwconv1(x)
        x = self.act1(x)
        x = self.pwconv2(x)
        x = self.gamma1 * x
        x = x.transpose([0, 2, 1])   # (N, L, C) -> (N, C, L)
        x = input + self.drop_path(x)
        
        # Stage 2: fuse encoder features, conv, inner residual, then MLP.
        input = x
        y = self.fuse_conv(y)
        x = x + y
        x = self.dwconv2_1(x)
        x = x + input
        x = x.transpose([0, 2, 1])  # (N, C, L) -> (N, L, C)
        x = self.norm2(x)
        
        # NOTE: the residual for the MLP sub-stage is saved AFTER norm2 and
        # in (N, C, L) layout, so the final addition happens post-norm.
        input = x.transpose([0, 2, 1])
        x = self.pwconv3(x)
        x = self.act2(x)
        x = self.pwconv4(x)
        x = self.gamma2 * x
        x = x.transpose([0, 2, 1])  # (N, L, C) -> (N, C, L)
        x = input + self.drop_path(x)
        return x      
    
class SLAHead(nn.Layer):
    """SLANet table recognition head with a Conv1D encoder/decoder.

    A stack of `Block` layers encodes the flattened visual features; a stack
    of `D_Block` layers decodes per-step latent vectors from learned query
    embeddings. Each decode step feeds its latent slice into an attention
    GRU cell that predicts the structure token and cell coordinates.
    """

    def __init__(self,
                 in_channels,
                 hidden_size,
                 out_channels=30,
                 max_text_length=500,
                 loc_reg_num=4,
                 fc_decay=0.0,
                 **kwargs):
        """
        @param in_channels: list of backbone channel counts; only the last is used
        @param hidden_size: hidden_size for RNN and Embedding
        @param out_channels: num_classes to rec
        @param max_text_length: max text pred
        @param loc_reg_num: number of regressed coordinates per step (e.g. 4)
        @param fc_decay: L2 decay for the generator FC layers (0 disables)
        """
        super().__init__()
        in_channels = in_channels[-1]
        self.hidden_size = hidden_size
        self.max_text_length = max_text_length
        self.emb = self._char_to_onehot
        self.num_embeddings = out_channels
        self.loc_reg_num = loc_reg_num
        
        # structure
        self.structure_attention_cell = AttentionGRUCell(
            in_channels, hidden_size, self.num_embeddings)
        weight_attr, bias_attr = get_para_bias_attr(
            l2_decay=fc_decay, k=hidden_size)
        weight_attr1_1, bias_attr1_1 = get_para_bias_attr(
            l2_decay=fc_decay, k=hidden_size)
        weight_attr1_2, bias_attr1_2 = get_para_bias_attr(
            l2_decay=fc_decay, k=hidden_size)
        self.structure_generator = nn.Sequential(
            nn.Linear(
                self.hidden_size,
                self.hidden_size,
                weight_attr=weight_attr1_2,
                bias_attr=bias_attr1_2),
            nn.Linear(
                hidden_size,
                out_channels,
                weight_attr=weight_attr,
                bias_attr=bias_attr))
        # loc
        weight_attr1, bias_attr1 = get_para_bias_attr(
            l2_decay=fc_decay, k=self.hidden_size)
        weight_attr2, bias_attr2 = get_para_bias_attr(
            l2_decay=fc_decay, k=self.hidden_size)
        self.loc_generator = nn.Sequential(
            nn.Linear(
                self.hidden_size,
                self.hidden_size,
                weight_attr=weight_attr1,
                bias_attr=bias_attr1),
            nn.Linear(
                self.hidden_size,
                loc_reg_num,
                weight_attr=weight_attr2,
                bias_attr=bias_attr2),
            nn.Sigmoid())
        # One learned 96-d query per decode step (max_text_length + 1 = 501).
        self.query_embed = nn.Embedding(501, 96)

        # Number of encoder/decoder Conv1D blocks.
        self.N_T = N_T = 3 ## 6
        enc_layers = []
        for i in range(0, N_T):
            enc_layers.append(Block(96))
        dec_layers = []
        for i in range(0, N_T):
            dec_layers.append(D_Block(96)) ##  drop == 0.1 none
        self.enc = nn.Sequential(*enc_layers)
        self.dec = nn.Sequential(*dec_layers)
        
    def forward(self, inputs, targets=None):
        """Decode structure tokens and locations for every step.

        @param inputs: list of feature maps; the last one is used
        @param targets: in training mode, targets[0] holds ground-truth
            structure token ids used for teacher forcing
        @return: dict with 'structure_probs' (softmaxed only at inference)
            and 'loc_preds'
        """
        fea = inputs[-1]
        batch_size = fea.shape[0]
        # reshape
        fea = paddle.reshape(fea, [fea.shape[0], fea.shape[1], -1]) # (N, 96, 256)
        # Encode the flattened features with the Conv1D block stack.
        z = fea
        for i in range(self.N_T):
            z = self.enc[i](z)
        # Decode step latents from the learned query embeddings.
        out = paddle.zeros((fea.shape[0], 96, 501))
        query_embed = self.query_embed.weight.transpose([1, 0]).unsqueeze(1).repeat_interleave(fea.shape[0], axis = 1).transpose([1, 0, 2])
        for i in range(0, self.N_T):
            out = self.dec[i](out, query_embed, z)
        out = out.transpose([0, 2, 1])
        fea = fea.transpose([0, 2, 1]) # (NTC)(batch, width, channels) (N, 256, 96)
        
        hidden = paddle.zeros((batch_size, self.hidden_size))
        structure_preds = paddle.zeros(
            (batch_size, self.max_text_length + 1, self.num_embeddings))
        loc_preds = paddle.zeros(
            (batch_size, self.max_text_length + 1, self.loc_reg_num))
        structure_preds.stop_gradient = True
        loc_preds.stop_gradient = True
        if self.training and targets is not None:
            # Teacher forcing: feed ground-truth token of each step.
            structure = targets[0]
            for i in range(self.max_text_length + 1):
                latent = out[:, i:i+1,:]
                hidden, structure_step, loc_step = self._decode(structure[:, i],
                                                                fea, hidden, latent, batch_size)
                structure_preds[:, i, :] = structure_step
                loc_preds[:, i, :] = loc_step
        else:
            # Greedy autoregressive decoding: feed back argmax of last step.
            pre_chars = paddle.zeros(shape=[batch_size], dtype="int32")
            # Tensor loop bound — presumably required for static-graph
            # export; TODO confirm.
            max_text_length = paddle.to_tensor(self.max_text_length)
            # for export
            loc_step, structure_step = None, None
            for i in range(max_text_length + 1):
                latent = out[:, i:i+1,:]
                hidden, structure_step, loc_step = self._decode(pre_chars, fea,
                                                                hidden, latent, batch_size)
                pre_chars = structure_step.argmax(axis=1, dtype="int32")
                structure_preds[:, i, :] = structure_step
                loc_preds[:, i, :] = loc_step
        if not self.training:
            structure_preds = F.softmax(structure_preds)
        return {'structure_probs': structure_preds, 'loc_preds': loc_preds}

    def _decode(self, pre_chars, features, hidden, latent, batch_size):
        """
        Predict table label and coordinates for each step
        @param pre_chars: Table label in previous step
        @param features: visual features, (N, 256, 96)
        @param hidden: hidden status in previous step
        @param latent: decoder latent slice for this step
        @param batch_size: current batch size
        @return: (hidden, structure_step, loc_step)
        """
        emb_feature = self.emb(pre_chars)
        # output shape is b * self.hidden_size
        # NOTE(review): this passes `latent` and `batch_size` to the cell,
        # so it relies on an AttentionGRUCell variant with that extended
        # signature — verify against rec_att_head.
        (output, hidden), alpha   = self.structure_attention_cell(
            hidden, features, emb_feature, latent, batch_size)

        # structure
        structure_step = self.structure_generator(output)
        # loc
        loc_step = self.loc_generator(output)
        return hidden, structure_step, loc_step

    def _char_to_onehot(self, input_char):
        # One-hot encode previous-step token ids for the GRU cell input.
        input_ont_hot = F.one_hot(input_char, self.num_embeddings)
        return input_ont_hot

