import math

import numpy as np
import paddle
import paddle.nn as nn


class LSTM(paddle.nn.Layer):
    """Convolutional LSTM cell.

    Each gate is a 3x3 convolution (stride 1, padding 1, so spatial size is
    preserved) applied to the channel-wise concatenation of the current input
    and the previous hidden state.

    Args:
        input_band: number of channels in the input feature map ``x``.
        hidden_band: number of channels in the hidden/cell state.
    """

    def __init__(self, input_band, hidden_band):
        super().__init__()
        combined = input_band + hidden_band
        # Four independent gate convolutions over [x ; h_prev].
        self.input_gate = nn.Conv2D(combined, hidden_band, 3, 1, 1)
        self.forget_gate = nn.Conv2D(combined, hidden_band, 3, 1, 1)
        self.cell_gate = nn.Conv2D(combined, hidden_band, 3, 1, 1)
        self.output_gate = nn.Conv2D(combined, hidden_band, 3, 1, 1)

    def forward(self, x, h_prev, c_prev):
        """Run one LSTM step.

        Args:
            x: input feature map, concatenated with ``h_prev`` on axis 1 —
               must therefore match its spatial size (assumed NCHW; TODO confirm).
            h_prev: previous hidden state.
            c_prev: previous cell state.

        Returns:
            Tuple ``(h_new, c_new)`` of the updated hidden and cell states.
        """
        stacked = paddle.concat([x, h_prev], axis=1)

        i = paddle.nn.functional.sigmoid(self.input_gate(stacked))
        f = paddle.nn.functional.sigmoid(self.forget_gate(stacked))
        g = paddle.tanh(self.cell_gate(stacked))
        o = paddle.nn.functional.sigmoid(self.output_gate(stacked))

        # Standard LSTM update: forget part of the old cell, add gated input.
        c_new = f * c_prev + i * g
        h_new = o * paddle.tanh(c_new)
        return h_new, c_new


class Forward(nn.Layer):
    """Feed-forward block: 3x3 conv -> GELU -> dropout.

    Args:
        band: number of input and output channels.
        dropout: dropout probability applied after the activation.

    Bug fix: the original constructed ``self.dropout`` but never called it in
    ``forward``, so the ``dropout`` argument was silently ignored. It is now
    applied after the activation. With the default ``dropout=0.`` (the only
    usage visible in this file) behavior is unchanged.
    """
    def __init__(self,
                 band,
                 dropout=0.):
        super().__init__()
        self.down = nn.Conv2D(band, band, 3, 1, 1)  # same-size 3x3 conv
        self.act = nn.GELU()
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        x = self.down(x)
        x = self.act(x)
        # Previously missing: actually apply the configured dropout.
        x = self.dropout(x)
        return x


class Hilo_Attention(nn.Layer):
    """Cross high/low-frequency attention-style mixing.

    Each branch produces q/k/v via a 3x3 conv; the low-frequency key is
    upsampled to the high branch's 9x9 grid and the high-frequency key is
    downsampled (stride-3 conv) to the low branch's grid, then keys and
    queries are combined element-wise (not a dot-product attention).

    Args:
        band: channel count of both the high- and low-frequency inputs.
    """
    def __init__(self, band):
        super().__init__()
        # NOTE(review): self.avg is constructed but never used in forward;
        # kept so the module's sub-layer layout stays unchanged.
        self.avg = nn.AvgPool2D(kernel_size=3, stride=3)
        self.conv_qkv_hi = nn.Conv2D(band, band*3, 3, 1, 1)
        self.conv_qkv_lo = nn.Conv2D(band, band*3, 3, 1, 1)
        # NOTE(review): mode is uppercase 'BICUBIC' — verify paddle accepts
        # this spelling (docs use lowercase 'bicubic').
        self.up = nn.Upsample(mode='BICUBIC', size=[9, 9])
        self.down = nn.Conv2D(band, band, 3, 3)


    def hi_qkv(self, x):
        """Project the high-frequency input to (q, k, v), each with `band` channels."""
        x = self.conv_qkv_hi(x)
        q, k, v = paddle.chunk(x, 3, axis=1, name=None)
        return q, k, v

    def lo_qkv(self, x):
        """Project the low-frequency input to (q, k, v), each with `band` channels."""
        x = self.conv_qkv_lo(x)
        q, k, v = paddle.chunk(x, 3, axis=1, name=None)
        return q, k, v

    def forward(self, x_hi, x_lo):
        q_hi, k_hi, v_hi = self.hi_qkv(x_hi)
        # Bug fix: the original called self.hi_qkv on x_lo as well, which
        # left conv_qkv_lo entirely unused and shared one projection for
        # both branches.
        q_lo, k_lo, v_lo = self.lo_qkv(x_lo)

        # Move each branch's key onto the other branch's spatial grid.
        k_lo_hi = self.up(k_lo)
        k_hi_lo = self.down(k_hi)

        attn_hi = q_hi * k_lo_hi
        attn_lo = q_lo * k_hi_lo

        x_hi_out = attn_hi * v_hi
        x_lo_out = attn_lo * v_lo

        return x_hi_out, x_lo_out


class Transformer(nn.Layer):
    """Recurrent HiLo transformer stack.

    One shared Transformer_Block is applied ``depth - 1`` times, with a
    conv-LSTM carrying the hidden state between iterations on a fixed
    9x9 spatial grid.

    Args:
        band: channel count of the hidden state and of the input ``x``.
        depth: number of unroll steps plus one (the loop runs depth - 1 times).
    """
    def __init__(self,
                 band,
                 depth,
                 ):
        super().__init__()
        self.depth = depth
        self.band = band
        self.transformer = Transformer_Block(band)
        # NOTE(review): always empty — kept only so the parameter/state-dict
        # layout of existing checkpoints is preserved.
        self.layers = nn.LayerList([])
        self.down1 = nn.Conv2D(band, band, 3, 3)          # 9x9 -> 3x3 low branch
        self.up = nn.Upsample(mode='BICUBIC', size=[9, 9])
        self.down2 = nn.Conv2D(band*2, band, 3, 1, 1)     # fuse hi + upsampled lo
        self.down3 = nn.Conv2D(band, 32, 3, 1)            # head conv (no padding)
        self.lstm = LSTM(band, band)


    def forward(self, x):
        B = x.shape[0]
        # Fix: the original stored the recurrent state on self.hidden /
        # self.cell, which kept large tensors (and their autograd graph)
        # alive between calls and made forward() call-stateful. Locals give
        # identical per-call results.
        hidden = paddle.zeros([B, self.band, 9, 9])
        cell = paddle.zeros([B, self.band, 9, 9])

        for _ in range(self.depth - 1):
            hidden, cell = self.lstm(x, hidden, cell)
            x_hi = hidden                 # high-frequency branch stays at 9x9
            x_lo = self.down1(hidden)     # low-frequency branch at 3x3
            x_hi, x_lo = self.transformer(x_hi, x_lo)
            # Merge branches back into the next hidden state.
            hidden = self.down2(paddle.concat([x_hi, self.up(x_lo)], axis=1))

        # NOTE(review): with depth == 1 the loop never runs and the output
        # is computed from an all-zeros hidden state.
        return self.down3(hidden)


class Transformer_Block(nn.Layer):
    """Pre-norm transformer block operating on a (high, low) frequency pair.

    Each sub-layer (HiLo attention, then per-branch feed-forward) is wrapped
    as ``x + f(norm(x))``, with separate BatchNorm layers per branch.

    Args:
        band: channel count of both branches.
    """
    def __init__(self, band):
        super().__init__()
        self.attn = Hilo_Attention(band)
        # NOTE(review): never used in forward; kept so the parameter layout
        # of existing checkpoints is preserved.
        self.down = nn.Conv2D(3, 3, 1)
        self.attn_norm_hi = nn.BatchNorm2D(band)
        self.attn_norm_lo = nn.BatchNorm2D(band)

        self.ff_norm_hi = nn.BatchNorm2D(band)
        self.ff_norm_lo = nn.BatchNorm2D(band)

        self.ff_hi = Forward(band)
        self.ff_lo = Forward(band)


    def forward(self, x_hi, x_lo):
        # Self-attention sub-layer with residual connections.
        res_hi, res_lo = x_hi, x_lo
        out_hi, out_lo = self.attn(self.attn_norm_hi(x_hi),
                                   self.attn_norm_lo(x_lo))
        x_hi = out_hi + res_hi
        x_lo = out_lo + res_lo

        # Feed-forward sub-layer with residual connections.
        res_hi, res_lo = x_hi, x_lo
        x_hi = self.ff_hi(self.ff_norm_hi(x_hi)) + res_hi
        x_lo = self.ff_lo(self.ff_norm_lo(x_lo)) + res_lo

        return x_hi, x_lo
        

class ViT(nn.Layer):
    """HiLo classifier: conv patch embedding -> recurrent HiLo transformer
    -> flatten -> MLP head.

    Args:
        band: channel width of the transformer trunk.
        image_size: spatial side length of the input patches (default 9).
        num_patches: input channel count fed to the embedding conv.
        num_classes: number of output logits.
        dim: width of the (currently unused) positional embedding / cls token.
        pool: pooling mode string; stored but not used in forward.
        emb_dropout: probability for the (currently unused) embedding dropout.
    """
    def __init__(self, band, image_size=9, num_patches=826, num_classes=4, dim=80,  pool='cls', emb_dropout=0.):
        super().__init__()
        self.name = 'HiLo'
        patch_dim = image_size ** 2
        # NOTE(review): pos_embedding, cls_token, dropout, pool and to_latent
        # are created but never referenced in forward(); they are kept so the
        # parameter set and checkpoints stay compatible.
        self.pos_embedding = paddle.create_parameter(
            shape=[1, num_patches, dim],
            dtype='float32',
            default_initializer=paddle.nn.initializer.TruncatedNormal(std=.02))

        self.patch_to_embedding = nn.Conv2D(num_patches, band, 3, 1, 1)
        self.cls_token = paddle.create_parameter(
            shape=[1, 1, dim],
            dtype='float32',
            default_initializer=paddle.nn.initializer.TruncatedNormal(std=.02))

        self.dropout = nn.Dropout(emb_dropout)
        self.transformer = Transformer(band, 4)

        self.pool = pool
        self.to_latent = nn.Identity()

        # 1568 = 32 channels * 7 * 7 spatial from the transformer's head conv
        # on 9x9 inputs — assumes the default image_size; TODO confirm.
        self.mlp_head = nn.Sequential(
            nn.Linear(1568, 32),
            nn.BatchNorm(32),
            nn.Linear(32, num_classes)
        )


    def forward(self, x):
        embedded = self.patch_to_embedding(x)
        features = self.transformer(embedded)
        flat = features.flatten(1)
        return self.mlp_head(flat)

        
if __name__ == '__main__':
    import os

    import numpy as np

    # Allow duplicate OpenMP runtimes (common workaround on Windows/conda).
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
    paddle.set_device('cpu')

    # Smoke test: one forward pass on random data with the default shapes.
    model = ViT(40)
    batch = paddle.standard_normal(shape=[128, 826, 9, 9])
    logits = model(batch)
    print(logits.shape)
    print('end')
