import paddle
import paddle.nn as nn
from paddle.optimizer import Adam
from paddle import einsum
import numpy as np



class GELU(nn.Layer):
    """Gaussian Error Linear Unit using the tanh approximation."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
        coeff = np.sqrt(2 / np.pi)
        inner = coeff * (x + 0.044715 * paddle.pow(x, 3))
        return 0.5 * x * (1 + paddle.tanh(inner))

    @staticmethod
    def gelu(x):
        """NumPy counterpart of forward, for plain arrays/scalars."""
        coeff = np.sqrt(2 / np.pi)
        inner = coeff * (x + 0.044715 * np.power(x, 3))
        return 0.5 * x * (1 + np.tanh(inner))

# Layer normalization (pre-norm wrapper)
class PreNorm(nn.Layer):
    """Normalize the input with LayerNorm, then delegate to the wrapped layer."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, **kwargs):
        normalized = self.norm(x)
        return self.fn(normalized, **kwargs)

# Feed-forward network
class FeedForward(nn.Layer):
    """Two-layer MLP (dim -> hidden_dim -> dim) with GELU and dropout."""

    def __init__(self, dim, hidden_dim, dropout=0.):
        super().__init__()
        layers = [
            nn.Linear(dim, hidden_dim),   # expand
            GELU(),                       # non-linearity
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, dim),   # project back
            nn.Dropout(dropout),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)

# Attention mechanism
class Attention(nn.Layer):
    """Multi-head scaled dot-product self-attention.

    Args:
        dim: embedding dimension of the input tokens.
        heads: number of attention heads.
        dim_head: per-head dimension (default 64).
        dropout: dropout rate applied after the output projection.
    """

    def __init__(self, dim, heads, dim_head=64, dropout=0.):
        super().__init__()
        inner_dim = dim_head * heads
        # A single head whose size equals dim needs no output projection.
        project_out = not (heads == 1 and dim_head == dim)

        self.heads = heads
        self.scale = dim_head ** -0.5  # 1/sqrt(d_k) scaling

        self.attend1 = nn.Softmax(axis=-1)

        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias_attr=False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        ) if project_out else nn.Identity()

        # NOTE(review): not used anywhere in forward(); kept so previously
        # saved checkpoints with this parameter still load.
        self.cnn3 = nn.Conv2D(2, 2, 3, padding=1)

    def forward(self, x):
        b, n, _, h = *x.shape, self.heads
        qkv = paddle.chunk(self.to_qkv(x), 3, axis=-1)
        # BUG FIX: split heads AND move them to axis 1, giving [b, h, n, d].
        # The original left tensors as [b, n, h, d], so the einsum below
        # attended across heads at each position instead of across the
        # sequence positions.
        q, k, v = [
            paddle.transpose(paddle.reshape(t, [b, n, h, -1]), [0, 2, 1, 3])
            for t in qkv
        ]

        # [b, h, n, n] similarity scores.
        dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale

        attn1 = self.attend1(dots)  # softmax over the key axis j

        out = einsum('b h i j, b h j d -> b h i d', attn1, v)

        # Merge heads back: [b, h, n, d] -> [b, n, h*d].
        out = paddle.transpose(out, [0, 2, 1, 3])
        out = paddle.reshape(out, [b, n, -1])

        return self.to_out(out)

# Transformer layer
class Transformer(nn.Layer):
    """A stack of `depth` pre-norm attention + feed-forward blocks, each with a residual connection."""

    def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout=0.):
        super().__init__()
        blocks = []
        for _ in range(depth):
            attn_block = PreNorm(dim, Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout))
            ff_block = PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout))
            blocks.append(nn.LayerList([attn_block, ff_block]))
        self.layers = nn.LayerList(blocks)

    def forward(self, x):
        for attn, ff in self.layers:
            x = x + attn(x)  # residual around attention
            x = x + ff(x)    # residual around feed-forward
        return x

# ViT model
class ViT(nn.Layer):
    """Vision-Transformer-style network with a convolutional regression head.

    The embedding stage (LayerNorm -> Unfold -> Linear) and the head are
    hard-coded to an input whose normalized shape is (195, 101, 2) and an
    embedding width of 202 = 101 * 2 — assumes x is a 4-D tensor whose
    trailing dims are (195, 101, 2); TODO confirm against the caller.

    NOTE(review): several constructor arguments are validated or stored but
    never used in forward(): `pool`, `channels`, `emb_dropout`,
    `self.num_patches`, `self.patch_dim` — presumably left over from the
    original ViT recipe (see the commented-out to_patch_embedding below).
    """
    def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, pool='cls', channels=2,
                 dim_head=64, dropout, emb_dropout):
        super().__init__()
        assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.'
        self.num_patches = image_size // patch_size  # unused in forward()
        self.patch_dim = 2  # unused in forward()
        assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'

        # Embedding stage; 1x1 unfold flattens the (101, 2) trailing dims
        # into 202 features per row — TODO confirm intended patching.
        self.LayerNorm = nn.LayerNorm((195, 101, 2))
        self.Unfold = nn.Unfold(kernel_sizes =[1, 1], strides=[1, 1])
        self.Linear = nn.Linear(202, dim)

        # NOTE(review): original patch-embedding pipeline, kept for reference.
        # self.to_patch_embedding = nn.Sequential(
        #     nn.LayerNorm((195, 101, 2)),
        #     nn.Unfold(kernel_sizes=[patch_size, patch_size], strides=[patch_size, patch_size]),
        #     nn.Linear(202, dim),
        # )


        # Four sequential transformer stages sharing one hyper-parameter set.
        self.transformer1 = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)
        self.transformer2 = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)
        self.transformer3 = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)
        self.transformer4 = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)

        # Projects the embedding width (expected to be 202) down to 195.
        self.to_latent = nn.Sequential(
            nn.Linear(202, 195)
        )

        # Final classification/regression head over the 195-wide features.
        self.mlp_head = nn.Sequential(
            nn.LayerNorm(195),
            nn.Linear(195, num_classes)
        )

        self.dim = dim
        # 1-D conv head that collapses the 195 sequence channels to 1.
        self.reCNN = nn.Sequential(nn.Conv1D(195, 64, 1), GELU(), nn.Conv1D(64, 1, 1), GELU())
        # NOTE(review): defined but never used in forward().
        self.to_reCNN = nn.Sequential(
            nn.Linear(195, 1)
        )

    def forward(self, x):
        ###### embedding stage ##############
        x = self.LayerNorm(x)
        x = self.Unfold(x)
        x = self.Linear(x)
        ###### embedding stage ##############


        b, n, _ = x.shape

        ## large block: four transformer stages applied in sequence
        # block-1
        x1 = self.transformer1(x)
        # block-2
        x2 = self.transformer2(x1)
        # block-3
        x3 = self.transformer3(x2)
        # block-4
        x = self.transformer4(x3)

        x = self.to_latent(x)

        # [b, n, 195] -> [b, 195, n] so Conv1D sees 195 input channels.
        x = paddle.transpose(x, [0, 2, 1])

        x = self.reCNN(x)

        # Collapse the singleton conv output channel: [b, 1, 195] -> [b, 195].
        x = paddle.reshape(x, [len(x), 195])

        x = self.mlp_head(x)

        return x

def create_model():
    """Build a ViT with the default hyper-parameters used in this project."""
    config = dict(
        image_size=32,
        patch_size=2,
        num_classes=195,
        dim=202,
        depth=4,
        heads=2,
        mlp_dim=195,
        dropout=0.7,
        emb_dropout=0.7,
    )
    return ViT(**config)
