# _*_ coding : utf-8  _*_
# @Time : 2025/3/10 00:14
# @Author : 马梓航
# @File : vit_framework
# @Project : VIT（VIT各种版本实现+小马实现版

import torch
from torch import nn
import torch.nn.functional as F

from einops import rearrange,repeat
from einops.layers.torch import Rearrange

#helpers

def pair(t):
    """Return *t* unchanged if it is already a tuple, otherwise duplicate it into a pair."""
    if isinstance(t, tuple):
        return t
    return (t, t)

#classes

class PreNorm(nn.Module):
    """Pre-normalization wrapper: LayerNorm the input, then run the wrapped module.

    ViT applies LayerNorm *before* each sub-layer (attention / feed-forward),
    unlike the post-norm ordering of the original Transformer.
    """

    def __init__(self, model_dim, network):
        super().__init__()
        self.norm = nn.LayerNorm(model_dim)
        self.network = network

    def forward(self, x):
        normed = self.norm(x)
        return self.network(normed)

class Attention(nn.Module):
    """Multi-head scaled dot-product self-attention.

    One linear layer produces q, k and v for every head at once; the per-head
    results are concatenated, mixed by ``wo`` and projected back to
    ``model_dim`` with dropout.
    """

    def __init__(self, model_dim, heads, dropout, head_dim):
        super().__init__()
        self.head_dim = head_dim
        self.inner_dim = heads * head_dim
        self.heads = heads
        self.to_qkv = nn.Linear(model_dim, self.inner_dim * 3, bias=False)
        self.scale = head_dim ** -0.5

        # softmax over the last dim normalises each query's row of scores
        self.softmax = nn.Softmax(-1)

        self.to_out = nn.Sequential(
            nn.Linear(self.inner_dim, model_dim),
            nn.Dropout(dropout)
        )
        # extra projection that mixes the concatenated head outputs before to_out
        self.wo = nn.Linear(self.inner_dim, self.inner_dim)

    def forward(self, x):
        batch, seq_len, _ = x.shape

        # single projection, then split into per-head q/k/v:
        # (b, n, 3*h*d) -> (3, b, h, n, d) -> three (b, h, n, d) tensors
        projected = self.to_qkv(x).reshape(batch, seq_len, 3, self.heads, self.head_dim)
        q, k, v = projected.permute(2, 0, 3, 1, 4).unbind(0)

        # scaled dot-product scores; the matmul acts on the last two dims only
        scores = (q @ k.transpose(-1, -2)) * self.scale
        weights = self.softmax(scores)

        # weighted sum of values, then merge heads: (b, h, n, d) -> (b, n, h*d)
        context = weights @ v
        context = context.transpose(1, 2).reshape(batch, seq_len, self.inner_dim)

        # wo blends the head outputs together before the final projection
        return self.to_out(self.wo(context))

class FeedForward(nn.Module):
    """Position-wise MLP: expand to ``mlp_dims``, GELU, project back to ``model_dim``.

    Dropout follows both linear layers, matching the ViT encoder block.
    """

    def __init__(self, model_dim, mlp_dims, dropout):
        super().__init__()
        stages = [
            nn.Linear(model_dim, mlp_dims),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(mlp_dims, model_dim),
            nn.Dropout(dropout),
        ]
        self.network = nn.Sequential(*stages)

    def forward(self, x):
        return self.network(x)

class Transformer(nn.Module):
    """Stack of ``depth`` pre-norm encoder blocks (attention + feed-forward,
    each wrapped in a residual connection).

    Args:
        model_dim: embedding dimension of the token sequence.
        depth: number of encoder blocks.
        heads: number of attention heads.
        dropout: dropout probability for attention output and MLP.
        mlp_dims: hidden width of the feed-forward network.
        head_dim: per-head dimension of q/k/v.
    """

    def __init__(self, model_dim, depth, heads, dropout, mlp_dims, head_dim):
        super().__init__()
        # BUGFIX: the previous version built ONE (attention, feed-forward) pair
        # and repeated the *same* ModuleList instance `depth` times inside an
        # nn.Sequential, so every "layer" shared a single set of weights and the
        # effective depth was 1.  Each depth level must construct its own,
        # independently initialised pair of sub-layers.
        self.layers = nn.ModuleList([
            nn.ModuleList([
                PreNorm(model_dim, Attention(model_dim, heads, dropout, head_dim)),
                PreNorm(model_dim, FeedForward(model_dim, mlp_dims, dropout)),
            ])
            for _ in range(depth)
        ])

    def forward(self, x):
        """Apply the encoder blocks in order; x keeps shape (batch, seq, model_dim)."""
        for attn, ff in self.layers:
            x = attn(x) + x  # residual around attention
            x = ff(x) + x    # residual around feed-forward
        return x

class vit(nn.Module):
    """Vision Transformer (ViT) classifier.

    Pipeline: patchify + linearly embed the image, prepend a learnable [cls]
    token, add learnable position embeddings, run a pre-norm Transformer
    encoder, pool ("cls" token or mean over tokens), then classify with a
    LayerNorm + Linear head.

    Args:
        batch_size: kept for backward compatibility with existing callers; the
            model now broadcasts over any runtime batch size, so this value is
            no longer used to shape any parameter.
        image_size: input image size (int or (height, width) tuple).
        patch_size: patch size (int or (height, width) tuple); must divide the
            image size exactly.
        num_classes: number of output classes.
        model_dim: embedding dimension used throughout the encoder.
        max_length_sequence: maximum token count supported by the position
            embedding (must cover num_patches + 1 for the cls token).
        depth: number of encoder blocks.
        heads: number of attention heads.
        pool: "cls" (use the cls token) or "mean" (average over all tokens).
        channels: number of input image channels.
        mlp_dims: hidden width of the encoder's feed-forward network.
        dropout: dropout inside the encoder blocks.
        emb_dropout: dropout applied to the embedded sequence.
    """

    def __init__(self, batch_size, image_size, patch_size, num_classes, model_dim, max_length_sequence, depth, heads, pool, channels, mlp_dims, dropout, emb_dropout):
        super().__init__()
        image_height, image_width = pair(image_size)
        patch_height, patch_width = pair(patch_size)

        assert image_height % patch_height == 0 and image_width % patch_width == 0, "image sizes must be divisible by the patch sizes"

        # BUGFIX: use integer division -- these are counts (e.g. 14*14 = 196
        # patches, 16*16*3 = 768 values per flattened patch), not floats.
        nums_patchs = (image_height // patch_height) * (image_width // patch_width)
        patch_dim = patch_width * patch_height * channels

        assert pool in ("cls", "mean"), "pool type must be either \"cls\" or \"mean\""
        assert max_length_sequence >= nums_patchs + 1, "max_length_sequence must cover all patches plus the cls token"

        # Patch embedding: split the image into patches, flatten each patch and
        # project it to model_dim.  (An equivalent alternative is a Conv2d with
        # kernel_size = stride = patch_size followed by a flatten.)
        self.iamge2embedding_1_1 = nn.Sequential(
            Rearrange("b c (h ph) (w pw) -> b (h w) (ph pw c)", ph=patch_height, pw=patch_width),
            nn.Linear(patch_dim, model_dim)
        )

        # BUGFIX: the cls token and position embedding were plain tensors built
        # with requires_grad=True; they were never registered on the module, so
        # they did not move with .to(device), were missing from state_dict, and
        # were invisible to optimizers.  Register them as nn.Parameter.
        self.cls_token_3 = nn.Parameter(torch.randn(1, 1, model_dim))

        # BUGFIX: the position embedding is now (1, max_len, model_dim) and is
        # broadcast across the batch in forward().  Previously it was baked to
        # the constructor's batch_size and then tiled by the runtime batch,
        # which produced a (batch_size * bs, ...) tensor -- a shape error for
        # any batch size other than 1.
        self.position_embedding = nn.Parameter(torch.randn(1, max_length_sequence, model_dim))

        # dropout applied to the embedded sequence
        self.dropout = nn.Dropout(emb_dropout)

        # stored hyper-parameters (kept for compatibility with earlier versions)
        self.nhead = heads
        self.drop = dropout
        self.patch_size = patch_size
        self.depth = depth
        self.mlp_diims = mlp_dims
        self.pool = pool

        # hand-written pre-norm encoder (head_dim = model_dim / heads)
        self.transformer = Transformer(model_dim, depth, heads, dropout, mlp_dims, model_dim // heads)

        self.mlp_head = nn.Sequential(
            nn.LayerNorm(model_dim),
            nn.Linear(model_dim, num_classes)
        )

    def forward(self, image):
        """Classify a batch of images.

        Args:
            image: tensor of shape (batch, channels, height, width).

        Returns:
            Logits of shape (batch, num_classes).
        """
        # step 1: image -> patch embeddings, shape (batch, num_patches, model_dim)
        x = self.iamge2embedding_1_1(image)
        bs = x.shape[0]

        # step 2: prepend the learnable cls token, broadcast across the batch
        cls_tokens = self.cls_token_3.expand(bs, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)

        # step 3: add position embeddings, sliced to the actual sequence length
        # and broadcast over the batch; the out-of-place add avoids mutating a
        # tensor that autograd may still need
        seq_len = x.shape[1]
        x = x + self.position_embedding[:, :seq_len, :]
        x = self.dropout(x)

        # step 4: run the encoder stack
        encoder_output = self.transformer(x)

        # step 5: pool FIRST, then classify.  BUGFIX: the head previously ran
        # before pooling, so "mean" pooling averaged per-token logits instead
        # of pooling features -- the standard ViT pools the encoder output and
        # applies the head once.  (Also removed a leftover debug print.)
        pooled = encoder_output.mean(dim=1) if self.pool == "mean" else encoder_output[:, 0, :]
        return self.mlp_head(pooled)

# Factory wrapped in a function so the model can be imported conveniently.
def thisisvit():
    """Build the standard ViT-Base/16 configuration (224x224 RGB input, 1000 classes)."""
    return vit(
        batch_size=1,
        image_size=224,
        patch_size=16,
        num_classes=1000,
        model_dim=768,
        max_length_sequence=200,
        depth=12,
        heads=12,
        pool="cls",
        channels=3,
        mlp_dims=3072,
        dropout=0.1,
        emb_dropout=0.1,
    )

# Quick smoke test.  BUGFIX: guarded behind __main__ so that importing this
# module (the stated purpose of thisisvit) no longer builds a full ViT and
# runs a forward pass as an import side effect.
if __name__ == "__main__":
    model = thisisvit()
    dummy = torch.randn(1, 3, 224, 224)
    print(model(dummy).shape)