import torch
from pygments.lexers import math
from torch import nn
from torch.utils.data import Dataset, DataLoader


class PositionEncoder(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., "Attention Is All You Need").

    Precomputes a (1, max_len, d_model) table of sin/cos values at geometrically
    spaced frequencies. ``forward`` returns the slice matching the input's
    sequence length; the caller is expected to add it to its embeddings.
    """

    def __init__(self, d_model, max_len):
        super(PositionEncoder, self).__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Frequencies 10000^(-2i/d_model) for each even channel index 2i.
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-torch.log(torch.tensor(10000.0)) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)  # even channels: sin
        pe[:, 1::2] = torch.cos(position * div_term)  # odd channels: cos
        # BUG FIX: the original did `self.pe = ....to(device)`, reading a module
        # global defined near the bottom of the file, and stored a plain tensor
        # attribute. Registering a buffer makes the table follow `model.to(...)`
        # automatically and appear in `state_dict`, with no global dependency.
        self.register_buffer('pe', pe.unsqueeze(0))

    def forward(self, x):
        """Return the positional table for x's sequence length: (1, seq, d_model)."""
        return self.pe[:, :x.size(1)]

class MultiHeadAttention(nn.Module):
    """Multi-head scaled-dot-product self-attention.

    Projects the input to Q/K/V, splits the model dimension into ``n_head``
    heads of size ``d_model // n_head``, attends per head, then re-merges the
    heads and applies an output projection. Input and output are both
    (batch, seq_len, d_model).
    """

    def __init__(self, d_model, n_head):
        super(MultiHeadAttention, self).__init__()
        self.d_model = d_model
        self.n_head = n_head
        self.head_dim = d_model // n_head  # assumes d_model % n_head == 0
        self.q = nn.Linear(d_model, d_model)
        self.k = nn.Linear(d_model, d_model)
        self.v = nn.Linear(d_model, d_model)
        self.out = nn.Linear(d_model, d_model)

    def forward(self, x):
        b, seq_length, d_model = x.shape

        # Split heads by viewing (b, s, d_model) as (b, s, h, head_dim) and
        # moving the head axis forward -> (b, h, s, head_dim). A plain view
        # without the transpose would interleave heads incorrectly.
        Q = self.q(x).view(b, seq_length, self.n_head, self.head_dim).transpose(1, 2)
        K = self.k(x).view(b, seq_length, self.n_head, self.head_dim).transpose(1, 2)
        V = self.v(x).view(b, seq_length, self.n_head, self.head_dim).transpose(1, 2)

        # BUG FIX: softmax must normalize over the key axis (last dim) so each
        # query's attention weights sum to 1; the original used dim=1 (heads).
        score = torch.softmax(Q @ K.transpose(2, 3) / self.head_dim ** 0.5, dim=-1)

        # BUG FIX: the original computed `score @ V.transpose(1, 2)`, whose
        # batch dims (b, h, ...) vs (b, s, ...) don't broadcast. Attend first
        # -> (b, h, s, head_dim), then move heads back and merge them.
        # `.contiguous()` is required before `view` on a transposed tensor.
        out = (score @ V).transpose(1, 2).contiguous().view(b, seq_length, d_model)
        return self.out(out)


class TransformerBlock(nn.Module):
    """One post-norm encoder block: self-attention and a position-wise MLP,
    each wrapped in a residual connection followed by LayerNorm, with dropout
    applied after the first sub-layer."""

    def __init__(self, d_model, num_heads, dropout=0.1):
        super(TransformerBlock, self).__init__()
        self.attention = MultiHeadAttention(d_model, num_heads)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        # Bottleneck MLP: d_model -> d_model/2 -> d_model.
        self.feed_forward = nn.Sequential(
            nn.Linear(d_model, d_model // 2),
            nn.ReLU(),
            nn.Linear(d_model // 2, d_model),
        )
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Sub-layer 1: residual + norm around attention, then dropout.
        x = self.dropout(self.norm1(x + self.attention(x)))
        # Sub-layer 2: residual + norm around the feed-forward network.
        return self.norm2(x + self.feed_forward(x))


class ViT(nn.Module):
    """Minimal Vision Transformer classifier.

    A strided convolution turns the image into non-overlapping patch
    embeddings (one "token" per patch), sinusoidal positional encodings are
    added, the tokens pass through a stack of transformer blocks, and a linear
    head maps each token to class logits.
    """

    def __init__(self, img_size, patch_size, d_model, num_heads, num_layers, num_classes, max_len):
        super(ViT, self).__init__()
        self.patch_size = patch_size
        self.d_model = d_model
        # kernel_size == stride == patch_size: each output pixel is the
        # embedding of one non-overlapping patch.
        self.conv = nn.Conv2d(in_channels=3, out_channels=d_model, kernel_size=patch_size, stride=patch_size)
        self.position_encoder = PositionEncoder(d_model, max_len)
        self.transformer = nn.Sequential(
            *[TransformerBlock(d_model, num_heads) for _ in range(num_layers)]
        )
        self.fc = nn.Linear(d_model, num_classes)

    def forward(self, x):
        x = self.conv(x)                  # (b, d_model, H/p, W/p)
        x = x.flatten(2).transpose(1, 2)  # (b, num_patches, d_model)
        # BUG FIX: the original did `x = self.position_encoder(x)`, which
        # REPLACED the patch embeddings with the positional table (discarding
        # the image content). The encoding must be ADDED to the embeddings.
        x = x + self.position_encoder(x)
        x = self.transformer(x)
        # NOTE(review): fc is applied per token, so the output is
        # (b, num_patches, num_classes). For whole-image classification you
        # likely want pooling (e.g. x.mean(dim=1)) or a CLS token before fc;
        # kept as-is to preserve the existing output shape — confirm intent.
        x = self.fc(x)
        return x


class RandomDataset(Dataset):
    """Synthetic dataset: random RGB images paired with random class labels."""

    def __init__(self, num_samples, img_size, num_classes):
        # Random images of shape (num_samples, 3, img_size, img_size).
        self.data = torch.randn(num_samples, 3, img_size, img_size)
        # One integer label per sample, drawn uniformly from [0, num_classes).
        self.labels = torch.randint(0, num_classes, (num_samples,))

    def __len__(self):
        return self.data.size(0)

    def __getitem__(self, idx):
        return (self.data[idx], self.labels[idx])

# ---------------------------------------------------------------------------
# Script setup: pick a device, then build a synthetic dataset and loader.
# ---------------------------------------------------------------------------
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

num_samples = 10000
img_size = 32
num_classes = 10

random_dataset = RandomDataset(num_samples, img_size, num_classes)
train_loader = DataLoader(random_dataset, batch_size=1, shuffle=True)