import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_

class ConvTransformer(nn.Module):
    """Conv front-end followed by a Transformer encoder/decoder.

    A 3x3 same-padding conv maps ``input_channels`` -> ``output_channels``
    while preserving spatial size.  The feature map is then flattened so that
    each channel becomes one sequence token of dimension ``height * width``,
    run through a Transformer encoder and decoder (the encoder output serves
    as both ``tgt`` and ``memory``), and reshaped back to an image-like map.

    Note: ``height * width`` of the conv output must equal ``d_model``.
    """

    def __init__(self, input_channels, output_channels, num_heads=8,
                 num_layers=2, feature_size=64, d_model=9 * 16):
        """
        Args:
            input_channels: channel count of the input feature map.
            output_channels: channels produced by the conv layer; this is also
                the sequence length seen by the transformer.
            num_heads: attention heads; must evenly divide ``d_model``.
            num_layers: depth of both the encoder and the decoder stacks.
            feature_size: only used to size the ``fc`` head (see TODO below).
            d_model: token dimension, i.e. height * width of the conv output.
                Defaults to 9 * 16 to preserve the original behavior.
        """
        super(ConvTransformer, self).__init__()
        # TODO: decide whether the conv front-end is needed at all.
        self.conv1 = nn.Conv2d(input_channels, output_channels, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))

        self.d_model = d_model

        self.transformer_encoder_layer = nn.TransformerEncoderLayer(d_model=d_model, nhead=num_heads)
        self.transformer_encoder = nn.TransformerEncoder(self.transformer_encoder_layer, num_layers=num_layers)

        self.transformer_decoder_layer = nn.TransformerDecoderLayer(d_model=d_model, nhead=num_heads)
        self.transformer_decoder = nn.TransformerDecoder(self.transformer_decoder_layer, num_layers=num_layers)

        # TODO: dimension adjustment — ``fc`` is initialized but currently
        # unused by forward(); kept so existing code referencing it still works.
        self.fc = nn.Linear(output_channels * feature_size * feature_size, 10)

        self._reset_parameters()

    def _reset_parameters(self):
        """Xavier-initialize the conv and linear weights."""
        xavier_uniform_(self.conv1.weight)
        xavier_uniform_(self.fc.weight)

    def forward(self, x):
        """Run ``x`` of shape (B, C_in, H, W) -> (B, C_out, H, W).

        Raises:
            ValueError: if H * W of the conv output does not match ``d_model``.
        """
        # Convolutional feature extraction, e.g. (B, 16, 9, 16).
        x = F.relu(self.conv1(x))
        batch_size, channels, height, width = x.size()

        if height * width != self.d_model:
            raise ValueError(
                f"height * width ({height * width}) must equal d_model "
                f"({self.d_model})"
            )

        # Flatten each channel into one token: (B, C, H*W) -> (C, B, H*W).
        # The transformer layers here are seq-first (batch_first=False).
        x = x.view(batch_size, channels, height * width)
        x = x.permute(1, 0, 2)

        # Encoder, then decoder with the encoder output as both tgt and memory.
        x = self.transformer_encoder(x)
        x = self.transformer_decoder(x, x)

        # Back to image feature maps.  ``reshape`` (not ``view``) is required:
        # after permute the tensor is non-contiguous for batch_size > 1.
        x = x.permute(1, 0, 2).reshape(batch_size, channels, height, width)
        return x



# Smoke test: push a dummy feature map through the model and show its shape.
batch_size, channels, height, width = 1, 8, 9, 16
embedding_dim = 256

# Build a dummy convolutional feature map.
conv_features = torch.randn(batch_size, channels, height, width)

model = ConvTransformer(8, 16)
t = model(conv_features)
print(t.shape)

# # Use adaptive average pooling to collapse the map into a fixed-size sequence:
# pooled_features = nn.AdaptiveAvgPool2d(1)(conv_features).squeeze(-1).squeeze(-1)
#
# # Then project that sequence to the desired embedding dimension:
# embedding = nn.Linear(channels, embedding_dim)(pooled_features)
