import torch
import torch.nn as nn
import torch.nn.functional as F

# ------------------------------
# Text encoder: returns the hidden state at every timestep plus the final hidden state
# ------------------------------
class TextEncoder(nn.Module):
    """Embed token ids and run them through a unidirectional LSTM."""

    def __init__(self, vocab_size, embed_dim=256, hidden_dim=512, num_layers=1):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.lstm = nn.LSTM(
            embed_dim,
            hidden_dim,
            num_layers=num_layers,
            batch_first=True,
            bidirectional=False,
        )

    def forward(self, input_ids, attention_mask=None):
        """
        Args:
            input_ids:      (B, seq_len) integer token ids.
            attention_mask: accepted for interface compatibility; currently unused here.

        Returns:
            encoder_outputs: (B, seq_len, hidden_dim) top-layer hidden state per step.
            final_hidden:    (B, hidden_dim) top-layer hidden state after the last step.
        """
        embedded = self.embedding(input_ids)                 # (B, seq_len, embed_dim)
        encoder_outputs, (hidden, _cell) = self.lstm(embedded)
        # hidden is (num_layers, B, hidden_dim); keep only the top layer.
        return encoder_outputs, hidden[-1]


# ------------------------------
# Additive (Bahdanau-style) attention
# ------------------------------
class AdditiveAttention(nn.Module):
    """Score every encoder timestep against a decoder state and pool a context vector."""

    def __init__(self, hidden_dim):
        super().__init__()
        self.W_h = nn.Linear(hidden_dim, hidden_dim, bias=False)
        self.W_s = nn.Linear(hidden_dim, hidden_dim, bias=False)
        self.v   = nn.Linear(hidden_dim, 1, bias=False)

    def forward(self, encoder_outputs, decoder_state, attention_mask=None):
        """
        Args:
            encoder_outputs: (B, seq_len, hidden_dim)
            decoder_state:   (B, hidden_dim)
            attention_mask:  optional (B, seq_len); positions where the mask is 0
                             (padding) receive exactly zero attention weight.
                             Default None preserves the original unmasked behavior.

        Returns:
            context:      (B, hidden_dim) attention-weighted sum of encoder outputs.
            attn_weights: (B, seq_len) softmax weights (each row sums to 1).
        """
        s = decoder_state.unsqueeze(1)  # (B, 1, hidden_dim)
        score = self.v(torch.tanh(self.W_h(encoder_outputs) + self.W_s(s))).squeeze(-1)  # (B, seq_len)
        if attention_mask is not None:
            # -inf before softmax makes masked (padding) positions contribute nothing.
            score = score.masked_fill(attention_mask == 0, float('-inf'))
        alpha = torch.softmax(score, dim=1)  # (B, seq_len)
        context = torch.bmm(alpha.unsqueeze(1), encoder_outputs).squeeze(1)  # (B, hidden_dim)
        return context, alpha


# ------------------------------
# Image decoder: projects the context-augmented feature vector to a 4x4 map,
# then upsamples 4x4 -> 64x64 with four transposed convolutions.
# ------------------------------
class ImageDecoder(nn.Module):
    """Map a (B, hidden_dim*2) feature vector to a (B, 3, 64, 64) image in [-1, 1]."""

    def __init__(self, hidden_dim=512, fmap_size=64):
        super().__init__()
        # Channel count of the initial 4x4 feature map (512 for the default fmap_size=64).
        self.init_channels = fmap_size * 8
        # Input is the concatenation [final_hidden, context] -> hidden_dim * 2.
        self.fc = nn.Linear(hidden_dim * 2, self.init_channels * 4 * 4)
        # Four transposed convolutions: 4 -> 8 -> 16 -> 32 -> 64.
        self.deconv = nn.Sequential(
            nn.BatchNorm2d(fmap_size * 8),
            nn.ReLU(inplace=True),
            # 4x4 -> 8x8
            nn.ConvTranspose2d(fmap_size * 8, fmap_size * 4, 4, 2, 1),
            nn.BatchNorm2d(fmap_size * 4),
            nn.ReLU(inplace=True),

            # 8x8 -> 16x16
            nn.ConvTranspose2d(fmap_size * 4, fmap_size * 2, 4, 2, 1),
            nn.BatchNorm2d(fmap_size * 2),
            nn.ReLU(inplace=True),

            # 16x16 -> 32x32
            nn.ConvTranspose2d(fmap_size * 2, fmap_size, 4, 2, 1),
            nn.BatchNorm2d(fmap_size),
            nn.ReLU(inplace=True),

            # 32x32 -> 64x64
            nn.ConvTranspose2d(fmap_size, 3, 4, 2, 1),
            nn.Tanh()
        )

    def forward(self, combined_feat):
        """
        combined_feat: (B, hidden_dim*2)
        returns:       (B, 3, 64, 64), values in [-1, 1] (tanh output)
        """
        x = self.fc(combined_feat)                       # (B, init_channels*4*4)
        # BUG FIX: reshape with the configured channel count instead of a
        # hard-coded 512, which silently broke for any fmap_size != 64
        # (wrong batch dimension from view(-1, ...) plus a BatchNorm channel
        # mismatch). Use the explicit batch size for safety.
        x = x.view(x.size(0), self.init_channels, 4, 4)
        img = self.deconv(x)
        return img

# Full model: encode the text, attend over encoder states, decode an image.
class TextToImageModel(nn.Module):
    """Text-to-image generator: TextEncoder -> AdditiveAttention -> ImageDecoder."""

    def __init__(self, vocab_size, embed_dim=256, hidden_dim=512, num_layers=1):
        super().__init__()
        self.encoder   = TextEncoder(vocab_size, embed_dim, hidden_dim, num_layers)
        self.attention = AdditiveAttention(hidden_dim)
        self.decoder   = ImageDecoder(hidden_dim)

    def forward(self, input_ids, attention_mask=None):
        """
        Args:
            input_ids:      (B, seq_len) token ids.
            attention_mask: passed through to the encoder.

        Returns:
            img:          (B, 3, 64, 64) generated image.
            attn_weights: (B, seq_len) attention weights over the input tokens.
        """
        enc_outs, final_hidden = self.encoder(input_ids, attention_mask)
        context, attn_weights = self.attention(enc_outs, final_hidden)
        combined = torch.cat([final_hidden, context], dim=-1)  # (B, hidden_dim*2)
        img = self.decoder(combined)
        # BUG FIX: return the attention weights that were actually computed
        # instead of discarding them and returning None.
        return img, attn_weights

# Discriminator keeps dynamic downsampling; the extra decoder layers above are
# not its concern.
class Discriminator(nn.Module):
    # Conditional discriminator: scores an (image, text feature) pair in (0, 1).
    def __init__(self, hidden_dim=512, fmap_size=64):
        super().__init__()
        # Image branch: four stride-2 convs; a 64x64 input becomes a
        # (fmap_size*8)-channel 4x4 feature map.
        self.img_branch = nn.Sequential(
            # 64×64 → 32×32
            nn.Conv2d(3, fmap_size, 4, 2, 1), nn.LeakyReLU(0.2,True),
            # 32→16
            nn.Conv2d(fmap_size, fmap_size*2, 4, 2,1), nn.BatchNorm2d(fmap_size*2), nn.LeakyReLU(0.2,True),
            # 16→8
            nn.Conv2d(fmap_size*2, fmap_size*4, 4, 2,1), nn.BatchNorm2d(fmap_size*4), nn.LeakyReLU(0.2,True),
            # 8→4
            nn.Conv2d(fmap_size*4, fmap_size*8, 4, 2,1), nn.BatchNorm2d(fmap_size*8), nn.LeakyReLU(0.2,True),
        )
        self.hidden_dim = hidden_dim
        # The text projection is created lazily on the first forward() because
        # its output size depends on the image branch's output spatial size.
        # NOTE(review): parameters created after the optimizer is constructed
        # will not be trained, and they are absent from state_dict() until the
        # first forward pass — confirm the training loop accounts for this.
        self.text_proj  = None
        # Joint head over the concatenated image + text feature maps
        # (fmap_size*16 channels in), producing one logit per spatial cell.
        self.joint_conv = nn.Sequential(
            nn.Conv2d(fmap_size*16, fmap_size*8, 3,1,1), nn.BatchNorm2d(fmap_size*8), nn.LeakyReLU(0.2,True),
            nn.Conv2d(fmap_size*8, 1, 3,1,1)
        )

    def forward(self, img, text_feat):
        """
        img:       (B, 3, H, W) input image (64x64 yields a 4x4 feature map).
        text_feat: (B, hidden_dim) text feature vector.
        returns:   (B,) realness score in (0, 1) — sigmoid of the mean spatial logit.
        """
        x_img = self.img_branch(img)      # (B,512,4,4)
        B,C,Hf,Wf = x_img.shape
        if self.text_proj is None:
            # Lazily sized to match the image-branch output; moved to the input
            # device. NOTE(review): dtype is left at the default — verify if
            # mixed precision is used.
            self.text_proj = nn.Linear(self.hidden_dim, C*Hf*Wf).to(img.device)
        # Tile the projected text feature into the same spatial layout.
        x_txt = self.text_proj(text_feat).view(B, C, Hf, Wf)
        x     = torch.cat([x_img, x_txt], dim=1)
        joint = self.joint_conv(x)        # (B,1,4,4)
        out   = torch.sigmoid(joint.view(B, -1).mean(1))
        return out