import torch
import torch.nn as nn

class TransformerEncoderLayer(nn.Module):
    """Transformer encoder layer over batch-first sequences.

    Input/output shape: (batch, seq_len, channels). Internally permutes to
    (seq_len, batch, channels) because ``nn.MultiheadAttention`` defaults to
    seq-first layout.

    Fix vs. previous revision: ``dim_feedforward`` was accepted but ignored —
    the layer had no position-wise feed-forward sublayer and no residual/norm,
    so stacked layers degraded. The standard FFN and residual + LayerNorm
    around both sublayers are now implemented. Signatures and output shapes
    are unchanged (note: adds parameters, so old checkpoints won't load).

    Args:
        in_channels: embedding dimension of the input (must be divisible by nhead).
        out_channels: dimension of the projected output.
        nhead: number of attention heads.
        dim_feedforward: hidden width of the position-wise feed-forward network.
        dropout: dropout probability used in attention, FFN and on the output.
    """

    def __init__(self, in_channels, out_channels, nhead=8, dim_feedforward=2048, dropout=0.1):
        super().__init__()
        assert in_channels % nhead == 0, "in_channels must be divisible by nhead"
        self.self_attn = nn.MultiheadAttention(in_channels, nhead, dropout=dropout)
        # Position-wise feed-forward network (this is what dim_feedforward is for).
        self.ffn = nn.Sequential(
            nn.Linear(in_channels, dim_feedforward),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(dim_feedforward, in_channels),
        )
        self.norm1 = nn.LayerNorm(in_channels)
        self.norm2 = nn.LayerNorm(in_channels)
        self.linear = nn.Linear(in_channels, out_channels)
        self.dropout = nn.Dropout(dropout)

    def forward(self, src):
        # (batch, seq, dim) -> (seq, batch, dim) for nn.MultiheadAttention.
        src = src.permute(1, 0, 2)
        attn_output, _ = self.self_attn(src, src, src)
        src = self.norm1(src + attn_output)   # residual + norm around attention
        src = self.norm2(src + self.ffn(src))  # residual + norm around FFN
        src = src.permute(1, 0, 2)  # back to (batch, seq, dim)
        return self.dropout(self.linear(src))

class TransformerDecoderLayer(nn.Module):
    """Transformer decoder layer over batch-first sequences.

    ``tgt`` and ``memory`` are both (batch, seq_len, channels); output is
    (batch, tgt_seq_len, out_channels). Internally permutes to seq-first for
    ``nn.MultiheadAttention``.

    Fix vs. previous revision: ``dim_feedforward`` was accepted but ignored —
    no feed-forward sublayer, no residual connections, no layer norms. The
    standard self-attn -> cross-attn -> FFN structure with residual +
    LayerNorm is now implemented. Signatures and output shapes are unchanged
    (note: adds parameters, so old checkpoints won't load).

    Args:
        in_channels: embedding dimension of tgt/memory (divisible by nhead).
        out_channels: dimension of the projected output.
        nhead: number of attention heads.
        dim_feedforward: hidden width of the position-wise feed-forward network.
        dropout: dropout probability used in attention, FFN and on the output.
    """

    def __init__(self, in_channels, out_channels, nhead=8, dim_feedforward=2048, dropout=0.1):
        super().__init__()
        assert in_channels % nhead == 0, "in_channels must be divisible by nhead"
        self.self_attn = nn.MultiheadAttention(in_channels, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(in_channels, nhead, dropout=dropout)
        # Position-wise feed-forward network (this is what dim_feedforward is for).
        self.ffn = nn.Sequential(
            nn.Linear(in_channels, dim_feedforward),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(dim_feedforward, in_channels),
        )
        self.norm1 = nn.LayerNorm(in_channels)
        self.norm2 = nn.LayerNorm(in_channels)
        self.norm3 = nn.LayerNorm(in_channels)
        self.linear = nn.Linear(in_channels, out_channels)
        self.dropout = nn.Dropout(dropout)

    def forward(self, tgt, memory):
        # (batch, seq, dim) -> (seq, batch, dim) for nn.MultiheadAttention.
        tgt = tgt.permute(1, 0, 2)
        memory = memory.permute(1, 0, 2)
        self_attn_output, _ = self.self_attn(tgt, tgt, tgt)
        tgt = self.norm1(tgt + self_attn_output)   # residual + norm: self-attention
        cross_attn_output, _ = self.multihead_attn(tgt, memory, memory)
        tgt = self.norm2(tgt + cross_attn_output)  # residual + norm: cross-attention
        tgt = self.norm3(tgt + self.ffn(tgt))      # residual + norm: FFN
        tgt = tgt.permute(1, 0, 2)  # back to (batch, seq, dim)
        return self.dropout(self.linear(tgt))

class TransformerGenerator(nn.Module):
    """Image-to-image generator: conv embed -> transformer encoder/decoder
    over flattened spatial positions -> conv head with sigmoid.

    Input: (batch, in_channels, H, W); output: (batch, out_channels, H, W)
    with values in (0, 1).

    Bug fixes vs. previous revision:
      1. The encoder/decoder ``ModuleList``s were built by repeating a SINGLE
         layer instance, so all "layers" shared one set of weights. A fresh
         layer is now constructed per depth.
      2. ``x.view(B, C, H*W).permute(2, 0, 1)`` fed seq-first (HW, B, C)
         tensors to layers that expect batch-first (B, seq, C) input (they
         permute internally), so attention mixed the batch dimension instead
         of spatial positions. The flattening is now batch-first.

    Args:
        in_channels: channels of the input image.
        out_channels: channels of the generated image.
        nhead: attention heads per transformer layer.
        num_encoder_layers / num_decoder_layers: stack depths.
        dim_feedforward: FFN width passed through to the layers.
        dropout: dropout probability passed through to the layers.
    """

    def __init__(self, in_channels=3, out_channels=3, nhead=8, num_encoder_layers=2, num_decoder_layers=2, dim_feedforward=2048, dropout=0.1):
        super().__init__()
        self.hidden_channels = 64  # embedding dim of the transformer stack
        self.initial_conv = nn.Conv2d(in_channels, self.hidden_channels, kernel_size=3, padding=1)

        assert self.hidden_channels % nhead == 0, "hidden_channels must be divisible by nhead"

        # One independent layer per depth (NOT the same instance repeated).
        self.encoder = nn.ModuleList([
            TransformerEncoderLayer(
                in_channels=self.hidden_channels,
                out_channels=self.hidden_channels,
                nhead=nhead,
                dim_feedforward=dim_feedforward,
                dropout=dropout,
            )
            for _ in range(num_encoder_layers)
        ])

        self.decoder = nn.ModuleList([
            TransformerDecoderLayer(
                in_channels=self.hidden_channels,
                out_channels=self.hidden_channels,
                nhead=nhead,
                dim_feedforward=dim_feedforward,
                dropout=dropout,
            )
            for _ in range(num_decoder_layers)
        ])

        self.final_conv = nn.Conv2d(self.hidden_channels, out_channels, kernel_size=3, padding=1)

    def forward(self, x):
        x = self.initial_conv(x)

        batch_size, channels, height, width = x.size()
        # Flatten to batch-first (batch, H*W, channels): each spatial position
        # is one sequence element, which is the layout the layers expect.
        x_seq = x.view(batch_size, channels, height * width).permute(0, 2, 1)

        for encoder_layer in self.encoder:
            x_seq = encoder_layer(x_seq)

        # Self-decoding: the encoded sequence serves as both target and memory.
        for decoder_layer in self.decoder:
            x_seq = decoder_layer(x_seq, x_seq)

        # Restore (batch, channels, H, W) spatial layout.
        x = x_seq.permute(0, 2, 1).view(batch_size, channels, height, width)

        return torch.sigmoid(self.final_conv(x))

class Discriminator(nn.Module):
    """Strided-convolution discriminator.

    Four 4x4 stride-2 convolutions, each halving the spatial resolution;
    LeakyReLU(0.2) after every conv except the last, which maps to a single
    logit channel. For a (B, in_channels, H, W) input the output is
    (B, 1, H/16, W/16) — raw scores, no final activation.
    """

    def __init__(self, in_channels=3):
        super().__init__()
        # Channel progression for the downsampling trunk.
        channel_plan = [(in_channels, 64), (64, 128), (128, 256)]
        stages = []
        for c_in, c_out in channel_plan:
            stages.append(nn.Conv2d(c_in, c_out, kernel_size=4, stride=2, padding=1))
            stages.append(nn.LeakyReLU(0.2))
        # Final projection to one score channel, still stride 2.
        stages.append(nn.Conv2d(256, 1, kernel_size=4, stride=2, padding=1))
        self.model = nn.Sequential(*stages)

    def forward(self, x):
        return self.model(x)
