import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange


class CSwinAttention(nn.Module):
    """Non-overlapping window self-attention over a (B, H, W, C) feature map.

    The map is split into ``window_size x window_size`` tiles, multi-head
    self-attention runs independently inside each tile, and the tiles are
    stitched back together. H and W must be divisible by ``window_size``.

    NOTE(review): despite the name, this is plain windowed attention, not
    CSWin's cross-shaped (horizontal/vertical stripe) attention; the
    ``split_size`` argument is stored but never used by ``forward``.
    """

    def __init__(self, dim, num_heads, window_size, split_size=None):
        super(CSwinAttention, self).__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.window_size = window_size
        # Kept for interface compatibility; unused in forward().
        self.split_size = split_size if split_size is not None else window_size

        # NOTE(review): nn.MultiheadAttention applies its own q/k/v input
        # projections, so self.qkv adds a second (redundant but harmless)
        # linear projection in front of it.
        self.qkv = nn.Linear(dim, dim * 3, bias=False)
        self.proj = nn.Linear(dim, dim)

        self.attention = nn.MultiheadAttention(embed_dim=dim, num_heads=num_heads, batch_first=True)

    def forward(self, x):
        """Apply windowed multi-head attention.

        Args:
            x: tensor of shape (B, H, W, C) with C == self.dim.

        Returns:
            Tensor of shape (B, H, W, C).
        """
        B, H, W, C = x.shape
        ws = self.window_size

        # Project to q, k, v on the flattened (B, H*W, C) sequence.
        qkv = self.qkv(x.reshape(B, H * W, C)).chunk(3, dim=-1)

        # BUG FIX: the previous implementation first split q/k/v into a
        # per-head layout (B, num_heads, N, head_dim) and then reshaped that
        # as if it were laid out (B, H, W, C), which scrambled the head axis
        # into the spatial axes. Head splitting is now left entirely to
        # nn.MultiheadAttention; the window partition operates on the full
        # channel dimension. All windows are folded into the batch axis,
        # replacing the per-window Python loop with one batched call.
        def _to_windows(t):
            # (B, H*W, C) -> (B * nWinH * nWinW, ws*ws, C)
            t = t.reshape(B, H // ws, ws, W // ws, ws, C)
            t = t.permute(0, 1, 3, 2, 4, 5).contiguous()
            return t.reshape(-1, ws * ws, C)

        q, k, v = (_to_windows(t) for t in qkv)
        attn_out, _ = self.attention(q, k, v, need_weights=False)

        # Undo the window partition: (B*nWinH*nWinW, ws*ws, C) -> (B, H*W, C).
        out = attn_out.reshape(B, H // ws, W // ws, ws, ws, C)
        out = out.permute(0, 1, 3, 2, 4, 5).reshape(B, H * W, C)

        out = self.proj(out)
        return out.reshape(B, H, W, C)

class CSwinTransformerBlock(nn.Module):
    """Pre-norm transformer block: windowed attention then an MLP, each
    wrapped in a residual connection. Operates on (B, H, W, C) maps.

    NOTE(review): ``attention_dropout`` is accepted for interface
    compatibility but is not wired into the attention module.
    """

    def __init__(self, dim, num_heads, window_size, mlp_ratio=4.0, dropout=0.0, attention_dropout=0.0):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = CSwinAttention(dim, num_heads, window_size)
        self.norm2 = nn.LayerNorm(dim)
        hidden = int(dim * mlp_ratio)
        # Two-layer feed-forward with GELU and dropout on the output.
        self.mlp = nn.Sequential(
            nn.Linear(dim, hidden),
            nn.GELU(),
            nn.Linear(hidden, dim),
            nn.Dropout(dropout),
        )

    def forward(self, x):
        # Residual attention sub-layer followed by residual MLP sub-layer.
        x = x + self.attn(self.norm1(x))
        return x + self.mlp(self.norm2(x))


class BasicBlock(nn.Module):
    """ResNet-style residual block: two 3x3 conv+BN stages with a skip
    connection. ``downsample`` (if given) projects the identity branch so
    shapes match when stride or channel count changes.
    """

    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.downsample = downsample

    def forward(self, x):
        # Shortcut branch: identity, or projected when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Residual add, then the final activation.
        out = out + shortcut
        return self.relu(out)

class CSwinNet(nn.Module):
    """ResNet-18-style convolutional backbone (3x3 stride-1 stem, suited to
    small inputs such as 32x32 CIFAR images) with a windowed-attention
    transformer block inserted before global average pooling.

    Fixes relative to the original (interface unchanged):
    - mutable default argument ``layers=[2, 2, 2, 2]`` replaced by a tuple;
    - dropout moved from after the classifier to before it — dropping
      logits after ``fc`` is almost certainly unintended. With the default
      ``drop=0`` behavior is identical.
    """

    def __init__(self, block=BasicBlock, layers=(2, 2, 2, 2), num_classes=100, drop=0):
        super().__init__()
        self.in_channels = 64
        # Stride-1 3x3 stem keeps full spatial resolution.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self.make_layer(block, 64, layers[0])
        self.layer2 = self.make_layer(block, 128, layers[1])
        self.layer3 = self.make_layer(block, 256, layers[2], 2)
        self.layer4 = self.make_layer(block, 512, layers[3], 2)
        # Attention over the final 512-channel map; window_size=4 assumes the
        # spatial size after layer4 is divisible by 4 (8x8 for 32x32 input).
        self.csblock4 = CSwinTransformerBlock(
            dim=512, num_heads=16, window_size=4, mlp_ratio=4.0, dropout=0.1
        )
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512, num_classes)
        self.drop = nn.Dropout(drop)

    def make_layer(self, block, out_channels, blocks, stride=1):
        """Stack ``blocks`` residual blocks; only the first one may
        downsample spatially and/or change the channel count."""
        downsample = None
        if stride != 1 or self.in_channels != out_channels:
            # 1x1 conv projection so the skip connection matches shape.
            downsample = nn.Sequential(
                nn.Conv2d(self.in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        layers = [block(self.in_channels, out_channels, stride, downsample)]
        self.in_channels = out_channels
        layers.extend(block(out_channels, out_channels) for _ in range(1, blocks))
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.relu(self.bn(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # Transformer block expects channels-last (B, H, W, C).
        out = out.permute(0, 2, 3, 1)
        out = self.csblock4(out)
        out = out.permute(0, 3, 1, 2)
        out = self.avg_pool(out)
        out = torch.flatten(out, 1)
        # FIX: dropout applied to pooled features, before the classifier
        # (original dropped the logits after fc).
        out = self.drop(out)
        return self.fc(out)
