import math
import torch
from torch import nn
from torch.functional import F
import numpy as np


class BasicConv(nn.Module):
    """Conv2d followed by optional BatchNorm2d and ReLU.

    Disabled stages are stored as None so `forward` can skip them.
    """

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True,
                 bn=True, bias=False):
        super(BasicConv, self).__init__()
        self.out_channels = out_planes
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding,
                              dilation=dilation, groups=groups, bias=bias)
        self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
        self.relu = nn.ReLU() if relu else None

    def forward(self, x):
        out = self.conv(x)
        # Apply the optional post-conv stages in order, skipping disabled ones.
        for stage in (self.bn, self.relu):
            if stage is not None:
                out = stage(out)
        return out


class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)


class ChannelGate(nn.Module):
    """CBAM channel attention.

    Squeezes the spatial dimensions with one or more pooling operators,
    feeds each pooled vector through a shared bottleneck MLP, sums the
    results, and rescales the input channels by the sigmoid of that sum.

    Args:
        gate_channels: number of input channels C.
        reduction_ratio: bottleneck ratio of the shared MLP.
        pool_types: iterable drawn from {'avg', 'max', 'lp', 'lse'}.
    """

    # Tuple default: the previous mutable-list default was one shared object
    # across all instances (classic mutable-default pitfall).
    def __init__(self, gate_channels, reduction_ratio=16, pool_types=('avg', 'max')):
        super(ChannelGate, self).__init__()
        self.gate_channels = gate_channels
        self.mlp = nn.Sequential(
            nn.Flatten(),  # (B, C, 1, 1) -> (B, C); stdlib replacement for the local Flatten helper
            nn.Linear(gate_channels, gate_channels // reduction_ratio),
            nn.ReLU(),
            nn.Linear(gate_channels // reduction_ratio, gate_channels)
        )
        self.pool_types = pool_types

    def forward(self, x):
        """Return x scaled per channel; x is (B, C, H, W)."""
        spatial = (x.size(2), x.size(3))
        channel_att_sum = None
        for pool_type in self.pool_types:
            if pool_type == 'avg':
                pooled = F.avg_pool2d(x, spatial, stride=spatial)
            elif pool_type == 'max':
                pooled = F.max_pool2d(x, spatial, stride=spatial)
            elif pool_type == 'lp':
                pooled = F.lp_pool2d(x, 2, spatial, stride=spatial)
            elif pool_type == 'lse':
                # log-sum-exp pooling over H*W
                pooled = logsumexp_2d(x)
            else:
                # Previously an unknown type raised UnboundLocalError (or
                # silently reused the previous head's output); fail loudly.
                raise ValueError(f"unknown pool type: {pool_type!r}")
            channel_att_raw = self.mlp(pooled)
            if channel_att_sum is None:
                channel_att_sum = channel_att_raw
            else:
                channel_att_sum = channel_att_sum + channel_att_raw

        scale = torch.sigmoid(channel_att_sum).unsqueeze(2).unsqueeze(3).expand_as(x)
        return x * scale


def logsumexp_2d(tensor):
    """Log-sum-exp over the spatial dimensions of a (B, C, H, W) tensor.

    Returns a (B, C, 1) tensor. Uses torch.logsumexp, which performs the
    same max-subtraction stabilization the previous hand-rolled version did.
    """
    flat = tensor.view(tensor.size(0), tensor.size(1), -1)
    return torch.logsumexp(flat, dim=2, keepdim=True)


class ChannelPool(nn.Module):
    """Stack the channel-wise max and mean into a 2-channel map.

    (B, C, H, W) -> (B, 2, H, W): channel 0 is the max, channel 1 the mean.
    """

    def forward(self, x):
        max_map = torch.max(x, dim=1, keepdim=True)[0]
        mean_map = torch.mean(x, dim=1, keepdim=True)
        return torch.cat([max_map, mean_map], dim=1)


class SpatialGate(nn.Module):
    """CBAM spatial attention: a 7x7 conv over the channel-pooled map
    produces a sigmoid mask that rescales the input spatially."""

    def __init__(self):
        super(SpatialGate, self).__init__()
        k = 7
        self.compress = ChannelPool()
        # 'same' padding so the mask matches the input's H and W.
        self.spatial = BasicConv(2, 1, k, stride=1, padding=(k - 1) // 2, relu=False)

    def forward(self, x):
        pooled = self.compress(x)
        attention_logits = self.spatial(pooled)
        mask = torch.sigmoid(attention_logits)  # (B, 1, H, W); broadcasts over channels
        return x * mask


class CBAM(nn.Module):
    """Convolutional Block Attention Module: a channel gate, optionally
    followed by a spatial gate.

    Args:
        gate_channels: channels of the input feature map.
        reduction_ratio: MLP bottleneck ratio for the channel gate.
        pool_types: pooling operators for the channel gate.
        no_spatial: when True (default here) the spatial gate is skipped.
    """

    # Tuple default replaces the shared mutable-list default argument.
    def __init__(self, gate_channels, reduction_ratio=16, pool_types=('avg', 'max'), no_spatial=True):
        super(CBAM, self).__init__()
        self.ChannelGate = ChannelGate(gate_channels, reduction_ratio, pool_types)
        self.no_spatial = no_spatial
        if not no_spatial:
            self.SpatialGate = SpatialGate()

    def forward(self, x):
        x_out = self.ChannelGate(x)
        if not self.no_spatial:
            x_out = self.SpatialGate(x_out)
        return x_out


def get_total_parameters(model):
    """Count the parameters of *model*.

    For a plain module/layer, sums over ``model.parameters()`` (recursive,
    de-duplicating shared tensors). For an ``nn.Sequential``, sums layer by
    layer, preserving the original behavior of counting a module once per
    appearance. No forward pass is run, so no ``torch.no_grad()`` is needed.
    """
    if not isinstance(model, nn.Sequential):
        return sum(p.numel() for p in model.parameters())
    return sum(p.numel() for layer in model for p in layer.parameters())


class ConvPart(nn.Module):
    """Small convolutional feature extractor.

    Two conv -> leaky-ReLU -> batch-norm -> max-pool stages followed by a
    final plain conv; maps (B, 3, H, W) to (B, 32, H // 4, W // 4).
    """

    def __init__(self):
        super().__init__()
        self.c1a = nn.Conv2d(3, 32, 5, padding=2)
        self.p1 = nn.MaxPool2d(2)
        self.c2a = nn.Conv2d(32, 32, 5, padding=2)
        self.p2 = nn.MaxPool2d(2)
        self.c3 = nn.Conv2d(32, 32, 5, padding=2)
        self.bn1a = nn.BatchNorm2d(32)
        self.bn2a = nn.BatchNorm2d(32)

    def forward(self, x):
        h = self.c1a(x)
        h = self.p1(self.bn1a(F.leaky_relu(h)))
        h = self.c2a(h)
        h = self.p2(self.bn2a(F.leaky_relu(h)))
        return self.c3(h)


class Net(nn.Module):
    """Baseline classifier without attention: ConvPart features, global
    average pooling, and a sigmoid head.

    Returns (p, q): per-sample probability and the (B, 32, H', W') feature
    map, matching the (probability, feature-map) contract of the
    attention-based variants.
    """

    def __init__(self):
        super().__init__()
        self.conv = ConvPart()
        self.final = nn.Linear(32, 1)
        self.optim = torch.optim.Adam(self.parameters(), lr=1e-4)

    def forward(self, x):
        q = self.conv(x)
        z = q.mean(3).mean(2)  # global average pool over H and W
        p = torch.sigmoid(self.final(z))[:, 0]
        # BUG FIX: previously `return p, _` — `_` was never defined and
        # raised NameError on the first forward pass.
        return p, q


class Attention(nn.Module):
    """Single-head scaled dot-product attention over channel-first sequences.

    x1 is the query source (B, query_in, Lq); x2 is the memory
    (B, mem_in, Lk). Returns (out, w) with out of shape
    (B, output_size, Lq) and w the (B, Lk, Lq) attention distribution.
    """

    def __init__(self, mem_in=32, query_in=32, key_size=32, output_size=32):
        super(Attention, self).__init__()
        # 1x1 convolutions act as position-wise linear projections.
        self.key = nn.Conv1d(mem_in, key_size, 1, padding=0)
        self.value = nn.Conv1d(mem_in, output_size, 1, padding=0)
        self.query = nn.Conv1d(query_in, key_size, 1, padding=0)
        self.key_size = key_size

    def forward(self, x1, x2):
        q = self.query(x1)  # (B, key_size, Lq)
        k = self.key(x2)    # (B, key_size, Lk)
        v = self.value(x2)  # (B, output_size, Lk)
        # Scaled dot-product scores: contract over the key dimension.
        scores = torch.sum(q.unsqueeze(2) * k.unsqueeze(3), 1) / np.sqrt(self.key_size)
        attn = F.softmax(scores, dim=1)  # normalize over memory positions
        # Attention-weighted sum of values for every query position.
        mixed = torch.sum(attn.unsqueeze(1) * v.unsqueeze(3), 2)
        return mixed, attn


class MultiheadAttention(nn.Module):
    """Multi-head wrapper around Attention with residuals, layer norm, and a
    position-wise mixing MLP (transformer-style block).

    forward(query, context) accepts (B, C, L) tensors or (B, C, H, W) maps;
    a 4D query is flattened over space and the output reshaped back to the
    query's spatial size.
    NOTE(review): the residual `+ x2` assumes query_in == mem_in and that
    query and context have the same length — confirm against callers.
    """

    def __init__(self, mem_in=32, query_in=32, key_size=32, output_size=32, num_heads=4):
        super().__init__()
        self.layers = nn.ModuleList([Attention(mem_in, query_in, key_size, output_size) for _ in range(num_heads)])
        self.proj_down = nn.Conv1d(num_heads * output_size, query_in, 1, padding=0)
        self.mixing_layer1 = nn.Conv1d(query_in, query_in, 1, padding=0)
        self.mixing_layer2 = nn.Conv1d(query_in, query_in, 1, padding=0)
        self.norm1 = nn.LayerNorm(query_in)
        self.norm2 = nn.LayerNorm(query_in)

    def forward(self, query, context):
        x1 = query.reshape(query.size(0), query.size(1), -1)
        x2 = context.reshape(context.size(0), context.size(1), -1)

        # Run every head and concatenate the outputs along channels.
        z1, ws = [], []
        for layer in self.layers:
            z, w = layer(x1, x2)
            z1.append(z)
            ws.append(w)
        z1 = torch.cat(z1, 1)

        # Project down + residual. LayerNorm normalizes the last dimension,
        # so move channels there and back again.
        z2 = self.norm1((self.proj_down(z1) + x2).transpose(1, 2).contiguous()).transpose(1, 2).contiguous()

        # Position-wise mixing MLP + residual, normalized the same way.
        z3 = self.norm2((self.mixing_layer2(F.relu(self.mixing_layer1(
            z2))) + z2).transpose(1, 2).contiguous()).transpose(1, 2).contiguous()

        if len(query.size()) == 4:
            # BUG FIX: was (query.size(3), query.size(3)), which is wrong
            # (and fails) for non-square inputs; restore the true H and W.
            z3 = z3.reshape(query.size(0), query.size(1), query.size(2), query.size(3))

        return z3, z1


class NetMultiheadAttention(nn.Module):
    """ConvPart features refined by multi-head self-attention, then global
    average pooling and a sigmoid head.

    Returns (p, q): per-sample probability and the attended (B, 32, H', W')
    feature map.
    """

    def __init__(self):
        super().__init__()
        self.conv = ConvPart()
        # BUG FIX: previously constructed an undefined `MultiHeadAttention`
        # with a foreign signature (n_head/d_model/d_k/d_v) — a NameError at
        # construction time. Use this module's MultiheadAttention with the
        # equivalent configuration (4 heads of size 8 over 32 channels).
        self.attn1 = MultiheadAttention(mem_in=32, query_in=32, key_size=8, output_size=8, num_heads=4)
        self.final = nn.Linear(32, 1)
        self.optim = torch.optim.Adam(self.parameters(), lr=1e-4)

    def forward(self, x):
        z = self.conv(x)
        # Self-attention (query == context). MultiheadAttention accepts 4D
        # feature maps directly and returns one of the same shape, so the
        # previous manual reshapes are unnecessary.
        q, w = self.attn1(z, z)
        z = q.mean(3).mean(2)
        p = torch.sigmoid(self.final(z))[:, 0]
        return p, q


class NetCBAM(nn.Module):
    """ConvPart features gated by CBAM channel attention, then global
    average pooling and a sigmoid head.

    Returns (p, q): per-sample probability and the attended feature map.
    """

    def __init__(self):
        super().__init__()
        self.conv = ConvPart()
        self.attn1 = CBAM(gate_channels=32)
        self.final = nn.Linear(32, 1)
        self.optim = torch.optim.Adam(self.parameters(), lr=1e-4)

    def forward(self, x):
        features = self.conv(x)
        attended = self.attn1(features)
        pooled = attended.mean(3).mean(2)  # global average pool
        p = torch.sigmoid(self.final(pooled))[:, 0]
        return p, attended


if __name__ == '__main__':
    # Smoke-test CBAM on a random feature map.
    query = torch.rand(128, 256, 40, 90)
    attn_model = CBAM(gate_channels=query.shape[1])
    attn_output = attn_model(query)
    print(attn_output.size())
    print('get_total_parameters(attn_model):', get_total_parameters(attn_model))

    from torchvision.datasets import STL10

    # Set download=True on the first run to fetch the dataset.
    dataset = STL10("stl10", split='train', download=False)

    def getBatch(BS=10, offset=0, display_labels=False):
        """Build a batch of normalized STL10 images.

        About half the images are "corrupted" by pasting a constant 16x16
        patch at a random position. Returns (images, labels) as numpy
        arrays, where labels[i] is 1 iff image i was corrupted.
        """
        xs = []
        labels = []
        for i in range(BS):
            x, y = dataset[offset + i]
            x = (np.array(x) - 128.0) / 128.0
            x = x.transpose(2, 0, 1)  # HWC -> CHW

            # Seed per index so the corruption decision is reproducible.
            np.random.seed(i + 10)
            corrupt = np.random.randint(2)
            if corrupt:  # paste a constant-valued 16x16 patch
                pos_x = np.random.randint(96 - 16)
                pos_y = np.random.randint(96 - 16)
                x[:, pos_x:pos_x + 16, pos_y:pos_y + 16] = 1
            xs.append(x)
            labels.append(corrupt)

        if display_labels:  # was `== True`; truthiness is equivalent here
            print(labels)

        return np.array(xs), np.array(labels)

    getBatch()

    model = NetMultiheadAttention()
    # NOTE(review): `train` is not defined anywhere in this file — confirm it
    # is provided elsewhere before running this script end to end.
    train(model, att_flag=True)