import torch
from torch import nn
import torch.nn.functional as F


class CNNMnistAdapter(nn.Module):
    """MNIST CNN with an encoder/decoder adapter that can fuse a CLIP embedding.

    forward(x, clip_vector) -> (logits, z):
        x: input images; assumes (N, 1, 28, 28) so that the two stride-2
           pools yield 7x7 feature maps — TODO confirm with callers.
        clip_vector: (N, 512) embedding, or None to skip the CLIP branch.
        logits: (N, 10) class scores; z: (N, 32*7*7) flattened features
                fed to the classifier head.
    """

    def __init__(self):
        super().__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2))

        self.fc = nn.Linear(7 * 7 * 32, 10)

        # Encoder/decoder adapter: [0] compresses 32 -> 16 channels,
        # [1] expands 16 -> 32 channels back to the backbone width.
        self.adapter = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(32, 32, 3, padding=1),
                nn.ReLU(),
                nn.Conv2d(32, 16, 3, padding=1),
                nn.ReLU()
            ),
            nn.Sequential(
                nn.Conv2d(16, 32, 3, padding=1),
                nn.ReLU(),
                nn.Conv2d(32, 32, 3, padding=1),
                nn.ReLU()
            )
        ])
        # Projects a 512-d CLIP vector into the encoder's 16x7x7 feature space.
        self.clip_linear = nn.Linear(512, 16 * 7 * 7)

    def forward(self, x, clip_vector):
        out = self.layer1(x)
        out = self.layer2(out)

        # Encoder half of the adapter.
        adapter_vec = self.adapter[0](out)

        # `is not None` (not `!= None`): comparing a Tensor with `!=` is
        # non-idiomatic and fragile across PyTorch versions.
        if clip_vector is not None:
            # Project the CLIP embedding and fuse it with a residual connection.
            clip_vector = self.clip_linear(clip_vector)
            clip_vector = clip_vector.reshape(adapter_vec.shape)
            residual_output = F.relu(adapter_vec + clip_vector)
        else:
            residual_output = adapter_vec

        # Decoder half, then a residual with the backbone features.
        decoder_vec = self.adapter[1](residual_output)
        out = F.relu(out + decoder_vec)

        z = out.view(out.size(0), -1)
        out = self.fc(z)

        return out, z


class CNNCifarAdapter(nn.Module):
    """CIFAR-10 CNN with an encoder/decoder adapter that can fuse a CLIP embedding.

    NOTE(review): a second class with this same name is defined later in this
    file and shadows this one at import time — confirm which definition callers
    actually want and rename one of them.

    forward(x, clip_vector) -> (logits, z):
        x: input images; assumes (N, 3, 32, 32) so the conv/pool stack yields
           4x4 feature maps — TODO confirm with callers.
        clip_vector: (N, 512) embedding, or None to skip the CLIP branch.
        logits: (N, 10); z: (N, 128*4*4) flattened features.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 128, 3)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(128, 128, 3)
        self.conv3 = nn.Conv2d(128, 128, 3)

        self.fc1 = nn.Linear(128 * 4 * 4, 10)

        # Encoder/decoder adapter: [0] compresses 128 -> 32 channels,
        # [1] expands 32 -> 128 channels back to the backbone width.
        self.adapter = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(128, 64, 3, padding=1),
                nn.ReLU(),
                nn.Conv2d(64, 32, 3, padding=1),
                nn.ReLU()
            ),
            nn.Sequential(
                nn.Conv2d(32, 64, 3, padding=1),
                nn.ReLU(),
                nn.Conv2d(64, 128, 3, padding=1),
                nn.ReLU()
            )
        ])

        # Projects a 512-d CLIP vector into the encoder's 32x4x4 feature space.
        self.clip_linear = nn.Linear(512, 32 * 4 * 4)

    def forward(self, x, clip_vector):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = F.relu(self.conv3(x))

        # Encoder half of the adapter.
        adapter_vec = self.adapter[0](x)

        # `is not None` (not `!= None`): comparing a Tensor with `!=` is
        # non-idiomatic and fragile across PyTorch versions.
        if clip_vector is not None:
            # Project the CLIP embedding and fuse it with a residual connection.
            clip_vector = self.clip_linear(clip_vector)
            clip_vector = clip_vector.reshape(adapter_vec.shape)
            residual_output = F.relu(adapter_vec + clip_vector)
        else:
            residual_output = adapter_vec

        # Decoder half, then a residual with the backbone features.
        decoder_vec = self.adapter[1](residual_output)
        x = F.relu(x + decoder_vec)
        z = x.view(x.size(0), -1)

        x = self.fc1(z)
        return x, z


class CNNCifar100Adapter(nn.Module):
    """CIFAR-100 CNN with an encoder/decoder adapter that can fuse a CLIP embedding.

    forward(x, clip_vector) -> (logits, z):
        x: input images; assumes (N, 3, 32, 32) so the conv/pool stack yields
           4x4 feature maps — TODO confirm with callers.
        clip_vector: (N, 512) embedding, or None to skip the CLIP branch.
        logits: (N, 100); z: (N, 128*4*4) flattened features.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 256, 3)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(256, 256, 3)
        self.conv3 = nn.Conv2d(256, 128, 3)

        self.fc1 = nn.Linear(128 * 4 * 4, 100)

        # Encoder/decoder adapter: [0] compresses 128 -> 32 channels,
        # [1] expands 32 -> 128 channels back to the backbone width.
        self.adapter = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(128, 64, 3, padding=1),
                nn.ReLU(),
                nn.Conv2d(64, 32, 3, padding=1),
                nn.ReLU()
            ),
            nn.Sequential(
                nn.Conv2d(32, 64, 3, padding=1),
                nn.ReLU(),
                nn.Conv2d(64, 128, 3, padding=1),
                nn.ReLU()
            )
        ])

        # Projects a 512-d CLIP vector into the encoder's 32x4x4 feature space.
        self.clip_linear = nn.Linear(512, 32 * 4 * 4)

    def forward(self, x, clip_vector):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = F.relu(self.conv3(x))

        # Encoder half of the adapter.
        adapter_vec = self.adapter[0](x)

        # `is not None` (not `!= None`): comparing a Tensor with `!=` is
        # non-idiomatic and fragile across PyTorch versions.
        if clip_vector is not None:
            # Project the CLIP embedding and fuse it with a residual connection.
            clip_vector = self.clip_linear(clip_vector)
            clip_vector = clip_vector.reshape(adapter_vec.shape)
            residual_output = F.relu(adapter_vec + clip_vector)
        else:
            residual_output = adapter_vec

        # Decoder half, then a residual with the backbone features.
        decoder_vec = self.adapter[1](residual_output)
        x = F.relu(x + decoder_vec)
        z = x.view(x.size(0), -1)

        x = self.fc1(z)
        return x, z


class SimpleCNNAdapter(nn.Module):
    """LeNet-style CNN with a fully-connected CLIP-fusion adapter.

    Typical configuration: input_dim=(16 * 5 * 5), hidden_dims=[120, 84],
    output_dim=10.

    forward(x, clip_vec) -> (logits, z):
        x: input images; the conv/pool stack must flatten to `input_dim`
           features per sample (e.g. (N, channel, 32, 32) for the default).
        clip_vec: (N, 512) embedding, concatenated with the conv features.
        logits: (N, output_dim); z: (N, input_dim) fused features.
    """

    def __init__(self, input_dim, hidden_dims, output_dim=10, channel=3):
        super().__init__()
        self.conv1 = nn.Conv2d(channel, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)

        # For now this network is hard-coded to exactly two hidden layers.
        self.fc1 = nn.Linear(input_dim, hidden_dims[0])
        self.fc2 = nn.Linear(hidden_dims[0], hidden_dims[1])
        self.fc3 = nn.Linear(hidden_dims[1], output_dim)

        # Fuses the flattened conv features with the 512-d CLIP vector.
        self.fc_adapter = nn.Linear(input_dim + 512, input_dim)

    def forward(self, x, clip_vec):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # Flatten per-sample instead of view(-1, 16*5*5): respects the
        # configurable input_dim and cannot silently fold the batch dim.
        x = x.view(x.size(0), -1)

        adapter = F.relu(self.fc_adapter(torch.cat((x, clip_vec), dim=1)))

        # Residual fusion of the adapter output with the conv features.
        z = adapter + x
        x = F.relu(self.fc1(z))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x, z


class SimpleCNN(nn.Module):
    """Plain LeNet-style CNN (no adapter): conv/pool x2, then three FC layers.

    forward(x) -> logits:
        x: input images; the conv/pool stack must flatten to `input_dim`
           features per sample (e.g. (N, 3, 32, 32) for input_dim=16*5*5).
        logits: (N, output_dim).
    """

    def __init__(self, input_dim, hidden_dims, output_dim=10):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)

        # For now this network is hard-coded to exactly two hidden layers.
        self.fc1 = nn.Linear(input_dim, hidden_dims[0])
        self.fc2 = nn.Linear(hidden_dims[0], hidden_dims[1])
        self.fc3 = nn.Linear(hidden_dims[1], output_dim)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # Flatten per-sample instead of view(-1, 16*5*5): respects the
        # configurable input_dim and cannot silently fold the batch dim.
        x = x.view(x.size(0), -1)

        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


class CNNCifarAdapter(nn.Module):
    """CIFAR-10 CNN with a fully-connected CLIP-fusion adapter.

    NOTE(review): this redefines `CNNCifarAdapter` and shadows the earlier
    conv-adapter version of the class defined above in this file — only this
    definition survives at import time. Rename one of the two once callers
    are audited.

    forward(x, clip_vec) -> (logits, z):
        x: input images; assumes (N, 3, 32, 32) so the conv/pool stack
           flattens to 128*4*4 features — TODO confirm with callers.
        clip_vec: (N, 512) embedding, concatenated with the conv features.
        logits: (N, 10); z: (N, 128*4*4) fused features.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 128, 3)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(128, 128, 3)
        self.conv3 = nn.Conv2d(128, 128, 3)
        self.fc1 = nn.Linear(128 * 4 * 4, 10)

        # Fuses the flattened conv features with the 512-d CLIP vector.
        self.fc_adapter = nn.Linear(128 * 4 * 4 + 512, 128 * 4 * 4)

    def forward(self, x, clip_vec):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = F.relu(self.conv3(x))
        # Flatten per-sample instead of view(-1, 128*4*4): cannot silently
        # fold the batch dim if the spatial size is ever wrong.
        x = x.view(x.size(0), -1)

        adapter = F.relu(self.fc_adapter(torch.cat((x, clip_vec), dim=1)))

        # Residual fusion of the adapter output with the conv features.
        z = adapter + x

        x = self.fc1(z)
        return x, z


if __name__ == '__main__':
    # Smoke test: run SimpleCNNAdapter on one grayscale 32x32 sample.
    sample = torch.randn(1, 1, 32, 32)
    clip_embedding = torch.randn(1, 512)

    model = SimpleCNNAdapter(
        input_dim=(16 * 5 * 5), hidden_dims=[120, 84], output_dim=10, channel=1
    )
    result = model(sample, clip_embedding)
    print(result)