import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


# Define the DCT basis functions
def dct_weights(size, num_components):
    """Build the first `num_components` rows of the orthonormal DCT-II basis.

    Args:
        size: length of the signal (number of channels), N.
        num_components: number of low-frequency basis vectors to keep, Nc.

    Returns:
        torch.Tensor of shape (num_components, size), dtype float32.
        Rows are orthonormal: W @ W.T == I(num_components).
    """
    n = np.arange(size)                     # sample index, shape (N,)
    k = np.arange(num_components)[:, None]  # frequency index, shape (Nc, 1)
    # DCT-II basis with sqrt(2/N) scaling; broadcasting replaces the
    # original O(Nc*N) Python double loop with one vectorized expression.
    basis = np.sqrt(2.0 / size) * np.cos(np.pi * (2 * n + 1) * k / (2 * size))
    if num_components > 0:
        # The DC (k == 0) row uses the 1/sqrt(N) scaling that makes the
        # basis orthonormal.
        basis[0, :] = 1.0 / np.sqrt(size)
    return torch.tensor(basis, dtype=torch.float32)

class MSCA(nn.Module):
    """Multi-Spectral Channel Attention.

    Squeezes the input to a per-channel descriptor, projects it onto a
    small set of DCT components (selected per `strategy`), and produces a
    sigmoid channel-gating vector via a two-layer bottleneck MLP.

    Args:
        channels: number of input channels C.
        reduction: bottleneck reduction ratio of the MLP.
        num_dct_components: number of DCT components Nc kept by the projection.
        strategy: 'LF' (low-frequency), 'TS' (two-step), or 'NAS'
            (learned selection weights).

    Raises:
        ValueError: if `strategy` is not one of the supported values
            (the original silently hit a NameError at forward time).
    """

    def __init__(self, channels, reduction=4, num_dct_components=8, strategy='LF'):
        super(MSCA, self).__init__()
        if strategy not in ('LF', 'TS', 'NAS'):
            raise ValueError(f"unknown strategy: {strategy!r} (expected 'LF', 'TS' or 'NAS')")
        self.strategy = strategy
        self.num_dct_components = num_dct_components
        # Kept for backward compatibility with code that reads .device.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # Register as a non-persistent buffer so the basis follows the module
        # through .to()/.cuda() (a plain tensor attribute pinned to a device at
        # construction time does not) while keeping state_dict keys unchanged.
        self.register_buffer('dct_weights',
                             dct_weights(channels, num_dct_components),
                             persistent=False)
        if strategy == 'NAS':
            # Build the selector ONCE so its weights are registered, trained
            # and saved. The original constructed a fresh randomly-initialized
            # nn.Sequential inside every forward call, so the selection was
            # untrainable noise.
            self.nas_selector = nn.Sequential(
                nn.Linear(channels, channels // 2),
                nn.ReLU(),
                nn.Linear(channels // 2, channels),
            )
        self.fc1 = nn.Linear(num_dct_components, channels // reduction, bias=False)
        self.fc2 = nn.Linear(channels // reduction, channels, bias=False)

    def forward(self, x):
        """Recalibrate channels of x: (B, C, H, W) -> (B, C, H, W)."""
        batch, channels, _, _ = x.size()

        # Squeeze: global average pool to a per-channel descriptor (B, C).
        y = F.adaptive_avg_pool2d(x, 1).view(batch, channels)

        if self.strategy == 'LF':
            # Low-frequency strategy: project onto the first Nc DCT bases.
            y_dct = torch.matmul(y, self.dct_weights.transpose(0, 1))  # (B, Nc)
        elif self.strategy == 'TS':
            # Two-step strategy: take the lower half of the components and
            # duplicate it back to width Nc.
            # NOTE(review): assumes num_dct_components is even; for odd Nc the
            # concatenation yields Nc-1 features and fc1 would reject it.
            low_freq = torch.matmul(y, self.dct_weights.transpose(0, 1))  # (B, Nc)
            half = low_freq[:, :self.num_dct_components // 2]             # (B, Nc//2)
            y_dct = torch.cat([half, half], dim=1)                        # (B, Nc)
        else:  # 'NAS' (validated in __init__)
            # Learned per-channel selection weights, then DCT projection.
            selection_weights = self.nas_selector(y)                      # (B, C)
            y_dct = torch.matmul(selection_weights, self.dct_weights.transpose(0, 1))

        # Excitation: bottleneck MLP -> sigmoid gate per channel.
        y_dct = y_dct.view(batch, -1)                                     # (B, Nc)
        y = F.relu(self.fc1(y_dct))                                       # (B, C // r)
        y = torch.sigmoid(self.fc2(y)).view(batch, channels, 1, 1)        # (B, C, 1, 1)

        # Scale the input feature map by the channel gates.
        return x * y


class BasicBlock(nn.Module):
    """ResNet basic block (two 3x3 convs) with MSCA channel attention
    applied to the residual branch before the skip connection is added.

    Submodule attribute names (conv1/bn1/conv2/bn2/msca/downsample) are part
    of the checkpoint format and must not be renamed.
    """

    expansion = 1

    def __init__(self, in_channels, out_channels, stride=1, downsample=None, strategy='LF'):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels,
                               kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels,
                               kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.downsample = downsample
        self.stride = stride
        self.msca = MSCA(out_channels, strategy=strategy)

    def forward(self, x):
        # Skip path: project the identity when shape changes.
        shortcut = self.downsample(x) if self.downsample is not None else x

        # Residual path: conv-bn-relu, conv-bn, then channel attention.
        residual = self.relu(self.bn1(self.conv1(x)))
        residual = self.bn2(self.conv2(residual))
        residual = self.msca(residual)

        # Merge and apply the final non-linearity.
        return self.relu(residual + shortcut)


class Fcanet(nn.Module):
    """Compact FcaNet-style classifier (two residual stages).

    Stem: 3x3 conv (3 -> 16). Two stages of `block`s (16 -> 32 -> 64, each
    stage downsampling by stride 2), global average pooling, then a linear
    classifier.

    Args:
        block: residual block class exposing an `expansion` class attribute
            and the (in_channels, out_channels, stride, downsample, strategy)
            constructor signature.
        layers: per-stage block counts; only layers[0] and layers[1] are used.
        num_classes: size of the classifier output.
        strategy: MSCA strategy string forwarded to every block.
    """

    def __init__(self, block, layers, num_classes=100, strategy='LF'):
        super(Fcanet, self).__init__()
        self.in_channels = 16  # small stem width to keep the model compact
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 32, layers[0], stride=2, strategy=strategy)
        self.layer2 = self._make_layer(block, 64, layers[1], stride=2, strategy=strategy)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(64 * block.expansion, num_classes)

    def _make_layer(self, block, out_channels, blocks, stride=1, strategy='LF'):
        """Stack `blocks` residual blocks; the first may downsample/project."""
        downsample = None
        # A 1x1 projection is needed whenever the spatial or channel
        # dimensions of the identity no longer match the block output.
        if stride != 1 or self.in_channels != out_channels * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.in_channels, out_channels * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels * block.expansion),
            )

        layers = [block(self.in_channels, out_channels, stride, downsample, strategy)]
        self.in_channels = out_channels * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.in_channels, out_channels, strategy=strategy))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.layer1(x)
        x = self.layer2(x)
        # BUG FIX: the original also called self.layer3 / self.layer4 here,
        # but those stages were removed from __init__, so every forward pass
        # raised AttributeError.

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)

        return x


def make_Fcanet(strategy='LF'):
    """Factory for an Fcanet built from BasicBlock.

    Note: Fcanet only consumes the first two entries of the block-count
    list; the trailing entries are ignored.
    """
    blocks_per_stage = [2, 2, 2, 2]
    return Fcanet(BasicBlock, blocks_per_stage, strategy=strategy)