# from res2net_Fca import *
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from tools import *
from torch.optim import lr_scheduler
import math
import sys
import time
from torchvision.transforms import RandAugment
import copy
# from  import *

# Activation-function selection flags read by Bottleneck.__init__:
# is_CELU takes priority over is_ELU; if both are False, ReLU is used.
is_CELU = True
is_ELU = False
def get_1d_dct(i, freq, L):
    """Value of the orthonormal 1-D DCT-II basis function `freq` at position `i`.

    The length-L basis is normalized by 1/sqrt(L); every non-zero frequency
    additionally carries the sqrt(2) orthonormalization factor.
    """
    basis = math.cos(math.pi * freq * (i + 0.5) / L) / math.sqrt(L)
    return basis * (math.sqrt(2) if freq != 0 else 1.0)
def get_dct_weights(width, height, channel,
                    fidx_u=(0, 1, 0, 5, 2, 0, 2, 0, 0, 6, 0, 4, 6, 3, 2, 5),
                    fidx_v=(0, 0, 6, 0, 0, 1, 1, 4, 5, 1, 3, 0, 0, 0, 2, 3)):
    """Pre-compute the multi-spectral DCT filter bank for FcaNet attention.

    The channel dimension is split into len(fidx_u) equal parts; part i is
    assigned the separable 2-D DCT basis with horizontal frequency fidx_u[i]
    and vertical frequency fidx_v[i] (Eq. 7 of the FcaNet paper).

    Args:
        width, height: spatial size of the feature maps the weights apply to.
        channel: number of feature channels. Any remainder channels beyond
            len(fidx_u) * (channel // len(fidx_u)) keep all-zero weights,
            matching the original implementation.
        fidx_u, fidx_v: horizontal / vertical frequency indices of the
            selected DCT components.
            NOTE(review): the in-code comments of the original claimed the
            paper's fidx_u/fidx_v are the reverse of these defaults (u and v
            swapped) — confirm against the reference implementation.

    Returns:
        Tensor of shape (1, channel, width, height), ready to broadcast
        against a (B, C, H, W) feature map.
    """
    def _dct_basis(pos, freq, length):
        # Orthonormal 1-D DCT-II basis value (same formula as get_1d_dct).
        v = math.cos(math.pi * freq * (pos + 0.5) / length) / math.sqrt(length)
        return v if freq == 0 else v * math.sqrt(2)

    dct_weights = torch.zeros(1, channel, width, height)
    # Channels per selected frequency component.
    c_part = channel // len(fidx_u)
    for i, (u_x, v_y) in enumerate(zip(fidx_u, fidx_v)):
        # Hoist the two 1-D bases out of the per-pixel loop and build the
        # separable 2-D basis as an outer product (identical values to the
        # original element-wise triple loop, but O(W+H) basis evaluations
        # instead of O(W*H)).
        bx = torch.tensor([_dct_basis(t, u_x, width) for t in range(width)])
        by = torch.tensor([_dct_basis(t, v_y, height) for t in range(height)])
        dct_weights[:, i * c_part:(i + 1) * c_part] = bx.unsqueeze(1) * by.unsqueeze(0)
    return dct_weights


# Input and output shapes are identical (the attention layer only rescales channels).

# Module-global device used when constructing FcaLayer instances below.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class FcaLayer(nn.Module):
    """Frequency Channel Attention (FcaNet) layer.

    Replaces SE-style global average pooling with a fixed multi-spectral DCT
    pooling, then gates each input channel with a sigmoid excitation MLP.

    Args:
        channel: number of input/output channels.
        reduction: bottleneck reduction ratio of the excitation MLP.
        width, height: spatial size of the feature maps this layer will see;
            the DCT weights are pre-computed for exactly this size, so inputs
            of any other spatial size fail to broadcast in forward().
    """

    def __init__(self,
                 channel,
                 reduction, width, height):
        super(FcaLayer, self).__init__()
        # Fixed (non-learnable) DCT filter bank; registered as a buffer so it
        # is saved in state_dict and moved by Module.to().
        self.register_buffer('pre_computed_dct_weights', get_dct_weights(width, height, channel).to(device))
        #self.register_parameter('pre_computed_dct_weights',torch.nn.Parameter(get_dct_weights(width,height,channel)))
        # Squeeze-and-excitation style MLP producing per-channel gates in (0, 1).
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.CELU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid()
        ).to(device)

    def forward(self, x):
        b, c, _, _ = x.size()
        # Multi-spectral "squeeze": DCT-weighted spatial sum per channel.
        # The product with the registered buffer already forces y onto the
        # buffer's device (a mismatched x raises before this point), so the
        # original trailing .to(device) was a no-op and has been removed.
        y = torch.sum(x * self.pre_computed_dct_weights, dim=(2, 3))
        y = self.fc(y).view(b, c, 1, 1)
        # "Excitation": rescale every channel of x by its gate.
        return x * y.expand_as(x)

def show_shape(item):
    """Debugging aid: print the `.shape` attribute of *item*."""
    shape = item.shape
    print(shape)
class Bottleneck(nn.Module):
    """Res2Net bottleneck block with frequency channel attention.

    Structure: 1x1 conv -> hierarchical group of 3x3 convs over channel
    chunks (the Res2Net "scales") -> 1x1 conv -> lazily created FcaLayer
    attention -> residual add -> activation.

    NOTE(review): the `se` argument is accepted but never read — attention is
    always applied via the lazily built FcaLayer in forward(). Because that
    layer is created on the FIRST forward pass, its parameters are invisible
    to optimizers and .to()/DataParallel calls made before that pass —
    confirm this is intentional.
    """

    # Output channels = planes * expansion (standard ResNet bottleneck convention).
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, scales=4, groups=1, se=True):
        # inplanes: channels of the incoming tensor.
        # planes: base width; the block outputs planes * self.expansion channels.
        # stride: spatial stride applied inside every 3x3 conv below.
        # downsample: optional module matching the identity branch to the output shape.
        # scales: number of channel chunks for the Res2Net hierarchy.
        # groups: group count of the 3x3 convs.
        # se: unused (see class docstring).
        super(Bottleneck, self).__init__()
        self.downsample = downsample
        self.scales = scales
        self.groups = groups
        self.stride = stride

        # Width of the multi-scale stage; assumed divisible by `scales` so
        # torch.chunk in forward() yields equally sized pieces.
        outplanes = groups * planes

        self.conv1 = nn.Conv2d(in_channels=inplanes, out_channels=outplanes, kernel_size=1, stride=1, bias=False)
        self.bn1 = nn.BatchNorm2d(outplanes)

        # One 3x3 conv per chunk except the last, which is passed through.
        # NOTE(review): every conv carries `stride`, so with stride=2 each
        # processed chunk is downsampled independently (cf. the commented-out
        # variant below that strides only the last conv).
        self.conv2 = nn.ModuleList([nn.Conv2d(outplanes // scales, outplanes // scales,
                                              kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) for _
                                    in range(scales - 1)])
        # self.conv2 = nn.ModuleList([
        #     nn.Conv2d(outplanes // scales, outplanes // scales, kernel_size=3, stride=1, padding=1, groups=groups, bias=False)
        #     if i != (scales - 2)
        #     else nn.Conv2d(outplanes // scales, outplanes // scales, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False)
        #     for i in range(scales - 1)
        # ])

        self.bn2 = nn.ModuleList([nn.BatchNorm2d(outplanes // scales) for _ in range(scales - 1)])

        self.conv3 = nn.Conv2d(outplanes, planes * self.expansion, kernel_size=1, stride=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)

        #self.relu = nn.ReLU(inplace=True)
        # Activation selected by the module-level flags (is_CELU wins over is_ELU).
        if is_CELU:
            self.af = nn.CELU(inplace=True)
        elif is_ELU:
            self.af = nn.ELU(inplace=True)
        else:
            self.af = nn.ReLU(inplace=True)
        # self.se = FcaLayer(planes * self.expansion,width,height) if se else None
        # Placeholder for the lazily built FcaLayer; it needs the output
        # spatial size, which is only known at the first forward pass.
        self.fac = None
        # Downsamples the untouched last chunk when the strided convs shrank
        # the processed chunks (see forward()).
        self.pool = nn.AvgPool2d(kernel_size=2)

    def forward(self, x):
        identity = x

        if self.downsample is not None:
            identity = self.downsample(identity)

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.af(out)

        # Split channels into `scales` equal chunks for the hierarchy.
        x_scales = torch.chunk(out, self.scales, 1)
        #print(list(map(show_shape, x_scales)))
        #print(x_scales[4])
        # for index, i in enumerate(x_scales):
        #     # for index_p, p in enumerate(i):
        #     #     print(index, index_p)
        #     print(index, i.shape)

        # Hierarchical processing: chunk i is summed with the accumulated
        # PRE-conv inputs of earlier chunks before its own conv.
        # NOTE(review): canonical Res2Net feeds the previous conv OUTPUT
        # forward instead; with stride>1 the outputs change size, which is
        # presumably why raw inputs are accumulated here — confirm intended.
        for i in range(self.scales - 1):
            if i == 0:
                y_scale = x_scales[i]
                #print(0, y_scale.shape)
            else:
                #print(y_scale.shape, x_scales[i].shape)
                y_scale = y_scale + x_scales[i]

            # (The next assignment is immediately overwritten; kept as-is.)
            y_scale_reshape = y_scale
            y_scale_reshape = self.conv2[i](y_scale)
            y_scale_reshape = self.af(self.bn2[i](y_scale_reshape))

            if i == 0:
                out = y_scale_reshape
            else:
                #print(out.shape, y_scale_reshape.shape, y_scale.shape)
                out = torch.cat((out, y_scale_reshape), 1)

        # Re-attach the last (unprocessed) chunk; average-pool it first if the
        # strided convs shrank the spatial size of the processed chunks.
        if out.size()[2] != x_scales[self.scales - 1].size()[2]:
            out = torch.cat((out, self.pool(x_scales[self.scales - 1])), 1)
        elif self.scales != 1:
            out = torch.cat((out, x_scales[self.scales - 1]), 1)

        out = self.conv3(out)
        out = self.bn3(out)

        #print(out.size())

        #out = FcaLayer(channel, 16, width, height)
        # Lazily create the attention layer once the output spatial size is
        # known (see class docstring for the optimizer/device caveat).
        if self.fac is None:
            channel = out.size(1)
            width = out.size(2)
            height = out.size(3)
            self.fac = FcaLayer(channel, 16, width, height)
        out = self.fac(out)

        out += identity
        out = self.af(out)

        return out


class Res2Net(nn.Module):
    """Res2Net classification backbone.

    Standard ResNet layout: a 7x7/2 stem with BN + CELU and a 3x3/2 max-pool,
    four stages of `block` modules (64/128/256/512 base widths, the last
    three with stride 2), global average pooling and a linear classifier.
    """

    def __init__(self, block, layers, num_classes=100, scales=4, groups=1, se=True):
        super(Res2Net, self).__init__()
        self.inplanes = 64

        # Stem.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.af = nn.CELU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Four stages; only the first keeps stride 1.
        self.layer1 = self._make_layer(block, 64, layers[0], stride=1, scales=scales, groups=groups, se=se)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, scales=scales, groups=groups, se=se)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, scales=scales, groups=groups, se=se)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, scales=scales, groups=groups, se=se)

        # Classification head.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He initialization for convs, identity-like start for batch norms.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def _make_layer(self, block, planes, layer, stride=1, scales=4, groups=1, se=True):
        """Build one stage of `layer` blocks; only the first may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Match the identity branch to the new shape with a 1x1 conv + BN.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        blocks = [block(self.inplanes, planes, stride=stride, downsample=downsample,
                        scales=scales, groups=groups, se=se)]
        self.inplanes = planes * block.expansion
        blocks.extend(block(self.inplanes, planes, scales=scales, groups=groups, se=se)
                      for _ in range(layer - 1))
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = self.maxpool(self.af(self.bn1(self.conv1(x))))

        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)

        out = self.avgpool(out)
        out = torch.flatten(out, 1)
        return self.fc(out)


def res2net50_se(num_classes=1000, scales=4, groups=1):
    """Res2Net-50 (3-4-6-3 bottleneck stages) with channel attention enabled."""
    return Res2Net(Bottleneck, [3, 4, 6, 3], num_classes=num_classes,
                   scales=scales, groups=groups, se=True)

def res2net18_fca(num_classes=1000, scales=4, groups=1, se=True):
    """Shallow Res2Net (2-2-2-2 bottleneck stages) with Fca attention."""
    return Res2Net(Bottleneck, [2, 2, 2, 2], num_classes=num_classes,
                   scales=scales, groups=groups, se=se)
