import torch
import torch.nn as nn
import torch.nn.functional as F

class HighwayBlock(nn.Module):
    """Highway-style gating block.

    Blends a layer's output with a (down-sampled) copy of its input using a
    learned sigmoid gate ``T``::

        H = layer_out * T + down_sample_origin_x * (1 - T)

    Two variants:

    * ``use_complex_model=True`` -- one ``nn.Linear`` gate per channel;
      ``origin_x`` is average-pooled until its width is <= ``wh_threshold``
      (smaller threshold => less memory). Per the original author, this is
      very slow and in practice no better than the simple variant.
    * ``use_complex_model=False`` -- a single two-layer linear gate over the
      flattened feature map, with weights of shape ``(c*w*h, hidde_size)``
      and ``(hidde_size, c*w*h)``. If ``use_down_sample`` is True, the
      feature map is first reduced to one channel by a 1x1 conv before
      entering the gate. Note the simple variant scales the result by 2
      (kept from the original implementation).

    Args:
        hidde_size: hidden width of the simple variant's gate (name typo is
            kept for backward compatibility with existing callers).
        use_down_sample: simple variant only -- reduce ``origin_x`` to a
            single channel with a 1x1 conv before gating.
        use_complex_model: select the per-channel (complex) variant.
        wh_threshold: complex variant only -- pool ``origin_x`` until its
            width is at most this value.
    """

    def __init__(self, hidde_size=100, use_down_sample=False,
                 use_complex_model=False, wh_threshold=1):
        super(HighwayBlock, self).__init__()
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # Pooling used to shrink origin_x in the complex variant.
        self.pool = nn.AvgPool2d(kernel_size=2, stride=2).to(self.device)
        self.wh_threshold = wh_threshold  # was assigned twice; once suffices
        self.sigmoid = nn.Sigmoid()
        self.hidde_size = hidde_size
        self.use_complex_model = use_complex_model
        self.use_down_sample = use_down_sample
        # Lazily-built submodules: their shapes are only known at the first
        # forward pass, so they are created on demand.
        self.gate = None           # ModuleList of per-channel Linears, or Sequential
        self.channel_conv = None   # 1x1 conv matching origin_x channels to layer_out
        self.dim_reduction = None  # 1x1 conv reducing origin_x to one channel

    def forward(self, origin_x, layer_out, down_sample_origin_x):
        """Gate ``layer_out`` against ``down_sample_origin_x``.

        Args:
            origin_x: feature map the gate is computed from, ``(b, c, w, h)``.
            layer_out: the layer's output, ``(b, c_out, w_o, h_o)``.
            down_sample_origin_x: skip-path tensor, same shape as ``layer_out``.

        Returns:
            Gated combination with the same shape as ``layer_out``.
        """
        if self.use_complex_model:
            b, c, w, h = origin_x.shape
            # Shrink origin_x until its width fits the threshold.
            # NOTE(review): only the width is checked -- assumes roughly
            # square feature maps; confirm against callers.
            temp_w = w
            while temp_w > self.wh_threshold:
                origin_x = self.pool(origin_x)
                temp_w /= 2
            b, c_out, w_o, h_o = layer_out.shape
            if c_out != c:
                if self.channel_conv is None:
                    # BUGFIX: was `conv1x1(c, c_out)` -- an undefined name
                    # (NameError at runtime); build the 1x1 conv directly and
                    # move it to the block's device like dim_reduction below.
                    self.channel_conv = nn.Conv2d(c, c_out, kernel_size=1).to(self.device)
                origin_x = self.channel_conv(origin_x.to(self.device))
            b, c, w_f, h_f = origin_x.shape
            in_s = w_f * h_f
            # BUGFIX: `origin_x.view(c, b, in_s)` reinterpreted the (b, c, w, h)
            # memory and mixed batch with channel data; permute first so that
            # origin_x_flat[i] really is channel i for every batch element.
            origin_x_flat = origin_x.permute(1, 0, 2, 3).reshape(c, b, in_s)
            if self.gate is None:
                ou_s = w_o * h_o
                # ModuleList so the per-channel gates are registered (visible
                # to .parameters(), .to() and state_dict()).
                self.gate = nn.ModuleList(
                    nn.Linear(in_s, ou_s).to(self.device) for _ in range(c)
                )
            # Per-channel gate values. BUGFIX: stack along dim=1 so the result
            # is (b, c, ou_s) and the final view lines up with layer_out's
            # (b, c, w_o, h_o) layout instead of transposing batch and channel.
            T = self.sigmoid(torch.stack(
                [self.gate[i](origin_x_flat[i].to(self.device)) for i in range(c)],
                dim=1,
            )).view(layer_out.size())
            # Weighted highway combination.
            H = layer_out * T + down_sample_origin_x * (1 - T)
        else:
            b, c, w, h = origin_x.shape
            if self.use_down_sample:
                if self.dim_reduction is None:
                    self.dim_reduction = nn.Conv2d(c, out_channels=1, kernel_size=1).to(self.device)
                # (The redundant `.to(self.device)` on the conv *output* was
                # dropped: the conv already produces a tensor on its device.)
                origin_x = self.dim_reduction(origin_x)
            b, c_r, w_r, h_r = origin_x.shape
            in_shape = c_r * w_r * h_r
            b, c_o, w_o, h_o = layer_out.shape
            out_shape = c_o * w_o * h_o
            if self.gate is None:
                # At most about 64 * 32 * 32 = ~60k input features.
                self.gate = nn.Sequential(
                    nn.Linear(in_shape, self.hidde_size),
                    nn.Linear(self.hidde_size, out_shape),
                ).to(self.device)
            origin_x_flatten = origin_x.view(origin_x.size(0), -1)
            T = self.sigmoid(self.gate(origin_x_flatten)).view(layer_out.size())
            # The 2x scale comes from the original implementation; kept as-is.
            H = 2 * (layer_out * T + down_sample_origin_x * (1 - T))

        return H