import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from .mobilenetv3 import MobileNetV3Rnn
from .resnet import resnet50
from .resnet_cbam import resnet50cbam, resnet34cbam

class MobileNetV3FeatureExtractor(nn.Module):
    """Pretrained MobileNetV3-Small wrapped as a CRNN-style sequence feature extractor.

    The classifier head is dropped, features are projected to 512 channels,
    the height axis is average-pooled away, and the result is returned as a
    (B, W, 512) width-major sequence suitable for an RNN head.
    """

    def __init__(self, input_height=50, input_channels=3):
        super().__init__()
        # Pretrained MobileNetV3-Small backbone.
        # NOTE(review): `pretrained=True` is deprecated in torchvision >= 0.13
        # in favor of the `weights=` argument — confirm the pinned version.
        self.backbone = models.mobilenet_v3_small(pretrained=True)

        # Drop the classification head; only convolutional features are used.
        self.backbone.classifier = nn.Identity()

        # Swap the stem conv when the input is not 3-channel (e.g. grayscale),
        # keeping the original stem geometry (16 out, 3x3, stride 2).
        if input_channels != 3:
            self.backbone.features[0][0] = nn.Conv2d(
                input_channels, 16, kernel_size=3, stride=2, padding=1, bias=False
            )

        # 1x1 projection from MobileNetV3-Small's 576 feature channels to 512.
        self.output_conv = nn.Conv2d(576, 512, kernel_size=1)

        # Collapse the height dimension to 1 while keeping the width intact.
        self.height_pool = nn.AdaptiveAvgPool2d((1, None))

    def forward(self, x):
        """Map an image batch (B, C, H, W) to a width-major sequence (B, W', 512)."""
        feats = self.backbone.features(x)
        feats = self.output_conv(feats)   # (B, 512, H', W')
        feats = self.height_pool(feats)   # (B, 512, 1, W')
        feats = feats.squeeze(dim=2)      # (B, 512, W')
        return feats.permute(0, 2, 1)     # (B, W', 512)

class CBAM(nn.Module):
    """Convolutional Block Attention Module: channel attention then spatial attention.

    Args:
        channels: number of input feature channels.
        reduction_ratio: bottleneck ratio of the channel-attention MLP.
    """

    def __init__(self, channels, reduction_ratio=16):
        super().__init__()
        # Channel attention: shared MLP over global avg- and max-pooled descriptors.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction_ratio),
            nn.ReLU(),
            nn.Linear(channels // reduction_ratio, channels))
        # Spatial attention: 7x7 conv over channel-wise avg/max maps.
        self.conv = nn.Conv2d(2, 1, kernel_size=7, padding=3)

    def forward(self, x):
        # Channel attention.
        # BUGFIX: was `.squeeze()`, which also dropped the batch dimension when
        # B == 1, making the tensor 1-D and crashing on `.unsqueeze(2)` below;
        # `flatten(1)` always preserves the (B, C) layout.
        avg_out = self.fc(self.avg_pool(x).flatten(1))
        max_out = self.fc(self.max_pool(x).flatten(1))
        channel_att = torch.sigmoid(avg_out + max_out).unsqueeze(-1).unsqueeze(-1)
        x = x * channel_att

        # Spatial attention.
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        spatial_att = torch.sigmoid(self.conv(torch.cat([avg_out, max_out], dim=1)))
        return x * spatial_att

class BasicBlock(nn.Module):
    """Residual block: two 3x3 convs with BatchNorm and ELU activations, plus a
    1x1 projection shortcut whenever the spatial size or channel count changes."""

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        self.conv1 = nn.Conv2d(
            in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(
            out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(out_channels)

        # Identity shortcut by default; projection when shapes differ.
        if stride != 1 or in_channels != out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        identity = self.shortcut(x)
        out = F.elu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Residual addition followed by the final activation.
        return F.elu(out + identity)

class SEBlock(nn.Module):
    """Squeeze-and-Excitation block: global average pooling followed by a
    two-layer gating MLP whose output rescales each channel."""

    def __init__(self, channel, reduction=16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            nn.ReLU(),
            nn.Linear(channel // reduction, channel),
            nn.Sigmoid()
        )

    def forward(self, x):
        batch, channels = x.shape[:2]
        # Squeeze: per-channel global average -> (B, C).
        squeezed = self.avg_pool(x).view(batch, channels)
        # Excite: gating weights in (0, 1), broadcast back over H and W.
        weights = self.fc(squeezed).view(batch, channels, 1, 1)
        return x * weights

class BidirectionalLSTM(nn.Module):
    """Bidirectional LSTM followed by a per-timestep linear projection.

    Layout is sequence-first: (T, B, nIn) -> (T, B, nOut).
    """

    def __init__(self, nIn, nHidden, nOut):
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
        # Forward + backward states are concatenated, hence 2 * nHidden inputs.
        self.embedding = nn.Linear(nHidden * 2, nOut)

    def forward(self, input):
        recurrent, _ = self.rnn(input)
        seq_len, batch, feat = recurrent.size()
        # Fold time and batch together so the Linear runs over every timestep,
        # then restore the (T, B, nOut) layout.
        flat = recurrent.view(seq_len * batch, feat)
        return self.embedding(flat).view(seq_len, batch, -1)

class STN(nn.Module):
    """Spatial Transformer Network for single-channel inputs.

    A small localization CNN predicts a 2x3 affine matrix that is applied to
    the input via affine_grid/grid_sample. The flattened localization feature
    size (10 * 9 * 26) is hard-coded for a specific input resolution —
    presumably around (1, 50, 120); verify against the caller.
    """

    def __init__(self):
        super(STN, self).__init__()
        self.localization = nn.Sequential(
            nn.Conv2d(1, 8, kernel_size=7),
            nn.MaxPool2d(2, stride=2),
            nn.ReLU(),
            nn.Conv2d(8, 10, kernel_size=5),
            nn.MaxPool2d(2, stride=2),
            nn.ReLU()
        )
        self.fc_loc = nn.Sequential(
            nn.Linear(10 * 9 * 26, 32),   # flatten size tied to the expected input resolution
            nn.ReLU(),
            nn.Linear(32, 3 * 2)          # 6 affine transform parameters
        )
        # Zero weight + identity bias so the module starts as a no-op transform.
        self.fc_loc[2].weight.data.zero_()
        self.fc_loc[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))

    def forward(self, x):
        loc_feat = self.localization(x).view(-1, 10 * 9 * 26)
        theta = self.fc_loc(loc_feat).view(-1, 2, 3)
        sampling_grid = F.affine_grid(theta, x.size(), align_corners=False)
        return F.grid_sample(x, sampling_grid, align_corners=False)

class CRNN(nn.Module):
    """CRNN text recognizer: STN rectification -> CNN + CBAM attention ->
    height pooling -> two stacked bidirectional LSTMs.

    Args:
        imgH: input image height; must be a multiple of 16.
        nc: number of input image channels.
        nclass: number of output classes (e.g. CTC alphabet size).
        nh: hidden size of the LSTM layers.
        n_rnn: kept for interface compatibility (the head is fixed at 2 BiLSTMs).
        leakyRelu: use LeakyReLU(0.2) instead of ReLU in the CNN stages.
    """

    def __init__(self, imgH, nc, nclass, nh, n_rnn=2, leakyRelu=False):
        super(CRNN, self).__init__()
        assert imgH % 16 == 0, 'imgH has to be a multiple of 16'

        # Per-stage conv hyper-parameters: kernel, padding, stride, out-channels.
        ks = [3, 3, 3, 3, 3, 3, 2]
        ps = [1, 1, 1, 1, 1, 1, 0]
        ss = [1, 1, 1, 1, 1, 1, 1]
        nm = [64, 128, 256, 256, 512, 512, 512]

        cnn = nn.Sequential()

        def convRelu(i, batchNormalization=False):
            # Conv -> (optional BN) -> ReLU/LeakyReLU for stage i.
            nIn = nc if i == 0 else nm[i - 1]
            nOut = nm[i]
            cnn.add_module('conv{0}'.format(i),
                           nn.Conv2d(nIn, nOut, ks[i], ss[i], ps[i]))
            if batchNormalization:
                cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nOut))
            if leakyRelu:
                cnn.add_module('relu{0}'.format(i),
                               nn.LeakyReLU(0.2, inplace=True))
            else:
                cnn.add_module('relu{0}'.format(i), nn.ReLU(True))

        # Each conv stage is followed by a CBAM attention module; the later
        # pools use stride (2, 1) so height shrinks faster than width,
        # preserving the output sequence length.
        convRelu(0)
        cnn.add_module('cbam{0}'.format(0), CBAM(64))
        cnn.add_module('pooling{0}'.format(0), nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)))  # 64 x H/2 x W/2
        convRelu(1)
        cnn.add_module('cbam{0}'.format(1), CBAM(128))
        cnn.add_module('pooling{0}'.format(1), nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)))  # 128 x H/4 x W/4
        convRelu(2, True)
        cnn.add_module('cbam{0}'.format(2), CBAM(256))
        convRelu(3)
        cnn.add_module('cbam{0}'.format(3), CBAM(256))
        cnn.add_module('pooling{0}'.format(2),
                       nn.MaxPool2d((2, 2), (2, 1), (0, 1)))  # halve H only
        convRelu(4, True)
        cnn.add_module('cbam{0}'.format(4), CBAM(512))
        convRelu(5)
        cnn.add_module('cbam{0}'.format(5), CBAM(512))
        cnn.add_module('pooling{0}'.format(3),
                       nn.MaxPool2d((2, 2), (2, 1), (0, 1)))  # halve H only
        convRelu(6, True)  # final 2x2 conv, no padding

        self.cnn = cnn
        # NOTE(review): STN's localization head assumes a fixed input size
        # (see STN.fc_loc); confirm it is consistent with imgH at the caller.
        self.stn = STN()
        # Collapse any residual height to 1 so features form a width sequence.
        self.height_pool = nn.AdaptiveAvgPool2d((1, None))
        self.rnn = nn.Sequential(
            BidirectionalLSTM(512, nh, nh),
            BidirectionalLSTM(nh, nh, nclass))
        # Optional custom initialization (disabled; PyTorch defaults are used):
        # self._init_weights()

    def _init_weights(self):
        """Kaiming-init convs, orthogonal-init LSTM weights, Xavier-init linears.

        Currently unused (the call in __init__ is commented out).
        """
        # BUGFIX: iterate .modules() recursively. Iterating `self.rnn` directly
        # only yields BidirectionalLSTM wrappers, so the isinstance checks for
        # nn.LSTM / nn.Linear never matched and nothing was initialized.
        for m in self.cnn.modules():
            if isinstance(m, nn.Conv2d):
                # Kaiming init suits the ReLU-family activations used in the CNN.
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        for m in self.rnn.modules():
            if isinstance(m, nn.LSTM):
                for name, param in m.named_parameters():
                    if 'weight' in name:
                        nn.init.orthogonal_(param)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight)

    def forward(self, input):
        """Map images (B, nc, H, W) to per-timestep class scores (W', B, nclass)."""
        conv = self.stn(input)
        conv = self.cnn(conv)
        conv = self.height_pool(conv)
        b, c, h, w = conv.size()
        assert h == 1, "the height of conv must be 1"
        conv = conv.squeeze(2)          # (B, C, W')
        conv = conv.permute(2, 0, 1)    # (W', B, C) — sequence-first for the LSTMs

        # RNN head produces unnormalized scores; apply log_softmax externally
        # (e.g. in the CTC loss) if needed.
        return self.rnn(conv)

class ResCRNN(nn.Module):
    """ResNet-style CRNN: stem conv + four residual stages, then two BiLSTMs.

    The stem pool and each of the four stages halve both H and W, for a total
    x32 downsampling; the forward asserts the feature height has collapsed to
    1 — presumably imgH == 32, verify against the caller. `imgH` and `n_rnn`
    are accepted for interface parity but not used directly.
    """

    def __init__(self, imgH, nc, nclass, nh, n_rnn=2):
        super().__init__()
        # Stem: conv-BN-ReLU plus one 2x downsampling pool.
        self.conv1 = nn.Sequential(
            nn.Conv2d(nc, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )

        # Four residual stages; the first block of each downsamples by stride 2.
        self.layer1 = self._make_layer(64, 64, stride=2, num_blocks=2)
        self.layer2 = self._make_layer(64, 128, stride=2, num_blocks=2)
        self.layer3 = self._make_layer(128, 256, stride=2, num_blocks=2)
        self.layer4 = self._make_layer(256, 512, stride=2, num_blocks=2)

        # Sequence head: two stacked bidirectional LSTMs.
        self.rnn = nn.Sequential(
            BidirectionalLSTM(512, nh, nh),
            BidirectionalLSTM(nh, nh, nclass))

    def _make_layer(self, in_channels, out_channels, stride, num_blocks):
        """Build a stage of `num_blocks` BasicBlocks; only the first may downsample."""
        blocks = [BasicBlock(in_channels, out_channels, stride)]
        blocks.extend(
            BasicBlock(out_channels, out_channels, stride=1)
            for _ in range(num_blocks - 1)
        )
        return nn.Sequential(*blocks)

    def forward(self, x):
        # CNN feature extraction (stem + stages: total x32 downsampling).
        x = self.conv1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        B, C, H, W = x.shape
        assert H == 1, "the height of conv must be 1"
        # (B, C, 1, W) -> (W, B, C): sequence-first layout for the LSTMs.
        x = x.squeeze(2).permute(2, 0, 1)

        # Temporal modeling + per-timestep log-probabilities (e.g. for CTC).
        x = self.rnn(x)
        return F.log_softmax(x, dim=2)