import cv2
import math
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F

LR_RATE = 1e-4  # Adam learning rate for the built-in optimizer


class RAS(nn.Module):
    """Reverse-attention saliency network.

    A VGG16-style encoder feeds ASPP and PSP context modules; a cascade of
    side-output branches then refines a coarse prediction from deep to
    shallow layers using reverse attention (features are weighted by
    ``1 - sigmoid(coarser prediction)``).  ``forward`` returns six sigmoid
    saliency maps, each center-cropped to the input's spatial size.
    """

    def __init__(self):
        super(RAS, self).__init__()
        # Backbone (VGG-like): five conv stages, spatially downsampled
        # between stages in forward().
        self.conv1_1 = nn.Conv2d(3, 64, 3, padding=1)
        self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
        self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
        self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
        self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
        self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
        self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
        self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1)

        # ASPP module: parallel dilated 3x3 convs (rates 6/12/18), a 1x1
        # conv, and image-level pooling, concatenated and reduced to 512ch.
        self.aspp1 = nn.Conv2d(512, 256, 1)
        self.aspp6 = nn.Conv2d(512, 256, 3, padding=6, dilation=6)
        self.aspp12 = nn.Conv2d(512, 256, 3, padding=12, dilation=12)
        self.aspp18 = nn.Conv2d(512, 256, 3, padding=18, dilation=18)
        self.global_pool = nn.AdaptiveAvgPool2d(1)
        self.aspp_gp = nn.Conv2d(512, 256, 1)
        self.aspp_reduce = nn.Conv2d(256 * 5, 512, 1)

        # PSP module: pyramid pooling at 1/2/3/6 bins over the ASPP output,
        # concatenated with the input feature and reduced back to 512ch.
        pool_sizes = [1, 2, 3, 6]
        self.psp_convs = nn.ModuleList([
            nn.Conv2d(512, 256, 1) for _ in pool_sizes
        ])
        self.psp_reduce = nn.Conv2d(512 + 256 * len(pool_sizes), 512, 1)

        # Deepest side output (dsn6) and its upsampling deconvs.
        self.conv1_dsn6 = nn.Conv2d(512, 256, 1)
        self.conv2_dsn6 = nn.Conv2d(256, 256, 5, padding=2)
        self.conv3_dsn6 = nn.Conv2d(256, 256, 5, padding=2)
        self.conv4_dsn6 = nn.Conv2d(256, 256, 5, padding=2)
        self.conv5_dsn6 = nn.Conv2d(256, 1, 1)
        self.conv5_dsn6_up = nn.ConvTranspose2d(1, 1, 64, stride=32)  # 1/16 -> full res (then cropped)
        self.conv5_dsn6_5 = nn.ConvTranspose2d(1, 1, 4, stride=2)

        def make_branch(in_ch):
            # Small 4-conv head shared in shape by every side branch.
            return nn.Sequential(
                nn.Conv2d(in_ch, 64, 1),
                nn.Conv2d(64, 64, 3, padding=1),
                nn.Conv2d(64, 64, 3, padding=1),
                nn.Conv2d(64, 1, 3, padding=1)
            )
        self.branch5 = make_branch(512)
        self.branch4 = make_branch(512)
        self.branch3 = make_branch(256)
        self.branch2 = make_branch(128)
        self.branch1 = make_branch(64)

        # Deconvs upsampling each side prediction to full resolution (upN)
        # or to the next-shallower stage's resolution (upN_M).
        self.up5 = nn.ConvTranspose2d(1, 1, 32, stride=16)
        self.up5_4 = nn.ConvTranspose2d(1, 1, 4, stride=2)
        self.up4 = nn.ConvTranspose2d(1, 1, 16, stride=8)
        self.up4_3 = nn.ConvTranspose2d(1, 1, 4, stride=2)
        self.up3 = nn.ConvTranspose2d(1, 1, 8, stride=4)
        self.up3_2 = nn.ConvTranspose2d(1, 1, 4, stride=2)
        self.up2 = nn.ConvTranspose2d(1, 1, 4, stride=2)

        # Optimizer is created last so every submodule is already registered.
        self.optim = optim.Adam(self.parameters(), lr=LR_RATE)
        self.apply(RAS.weights_init)

    def forward(self, x, im_path_pre=None):
        """Compute the six side-output saliency maps.

        Args:
            x: input image batch of shape (B, 3, H, W).  H and W must be
               large enough to survive the 1/16 downsampling; presumably
               multiples of 16 are intended — TODO confirm with callers.
            im_path_pre: optional filename prefix; when truthy, selected
                intermediate activations are written out as PNGs via OpenCV.

        Returns:
            Tuple ``(up1, ..., up6)`` of sigmoid maps, each (B, 1, H, W),
            ordered from the shallowest (finest) to the deepest branch.
        """
        x_size = x.size()

        # ----- Encoder -----
        x = F.relu(self.conv1_1(x))
        x = F.relu(self.conv1_2(x))
        conv1_2 = x                      # full resolution, 64 ch
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv2_1(x))
        x = F.relu(self.conv2_2(x))
        conv2_2 = x                      # 1/2 resolution, 128 ch
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv3_1(x))
        x = F.relu(self.conv3_2(x))
        x = F.relu(self.conv3_3(x))
        conv3_3 = x                      # 1/4 resolution, 256 ch
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv4_1(x))
        x = F.relu(self.conv4_2(x))
        x = F.relu(self.conv4_3(x))
        conv4_3 = x                      # 1/8 resolution, 512 ch
        # 3x3/stride-2/pad-1 pool: halves size like the others.
        x = F.max_pool2d(x, 3, 2, 1)
        x = F.relu(self.conv5_1(x))
        x = F.relu(self.conv5_2(x))
        x = F.relu(self.conv5_3(x))
        conv5_3 = x                      # 1/16 resolution, 512 ch

        # ----- ASPP -----
        aspp1 = F.relu(self.aspp1(conv5_3))
        aspp6 = F.relu(self.aspp6(conv5_3))
        aspp12 = F.relu(self.aspp12(conv5_3))
        aspp18 = F.relu(self.aspp18(conv5_3))
        gp = self.global_pool(conv5_3)
        gp = F.relu(self.aspp_gp(gp))
        gp = F.interpolate(gp, size=conv5_3.shape[2:], mode='bilinear', align_corners=False)
        aspp_cat = torch.cat([aspp1, aspp6, aspp12, aspp18, gp], dim=1)
        aspp_feat = F.relu(self.aspp_reduce(aspp_cat))

        # ----- PSP -----
        psp_feats = [aspp_feat]
        pool_sizes = [1, 2, 3, 6]
        for conv, size in zip(self.psp_convs, pool_sizes):
            pooled = F.adaptive_avg_pool2d(aspp_feat, output_size=size)
            pooled = F.relu(conv(pooled))
            pooled = F.interpolate(pooled, size=conv5_3.shape[2:], mode='bilinear', align_corners=False)
            psp_feats.append(pooled)
        psp_cat = torch.cat(psp_feats, dim=1)
        psp_feat = F.relu(self.psp_reduce(psp_cat))

        # ----- Side-output 6: coarsest prediction from context features -----
        x6 = F.relu(self.conv1_dsn6(psp_feat))
        x6 = F.relu(self.conv2_dsn6(x6))
        x6 = F.relu(self.conv3_dsn6(x6))
        x6 = F.relu(self.conv4_dsn6(x6))
        conv5_dsn6 = self.conv5_dsn6(x6)
        up6 = self.crop(self.conv5_dsn6_up(conv5_dsn6), x_size)

        def ra(feat, prev, ch):
            # Reverse attention: emphasize regions the coarser map missed.
            prev_crop = self.crop(prev, feat.size())
            inv = 1 - torch.sigmoid(prev_crop)
            return inv.expand(-1, ch, -1, -1) * feat

        # Layer 5 (1/16).  The 2x deconv of the coarse prediction is used
        # both as the attention source and as the residual, so compute once.
        pred6_up = self.conv5_dsn6_5(conv5_dsn6)
        ra5 = ra(conv5_3, pred6_up, 512)
        dsn5 = self.branch5(ra5)
        sum5 = dsn5 + self.crop(pred6_up, dsn5.size())
        up5 = self.crop(self.up5(sum5), x_size)

        # Layer 4 (1/8)
        pred5_up = self.up5_4(sum5)
        ra4 = ra(conv4_3, pred5_up, 512)
        dsn4 = self.branch4(ra4)
        sum4 = dsn4 + self.crop(pred5_up, dsn4.size())
        up4 = self.crop(self.up4(sum4), x_size)

        # Layer 3 (1/4)
        pred4_up = self.up4_3(sum4)
        ra3 = ra(conv3_3, pred4_up, 256)
        dsn3 = self.branch3(ra3)
        sum3 = dsn3 + self.crop(pred4_up, dsn3.size())
        up3 = self.crop(self.up3(sum3), x_size)

        # Layer 2 (1/2)
        pred3_up = self.up3_2(sum3)
        ra2 = ra(conv2_2, pred3_up, 128)
        dsn2 = self.branch2(ra2)
        sum2 = dsn2 + self.crop(pred3_up, dsn2.size())
        up2 = self.crop(self.up2(sum2), x_size)

        # Layer 1 (full resolution) & fusion with the layer-2 prediction.
        ra1 = ra(conv1_2, up2, 64)
        dsn1 = self.branch1(ra1)
        sum1 = dsn1 + up2
        up1 = self.crop(sum1, x_size)

        # Optionally dump intermediate activations as images for inspection.
        if im_path_pre:
            for name, layer in zip(
                    ["conv1_2", "conv2_2", "conv3_3", "conv4_3", "conv5_3", "psp_feat", "conv5_dsn6",
                     "dsn5", "dsn4", "dsn3", "dsn2", "dsn1"],
                    [conv1_2, conv2_2, conv3_3, conv4_3, conv5_3, psp_feat, conv5_dsn6,
                     dsn5, dsn4, dsn3, dsn2, dsn1]
            ):
                im = (torch.sigmoid(layer).detach().cpu().numpy()[0] * 255).astype(np.uint8)
                if im.shape[0] == 1:
                    # Single-channel grayscale image.
                    cv2.imwrite(f"{im_path_pre}{name}.png", im[0])
                elif im.shape[0] == 3:
                    # 3-channel image (channels-first -> channels-last for OpenCV).
                    cv2.imwrite(f"{im_path_pre}{name}.png", im.transpose(1, 2, 0))
                elif im.shape[0] > 3:
                    # Multi-channel feature map: average over channels.
                    im_mean = np.mean(im, axis=0).astype(np.uint8)
                    cv2.imwrite(f"{im_path_pre}{name}_mean.png", im_mean)
                else:
                    print(f"[WARN] Skipping {name}: unsupported channel count {im.shape[0]}")

        return torch.sigmoid(up1), torch.sigmoid(up2), torch.sigmoid(up3), \
               torch.sigmoid(up4), torch.sigmoid(up5), torch.sigmoid(up6)

    def train(self, batch_x=True, batch_y=None):
        """Run one optimization step, or switch training mode.

        NOTE(review): the original ``train(batch_x, batch_y)`` shadowed
        ``nn.Module.train(mode=True)``, silently breaking ``model.train()``
        and ``model.eval()``.  For backward compatibility both uses are now
        supported: a bool (or no argument) delegates to the base class;
        tensor arguments perform one Adam step against the summed MSE loss
        over all six side outputs.

        Args:
            batch_x: input batch (B, 3, H, W), or a bool training-mode flag.
            batch_y: target saliency maps (B, 1, H, W); ignored in mode-switch use.

        Returns:
            ``self`` when switching modes, otherwise the scalar loss value.
        """
        if isinstance(batch_x, bool):
            # nn.Module.train(mode) / eval() compatibility path.
            return super().train(batch_x)
        outs = self.forward(batch_x)
        loss_fn = nn.MSELoss()
        loss = sum([loss_fn(o, batch_y) for o in outs])
        self.optim.zero_grad()
        loss.backward()
        self.optim.step()
        return loss.item()

    def test(self, batch_x, im_path_pre=None):
        """Inference: return the average of the six sigmoid outputs, no gradients."""
        with torch.no_grad():
            outs = self.forward(batch_x, im_path_pre)
            return sum([o for o in outs]) / len(outs)

    def crop(self, upsampled, x_size):
        """Center-crop ``upsampled`` to the spatial size given by ``x_size[2:4]``.

        Assumes ``upsampled`` is at least as large as the target in both
        spatial dimensions (deconv outputs always overshoot here).

        NOTE(review): the original used ``x_size[2]`` for both axes, which
        mis-cropped any non-square input; height and width are now cropped
        independently (identical results for square inputs).
        """
        th, tw = x_size[2], x_size[3]
        ch = (upsampled.size(2) - th) // 2
        cw = (upsampled.size(3) - tw) // 2
        if ch == 0 and cw == 0 and upsampled.size(2) == th and upsampled.size(3) == tw:
            return upsampled
        return upsampled[:, :, ch:ch + th, cw:cw + tw]

    @staticmethod
    def weights_init(m):
        """He-style (fan-out) normal init for conv and deconv weights."""
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))