import os

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.models as models
from skimage import io
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms


class AtrousConv2d(nn.Module):
    """Thin wrapper around ``nn.Conv2d`` exposing dilated (atrous) convolution."""

    def __init__(self, in_channels, out_channels, kernel_size, dilation=1, padding=0):
        super(AtrousConv2d, self).__init__()
        # Dilation sets the spacing between kernel taps; padding is left to
        # the caller so spatial resolution can be preserved when desired.
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            padding=padding,
            dilation=dilation,
        )

    def forward(self, x):
        """Apply the dilated convolution to ``x``."""
        return self.conv(x)


class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling (DeepLab-style) head.

    Runs four parallel convolutions at increasing dilation rates plus a
    global-average-pooling branch, concatenates the five feature maps, and
    projects the result back down to ``out_channels`` with a 1x1 conv.
    Spatial size is preserved end to end.
    """

    def __init__(self, in_channels, out_channels):
        super(ASPP, self).__init__()

        # Parallel branches: a 1x1 conv plus three 3x3 atrous convs whose
        # padding matches the dilation, keeping the spatial size unchanged.
        self.conv_1x1 = nn.Conv2d(in_channels, out_channels, kernel_size=1)
        self.conv_3x3_1 = AtrousConv2d(in_channels, out_channels, kernel_size=3,
                                       dilation=6, padding=6)
        self.conv_3x3_2 = AtrousConv2d(in_channels, out_channels, kernel_size=3,
                                       dilation=12, padding=12)
        self.conv_3x3_3 = AtrousConv2d(in_channels, out_channels, kernel_size=3,
                                       dilation=18, padding=18)

        # Image-level context branch: pool to 1x1, embed, normalize.
        self.global_avg_pool = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU()
        )

        # Fuse the five concatenated branches back to out_channels.
        self.output = nn.Sequential(
            nn.Conv2d(out_channels * 5, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU()
        )

    def forward(self, x):
        spatial_size = x.size()[2:]

        # Upsample the pooled 1x1 context back to the input resolution so
        # it can be concatenated with the convolutional branches.
        pooled = self.global_avg_pool(x)
        pooled = F.interpolate(pooled, size=spatial_size,
                               mode='bilinear', align_corners=True)

        branches = [
            self.conv_1x1(x),
            self.conv_3x3_1(x),
            self.conv_3x3_2(x),
            self.conv_3x3_3(x),
            pooled,
        ]
        return self.output(torch.cat(branches, dim=1))


class DeepLab(nn.Module):
    """DeepLab v3+ style segmentation network on a ResNet-101 backbone.

    forward() returns per-pixel sigmoid probabilities of shape
    (batch, num_classes, H, W), upsampled back to the input resolution —
    suitable for ``nn.BCELoss`` targets in [0, 1].
    """

    def __init__(self, num_classes):
        super(DeepLab, self).__init__()

        # ResNet-101 backbone. NOTE: models.resnet101() initializes with
        # RANDOM weights — no pretrained checkpoint is loaded here; pass a
        # `weights=` argument upstream if ImageNet init is wanted.
        resnet = models.resnet101()
        self.layer0 = nn.Sequential(
            resnet.conv1,
            resnet.bn1,
            resnet.relu,
            resnet.maxpool
        )
        self.layer1 = resnet.layer1
        self.layer2 = resnet.layer2
        self.layer3 = resnet.layer3
        self.layer4 = resnet.layer4

        # ASPP over the deepest (2048-channel) backbone features.
        self.aspp = ASPP(2048, 256)

        # Project low-level (layer1, 256-channel) features down to 48
        # channels before fusing with the decoder, per DeepLab v3+.
        self.low_level_conv = nn.Sequential(
            nn.Conv2d(256, 48, 1, bias=False),
            nn.BatchNorm2d(48),
            nn.ReLU()
        )

        # Decoder: fuse 256 ASPP + 48 low-level channels (= 304) and
        # predict the per-class logits.
        self.decoder = nn.Sequential(
            nn.Conv2d(304, 256, 3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, 3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, num_classes, 1)
        )

    def forward(self, x):
        input_size = x.size()[2:]

        x = self.layer0(x)
        x = self.layer1(x)
        low_level_feat = x

        low_level_feat = self.low_level_conv(low_level_feat)

        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.aspp(x)

        # Upsample ASPP output to the low-level feature resolution so the
        # two streams can be concatenated channel-wise.
        x = F.interpolate(x, size=low_level_feat.shape[2:],
                          mode='bilinear', align_corners=True)

        x = torch.cat([x, low_level_feat], dim=1)

        x = self.decoder(x)

        output = F.interpolate(x, size=input_size, mode='bilinear', align_corners=True)
        # Use the functional sigmoid instead of constructing a fresh
        # nn.Sigmoid module on every forward pass (same result, no
        # per-call module allocation).
        return torch.sigmoid(output)


class CustomDataset(Dataset):
    """Pairs images with same-named masks from two directories.

    Each item is an ``(image, mask)`` tuple of tensors resized to 256x256.
    Masks are loaded as single-channel grayscale; ``transform`` (if given)
    is applied to the image only.
    """

    def __init__(self, input_dir, mask_dir, transform=None):
        self.input_dir = input_dir
        self.mask_dir = mask_dir
        # Mask files are expected to share filenames with the input images.
        self.input_name = os.listdir(input_dir)
        self.transform = transform

    def __len__(self):
        return len(self.input_name)

    def __getitem__(self, idx):
        img_path = os.path.join(self.input_dir, self.input_name[idx])
        mask_path = os.path.join(self.mask_dir, self.input_name[idx])

        image = io.imread(img_path)
        # as_gray=True yields a 2-D float array in [0, 1]. The previous
        # mask.squeeze(0) raised ValueError for any mask taller than one
        # pixel (numpy refuses to squeeze an axis of size != 1); reshaping
        # straight to (H, W, 1) is sufficient and also flattens a leading
        # singleton dimension if one is present.
        mask = io.imread(mask_path, as_gray=True)
        mask = mask.reshape(image.shape[0], image.shape[1], 1)

        # Resize both to the fixed training resolution. NOTE(review):
        # Resize interpolates the mask bilinearly, producing soft
        # (non-binary) labels — confirm this is intended for BCE training.
        image = transforms.Resize((256, 256))(transforms.ToTensor()(image))
        mask = transforms.Resize((256, 256))(transforms.ToTensor()(mask))

        # Normalization etc. applies to the image only; masks stay raw.
        if self.transform:
            image = self.transform(image)

        return image, mask


def train(model, dataloader, criterion, optimizer, device, num_epochs=31):
    """Train ``model`` on ``dataloader`` for ``num_epochs`` epochs.

    Expects model outputs and masks to have matching shapes with values in
    [0, 1] (BCE-style targets). Logs loss every 10 steps, prints the epoch
    average, and checkpoints to ../pt_file/ every third epoch.
    """
    model.train()
    for epoch in range(num_epochs):
        total_loss = 0
        batch_count = 0

        print(f'Epoch {epoch + 1}/{num_epochs}:')
        for i, (images, masks) in enumerate(dataloader):
            images = images.float().to(device)
            masks = masks.float().to(device)

            optimizer.zero_grad()
            outputs = model(images)

            # Report a shape mismatch and try dropping a singleton channel
            # dim. NOTE(review): when num_classes > 1 this squeeze cannot
            # reconcile the shapes and BCELoss will still fail — confirm
            # mask layout upstream if this ever fires.
            if outputs.shape != masks.shape:
                print(f"Shape mismatch - outputs: {outputs.shape}, masks: {masks.shape}")
                masks = masks.squeeze(1)

            # BCELoss requires targets in [0, 1]; fail fast otherwise.
            if masks.max() > 1 or masks.min() < 0:
                raise ValueError(f"Mask values out of range [0, 1]: min={masks.min()}, max={masks.max()}")

            loss = criterion(outputs, masks)
            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            batch_count += 1

            if (i + 1) % 10 == 0:
                print(f'Epoch [{epoch + 1}/{num_epochs}], Step [{i + 1}/{len(dataloader)}], '
                      f'Loss: {loss.item():.4f}')

        # Guard against an empty dataloader instead of dividing by zero.
        if batch_count:
            avg_loss = total_loss / batch_count
            print(f'Epoch [{epoch + 1}/{num_epochs}], Average Loss: {avg_loss:.4f}')

        if (epoch + 1) % 3 == 0:
            # Create the checkpoint directory on first use so torch.save
            # does not fail on a missing path.
            os.makedirs('../pt_file', exist_ok=True)
            # Name the file with the 1-based epoch number so it matches
            # the epoch numbers printed above (was f'Cell-{epoch}.pt',
            # which saved printed epoch 3 as Cell-2).
            torch.save(model.state_dict(), f'../pt_file/Cell-{epoch + 1}.pt')


if __name__ == '__main__':
    # ImageNet statistics, applied to already-tensorized images by the
    # dataset's optional transform hook.
    transform = transforms.Compose([
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    # NOTE(review): these point at the *val* split — confirm training on
    # the validation directories is intentional.
    img_root = '../unet_img/imgs/val'
    msk_root = '../unet_img/masks/val'

    try:
        dataset = CustomDataset(img_root, msk_root, transform=transform)
        dataloader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=2)

        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"Using device: {device}")

        # Single-channel sigmoid output paired with BCELoss.
        model = DeepLab(num_classes=1).to(device)
        criterion = nn.BCELoss()
        optimizer = optim.Adam(model.parameters(), lr=0.001)

        train(model, dataloader, criterion, optimizer, device)

    except Exception as e:
        # Surface the failure for the log, then re-raise with traceback.
        print(f"An error occurred: {str(e)}")
        raise
