import os

import numpy as np
import torch
import torch.nn.functional as F
from skimage import io
from torch import optim
from torch.utils.data import DataLoader
from torchvision import transforms


class Dataset(torch.utils.data.Dataset):
    """Per-folder cell-segmentation dataset.

    Expected layout: ``root_dir/<sample>/images/<file>`` holds the input
    image; in training mode ``root_dir/<sample>/masks/`` holds one binary
    mask file per instance, which are unioned into a single target mask.
    Every sample is rescaled (aspect-preserving) and zero-padded to a
    square ``target_size`` x ``target_size`` canvas.
    """

    def __init__(self, root_dir, target_size=256, min_size=64, max_size=1024, is_train=True):
        self.root_dir = root_dir
        self.target_size = target_size   # final square canvas side
        self.min_size = min_size         # upscale images whose short side is below this
        self.max_size = max_size         # downscale images whose long side is above this
        self.is_train = is_train         # training mode also loads and returns masks

        # One sample per sub-directory of root_dir.
        self.folders = [d for d in os.listdir(root_dir)
                        if os.path.isdir(os.path.join(root_dir, d))]

    def _resize(self, image, mask, size):
        """Resize ``image`` (bilinear) and optional ``mask`` (nearest) to ``size`` = (h, w)."""
        image = F.interpolate(image.unsqueeze(0), size=size,
                              mode='bilinear', align_corners=False).squeeze(0)
        if mask is not None:
            # Nearest-neighbour keeps the mask binary (no in-between values).
            mask = F.interpolate(mask.unsqueeze(0), size=size,
                                 mode='nearest').squeeze(0)
        return image, mask

    def process_image_and_mask(self, image, mask):
        """Rescale and pad ``image`` (C, H, W) and optional ``mask`` (1, H, W)
        to (C, target_size, target_size).

        Returns ``(image, mask)``; ``mask`` stays ``None`` when none was given.
        """
        # 1. Clamp extreme sizes into [min_size, max_size].
        #    Bug fix: the original resized the mask in these branches without
        #    a None check, crashing for inference samples outside the range.
        h, w = image.shape[-2:]
        if h < self.min_size or w < self.min_size:
            scale = self.min_size / min(h, w)
            image, mask = self._resize(image, mask, (int(h * scale), int(w * scale)))
        elif h > self.max_size or w > self.max_size:
            scale = self.max_size / max(h, w)
            image, mask = self._resize(image, mask, (int(h * scale), int(w * scale)))

        # 2. Aspect-preserving resize so the longer side equals target_size.
        h, w = image.shape[-2:]
        if h > w:
            new_h = self.target_size
            new_w = int(w * (new_h / h))
        else:
            new_w = self.target_size
            new_h = int(h * (new_w / w))
        image, mask = self._resize(image, mask, (new_h, new_w))

        # 3. Zero-pad symmetrically up to the square target size.
        pad_h = self.target_size - new_h
        pad_w = self.target_size - new_w

        pad_h1, pad_h2 = pad_h // 2, pad_h - (pad_h // 2)
        pad_w1, pad_w2 = pad_w // 2, pad_w - (pad_w // 2)

        padding = (pad_w1, pad_w2, pad_h1, pad_h2)  # (left, right, top, bottom)
        image = F.pad(image, padding, mode='constant', value=0)
        if mask is not None:
            mask = F.pad(mask, padding, mode='constant', value=0)

        return image, mask

    def __getitem__(self, idx):
        """Return ``(image, mask, meta)`` in training mode, else ``(image, meta)``."""
        folder_name = self.folders[idx]
        folder_path = os.path.join(self.root_dir, folder_name)

        # Load the (single) image file for this sample.
        image_path = os.path.join(folder_path, 'images', os.listdir(os.path.join(folder_path, 'images'))[0])
        image = io.imread(image_path)

        # NOTE(review): channels pass through unchanged, so RGBA files stay
        # 4-channel — the downstream model must be built with a matching img_ch.
        image = np.transpose(image, (2, 0, 1))  # HWC -> CHW
        image = torch.from_numpy(image).float() / 255.0

        if self.is_train:
            # Union all per-instance masks into one binary foreground mask.
            mask_dir = os.path.join(folder_path, 'masks')
            mask_files = sorted(os.listdir(mask_dir))
            combined_mask = None

            for mask_file in mask_files:
                mask_path = os.path.join(mask_dir, mask_file)
                # Load as grayscale and normalize 0-255 masks into [0, 1].
                mask = io.imread(mask_path, as_gray=True)
                if mask.max() > 1:
                    mask = mask / 255.0
                if combined_mask is None:
                    combined_mask = mask
                else:
                    combined_mask = np.clip(combined_mask + mask, 0, 1)

            # Ensure a float mask clamped to [0, 1] with a channel dim -> (1, H, W).
            combined_mask = combined_mask.astype(np.float32)
            combined_mask = np.clip(combined_mask, 0, 1)
            combined_mask = torch.from_numpy(combined_mask).float()
            combined_mask = combined_mask.unsqueeze(0)  # Add channel dim

            # Rescale/pad both to the square canvas.
            image, mask = self.process_image_and_mask(image, combined_mask)

            # Guard against any numeric drift introduced by interpolation.
            mask = torch.clamp(mask, 0, 1)

            return image, mask, {'folder_name': folder_name}
        else:
            # Inference: no mask available.
            image, _ = self.process_image_and_mask(image, None)
            return image, {'folder_name': folder_name}

    def __len__(self):
        return len(self.folders)


def train(model, dataloader, criterion, optimizer, device, num_epochs=31):
    """Run a standard supervised training loop.

    Args:
        model: network mapping image batches to predicted masks.
        dataloader: iterable yielding ``(images, masks, meta)`` batches.
        criterion: loss taking ``(outputs, masks)``.
        optimizer: optimizer built over ``model.parameters()``.
        device: torch.device to run on.
        num_epochs: number of passes over the data.

    Side effects: prints per-epoch average loss and saves a state-dict
    checkpoint ``U_Cell-<epoch>.pt`` every third epoch.
    """
    model.train()
    for epoch in range(num_epochs):
        print(f'第{epoch + 1}轮训练:')
        running_loss = 0.0
        num_batches = 0
        for images, masks, _ in dataloader:
            images = images.float().to(device)
            masks = masks.float().to(device)

            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, masks)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            num_batches += 1

        # Bug fix: the original printed only the *last* batch's loss under a
        # 0-based epoch label (inconsistent with the 1-based header above),
        # and raised NameError when the dataloader was empty.
        avg_loss = running_loss / max(num_batches, 1)
        print(f'Epoch {epoch + 1}, Loss: {avg_loss}')
        if (epoch + 1) % 3 == 0:
            torch.save(model.state_dict(), f'U_Cell-{epoch}.pt')


import torch
import torch.nn as nn
from torch.nn import init


def init_weights(net, init_type='normal', gain=0.02):
    """Initialize the weights of ``net`` in place.

    Conv/Linear weights are drawn according to ``init_type`` (one of
    'normal', 'xavier', 'kaiming', 'orthogonal') with spread ``gain``;
    their biases are zeroed. BatchNorm2d weights are drawn from
    N(1, gain) with zero bias.
    """

    def _initialize(module):
        name = module.__class__.__name__
        has_learnable_weight = hasattr(module, 'weight') and ('Conv' in name or 'Linear' in name)

        if has_learnable_weight:
            if init_type == 'normal':
                init.normal_(module.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(module.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(module.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(module.weight.data, gain=gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if getattr(module, 'bias', None) is not None:
                init.constant_(module.bias.data, 0.0)
        elif 'BatchNorm2d' in name:
            init.normal_(module.weight.data, 1.0, gain)
            init.constant_(module.bias.data, 0.0)

    print('initialize network with %s' % init_type)
    net.apply(_initialize)


class conv_block(nn.Module):
    """Two stacked (Conv3x3 -> BatchNorm -> ReLU) units; spatial size preserved."""

    def __init__(self, ch_in, ch_out):
        super(conv_block, self).__init__()

        def unit(cin, cout):
            # One conv unit: same-padding 3x3 conv, batch norm, in-place ReLU.
            return [
                nn.Conv2d(cin, cout, kernel_size=3, stride=1, padding=1, bias=True),
                nn.BatchNorm2d(cout),
                nn.ReLU(inplace=True),
            ]

        # Same flat Sequential layout as two hand-written units, so
        # state_dict keys (conv.0 ... conv.5) are unchanged.
        self.conv = nn.Sequential(*unit(ch_in, ch_out), *unit(ch_out, ch_out))

    def forward(self, x):
        return self.conv(x)


class up_conv(nn.Module):
    """2x upsample (nearest) followed by a Conv3x3 -> BatchNorm -> ReLU unit."""

    def __init__(self, ch_in, ch_out):
        super(up_conv, self).__init__()
        layers = [
            nn.Upsample(scale_factor=2),  # default interpolation mode: 'nearest'
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
        ]
        self.up = nn.Sequential(*layers)

    def forward(self, x):
        return self.up(x)


class Attention_block(nn.Module):
    """Additive attention gate (Attention U-Net style).

    Projects the gating signal ``g`` and the skip features ``x`` into an
    intermediate space, and gates ``x`` with a single-channel attention
    map in (0, 1) computed from their sum.
    """

    def __init__(self, F_g, F_l, F_int):
        super(Attention_block, self).__init__()

        def project(cin, cout):
            # 1x1 projection + batch norm shared shape for both inputs.
            return nn.Sequential(
                nn.Conv2d(cin, cout, kernel_size=1, stride=1, padding=0, bias=True),
                nn.BatchNorm2d(cout),
            )

        self.W_g = project(F_g, F_int)
        self.W_x = project(F_l, F_int)

        # Collapse to one channel and squash into (0, 1).
        self.psi = nn.Sequential(
            nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(1),
            nn.Sigmoid(),
        )

        self.relu = nn.ReLU(inplace=True)

    def forward(self, g, x):
        combined = self.relu(self.W_g(g) + self.W_x(x))
        return x * self.psi(combined)


class AttU_Net(nn.Module):
    """Attention U-Net for binary segmentation.

    Five-level encoder (64 -> 1024 channels, 2x max-pool between levels)
    and a symmetric decoder whose skip connections are filtered through
    additive attention gates. The final 1x1 conv + sigmoid yields
    per-pixel probabilities in (0, 1).

    Args:
        img_ch: number of input image channels.
        output_ch: number of output (mask) channels.
    """

    def __init__(self, img_ch=3, output_ch=1):
        super(AttU_Net, self).__init__()

        self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)

        # Encoder: channel depth doubles at each level.
        self.Conv1 = conv_block(ch_in=img_ch, ch_out=64)
        self.Conv2 = conv_block(ch_in=64, ch_out=128)
        self.Conv3 = conv_block(ch_in=128, ch_out=256)
        self.Conv4 = conv_block(ch_in=256, ch_out=512)
        self.Conv5 = conv_block(ch_in=512, ch_out=1024)

        # Decoder: upsample, gate the skip connection, then fuse by concat.
        self.Up5 = up_conv(ch_in=1024, ch_out=512)
        self.Att5 = Attention_block(F_g=512, F_l=512, F_int=256)
        self.Up_conv5 = conv_block(ch_in=1024, ch_out=512)

        self.Up4 = up_conv(ch_in=512, ch_out=256)
        self.Att4 = Attention_block(F_g=256, F_l=256, F_int=128)
        self.Up_conv4 = conv_block(ch_in=512, ch_out=256)

        self.Up3 = up_conv(ch_in=256, ch_out=128)
        self.Att3 = Attention_block(F_g=128, F_l=128, F_int=64)
        self.Up_conv3 = conv_block(ch_in=256, ch_out=128)

        self.Up2 = up_conv(ch_in=128, ch_out=64)
        self.Att2 = Attention_block(F_g=64, F_l=64, F_int=32)
        self.Up_conv2 = conv_block(ch_in=128, ch_out=64)

        self.Conv_1x1 = nn.Conv2d(64, output_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        """Map an image batch (N, img_ch, H, W) to probabilities (N, output_ch, H, W).

        H and W must be divisible by 16 (four 2x poolings).
        """
        # encoding path
        x1 = self.Conv1(x)

        x2 = self.Maxpool(x1)
        x2 = self.Conv2(x2)

        x3 = self.Maxpool(x2)
        x3 = self.Conv3(x3)

        x4 = self.Maxpool(x3)
        x4 = self.Conv4(x4)

        x5 = self.Maxpool(x4)
        x5 = self.Conv5(x5)

        # decoding + concat path
        d5 = self.Up5(x5)
        x4 = self.Att5(g=d5, x=x4)
        d5 = torch.cat((x4, d5), dim=1)
        d5 = self.Up_conv5(d5)

        d4 = self.Up4(d5)
        x3 = self.Att4(g=d4, x=x3)
        d4 = torch.cat((x3, d4), dim=1)
        d4 = self.Up_conv4(d4)

        d3 = self.Up3(d4)
        x2 = self.Att3(g=d3, x=x2)
        d3 = torch.cat((x2, d3), dim=1)
        d3 = self.Up_conv3(d3)

        d2 = self.Up2(d3)
        x1 = self.Att2(g=d2, x=x1)
        d2 = torch.cat((x1, d2), dim=1)
        d2 = self.Up_conv2(d2)

        d1 = self.Conv_1x1(d2)

        # Fix: use the functional sigmoid instead of constructing a fresh
        # nn.Sigmoid module on every forward call (same output, no per-call
        # module allocation).
        return torch.sigmoid(d1)


class UNetPlusPlusLoss(nn.Module):
    """Weighted sum of binary cross-entropy and soft Dice loss.

    Expects ``pred`` to already be sigmoid probabilities in [0, 1] —
    hence ``nn.BCELoss`` rather than ``BCEWithLogitsLoss``, and no extra
    sigmoid inside the Dice term.
    """

    def __init__(self, ce_weight=0.5, dice_weight=0.5):
        super().__init__()
        self.ce_weight = ce_weight
        self.dice_weight = dice_weight
        self.ce = nn.BCELoss()

    def dice_loss(self, pred, target):
        """Soft Dice loss over the flattened probability and target tensors."""
        probs, labels = pred.view(-1), target.view(-1)

        overlap = (probs * labels).sum()
        total = probs.sum() + labels.sum()

        # The epsilon keeps the ratio finite when both tensors are all zeros.
        return 1 - (2.0 * overlap + 1e-7) / (total + 1e-7)

    def forward(self, pred, target):
        """Combine BCE and Dice according to the configured weights."""
        return (self.ce_weight * self.ce(pred, target)
                + self.dice_weight * self.dice_loss(pred, target))


if __name__ == '__main__':
    # Dataset of per-folder samples (images + per-instance masks).
    dataset = Dataset(
        root_dir='/kaggle/input/cell-seg/train',
        target_size=256,
        min_size=64,
        max_size=1024
    )

    # Fix: removed a transforms.Compose(Normalize) that was constructed but
    # never applied anywhere — Dataset takes no transform argument, so it was
    # dead code silently suggesting the inputs were normalized.
    dataloader = DataLoader(dataset, batch_size=10, shuffle=True)

    # Model, loss and optimizer.
    # NOTE(review): img_ch=4 assumes 4-channel (RGBA) input images — confirm
    # against the actual data on disk.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = AttU_Net(4, 1).to(device)
    criterion = UNetPlusPlusLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Run the training loop (prints progress, checkpoints every 3 epochs).
    train(model, dataloader, criterion, optimizer, device)
