import torch
import torch.nn as nn
import torch.nn.functional as F
import json
import torchvision
import numpy as np
from torch.cuda import device

from DWT import DWT_2D, IDWT_2D
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics import accuracy_score
import pandas as pd
from torch.optim.lr_scheduler import StepLR
from sklearn.preprocessing import MultiLabelBinarizer
from transformers import BertModel, BertTokenizer
from albumentations import Compose, RandomCrop, Flip, Normalize

class Cov2x2_BN(nn.Module):
    """2D convolution with optional BatchNorm, followed by ReLU.

    Args:
        in_channels: number of input channels.
        out_channels: number of output channels.
        kernel_size: convolution kernel size.
        padding: convolution padding.
        with_BN: when True, apply BatchNorm2d between conv and ReLU.
    """

    def __init__(self, in_channels, out_channels, kernel_size, padding, with_BN):
        super(Cov2x2_BN, self).__init__()
        self.with_BN = with_BN
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding)
        if with_BN:
            self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        """Apply conv -> (optional BN) -> ReLU and return the activation."""
        y = self.conv(x)
        return F.relu(self.bn(y) if self.with_BN else y)


class Neuron_WaveSNet_V2_2D(nn.Module):
    """
    2D U-Net-style segmentation network that downsamples with a 2D discrete
    wavelet transform (DWT) in the encoder and upsamples with transposed
    convolutions in the decoder.

    Args:
        num_class: number of output channels (segmentation classes).
        with_BN: insert BatchNorm2d after every convolution when True.
        channel_width: base channel multiplier for all stages.
        wavename: wavelet family name forwarded to DWT_2D (e.g. 'haar').
        json_file: path to a JSON file of text prompts; only used by the
            commented-out BERT branch below.
        bert_path: BERT checkpoint path; only used by the commented-out
            BERT branch below.
    """

    def __init__(self, num_class=2, with_BN=True, channel_width=4, wavename='haar', json_file='json/data3.json',
                 bert_path='bert-base-uncased'):
        super(Neuron_WaveSNet_V2_2D, self).__init__()

        # Encoder (2D convolutions): each stage is two conv blocks followed
        # by a DWT downsampling step.
        self.cov2d_11_en = Cov2x2_BN(in_channels=1, out_channels=1 * channel_width, kernel_size=3, padding=1,
                                     with_BN=with_BN)
        self.cov2d_12_en = Cov2x2_BN(in_channels=1 * channel_width, out_channels=1 * channel_width, kernel_size=3,
                                     padding=1, with_BN=with_BN)
        self.downsampling_1 = DWT_2D(wavename=wavename)

        self.cov2d_21_en = Cov2x2_BN(in_channels=1 * channel_width, out_channels=2 * channel_width, kernel_size=3,
                                     padding=1, with_BN=with_BN)
        self.cov2d_22_en = Cov2x2_BN(in_channels=2 * channel_width, out_channels=2 * channel_width, kernel_size=3,
                                     padding=1, with_BN=with_BN)
        self.downsampling_2 = DWT_2D(wavename=wavename)

        self.cov2d_31_en = Cov2x2_BN(in_channels=2 * channel_width, out_channels=4 * channel_width, kernel_size=3,
                                     padding=1, with_BN=with_BN)
        self.cov2d_32_en = Cov2x2_BN(in_channels=4 * channel_width, out_channels=4 * channel_width, kernel_size=3,
                                     padding=1, with_BN=with_BN)
        self.downsampling_3 = DWT_2D(wavename=wavename)

        self.cov2d_41_en = Cov2x2_BN(in_channels=4 * channel_width, out_channels=8 * channel_width, kernel_size=3,
                                     padding=1, with_BN=with_BN)
        self.cov2d_42_en = Cov2x2_BN(in_channels=8 * channel_width, out_channels=8 * channel_width, kernel_size=3,
                                     padding=1, with_BN=with_BN)
        self.downsampling_4 = DWT_2D(wavename=wavename)

        # Bottleneck: two conv blocks at the lowest resolution.
        self.cov2d_51 = Cov2x2_BN(in_channels=8 * channel_width, out_channels=8 * channel_width, kernel_size=3,
                                  padding=1, with_BN=with_BN)
        self.cov2d_52 = Cov2x2_BN(in_channels=8 * channel_width, out_channels=8 * channel_width, kernel_size=3,
                                  padding=1, with_BN=with_BN)

        # # BERT encoding module (disabled)
        # self.bert_module = BertEncodingModule(bert_path, channel_width)
        # self.json_file = json_file

        # Decoder (2D convolutions): kernel_size=1, stride=2, output_padding=1
        # makes each ConvTranspose2d exactly double H and W.
        self.upsampling_4 = nn.ConvTranspose2d(in_channels=8 * channel_width, out_channels=8 * channel_width,
                                               kernel_size=1, stride=2, output_padding=1, padding=0)
        # in_channels is 16*cw because the upsampled features are concatenated
        # with the matching encoder skip connection (8*cw + 8*cw).
        self.cov2d_41_de = Cov2x2_BN(in_channels=16 * channel_width, out_channels=8 * channel_width, kernel_size=3,
                                     padding=1, with_BN=with_BN)
        self.cov2d_42_de = Cov2x2_BN(in_channels=8 * channel_width, out_channels=4 * channel_width, kernel_size=3,
                                     padding=1, with_BN=with_BN)

        self.upsampling_3 = nn.ConvTranspose2d(in_channels=4 * channel_width, out_channels=4 * channel_width,
                                               kernel_size=1, stride=2, output_padding=1, padding=0)
        self.cov2d_31_de = Cov2x2_BN(in_channels=8 * channel_width, out_channels=4 * channel_width, kernel_size=3,
                                     padding=1, with_BN=with_BN)
        self.cov2d_32_de = Cov2x2_BN(in_channels=4 * channel_width, out_channels=2 * channel_width, kernel_size=3,
                                     padding=1, with_BN=with_BN)

        self.upsampling_2 = nn.ConvTranspose2d(in_channels=2 * channel_width, out_channels=2 * channel_width,
                                               kernel_size=1, stride=2, output_padding=1, padding=0)
        self.cov2d_21_de = Cov2x2_BN(in_channels=4 * channel_width, out_channels=2 * channel_width, kernel_size=3,
                                     padding=1, with_BN=with_BN)
        self.cov2d_22_de = Cov2x2_BN(in_channels=2 * channel_width, out_channels=1 * channel_width, kernel_size=3,
                                     padding=1, with_BN=with_BN)

        self.upsampling_1 = nn.ConvTranspose2d(in_channels=1 * channel_width, out_channels=1 * channel_width,
                                               kernel_size=1, stride=2, output_padding=1, padding=0)
        self.cov2d_11_de = Cov2x2_BN(in_channels=2 * channel_width, out_channels=1 * channel_width, kernel_size=3,
                                     padding=1, with_BN=with_BN)
        self.cov2d_12_de = Cov2x2_BN(in_channels=1 * channel_width, out_channels=1 * channel_width, kernel_size=3,
                                     padding=1, with_BN=with_BN)

        # Final 1x1 conv producing per-class logits.
        self.cov_final = nn.Conv2d(in_channels=1 * channel_width, out_channels=num_class, kernel_size=1)

    def forward(self, input):
        """Run the encoder-decoder and return per-pixel class logits.

        NOTE(review): this code treats DWT_2D's return value as a single
        tensor, whereas Neuron_WaveSNet_V3_2D below unpacks four sub-bands
        from the same class — confirm which contract DWT.py implements.
        """
        # # If input is 5-dimensional, squeeze away the trailing size-1 dim
        # if input.dim() == 5 and input.size(-1) == 1:
        #     input = input.squeeze(-1)
        # Encoder: keep each stage's pre-downsampling features as skip connections.
        output_1 = self.cov2d_12_en(self.cov2d_11_en(input))
        output = self.downsampling_1(output_1)

        output_2 = self.cov2d_22_en(self.cov2d_21_en(output))
        output = self.downsampling_2(output_2)

        output_3 = self.cov2d_32_en(self.cov2d_31_en(output))
        output = self.downsampling_3(output_3)

        output_4 = self.cov2d_42_en(self.cov2d_41_en(output))
        output = self.downsampling_4(output_4)

        output = self.cov2d_52(self.cov2d_51(output))

        # # Read the JSON file and extract the text descriptions (disabled)
        # device = output.device
        # with open(self.json_file, 'r', encoding='utf-8') as f:
        #     data = json.load(f)
        #     text_data = [item.get('text1', '') for item in data]
        #
        # # BERT encoding
        # self.bert_module.to(device)
        # bert_output = self.bert_module(text_data, device)
        # bert_output = bert_output.view(bert_output.size(0), -1, 1, 1)
        #
        # # Tile bert_output to match the batch size
        # repeat_factor = (output.size(0) + bert_output.size(0) - 1) // bert_output.size(0)
        # bert_output = bert_output.repeat(repeat_factor, 1, 1, 1)
        # bert_output = bert_output[:output.size(0), :, :, :]
        # output = output + bert_output

        # Decoder: upsample, concatenate with the encoder skip, then convolve.
        output = self.upsampling_4(output)
        output = self.cov2d_42_de(self.cov2d_41_de(torch.cat((output, output_4), dim=1)))

        output = self.upsampling_3(output)
        output = self.cov2d_32_de(self.cov2d_31_de(torch.cat((output, output_3), dim=1)))

        output = self.upsampling_2(output)
        output = self.cov2d_22_de(self.cov2d_21_de(torch.cat((output, output_2), dim=1)))

        output = self.upsampling_1(output)
        output = self.cov2d_12_de(self.cov2d_11_de(torch.cat((output, output_1), dim=1)))

        output = self.cov_final(output)
        return output

class ConvolutionalBlock(nn.Module):
    """Two 3x3 conv layers, each followed by BatchNorm and ReLU."""

    def __init__(self, in_channels, out_channels):
        super(ConvolutionalBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        """Apply conv->BN->ReLU twice; spatial size is preserved (padding=1)."""
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        return out

class AttentionBlock(nn.Module):
    def __init__(self, in_channels):
        super(AttentionBlock, self).__init__()
        self.query_conv = nn.Conv2d(in_channels, in_channels // 2, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels, in_channels // 2, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels, in_channels, kernel_size=1)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        batch_size, C, H, W = x.size()
        query = self.query_conv(x).view(batch_size, -1, H * W)
        key = self.key_conv(x).view(batch_size, -1, H * W).permute(0, 2, 1)
        attention = self.softmax(torch.bmm(query, key))  # Compute attention weights
        value = self.value_conv(x).view(batch_size, -1, H * W)

        out = torch.bmm(value, attention.permute(0, 2, 1))  # Apply attention
        out = out.view(batch_size, C, H, W)
        return out

class EnhancedMultiScaleUNet(nn.Module):
    """Encoder-decoder segmentation network with attention-gated skip sums.

    No pooling/upsampling is used, so every stage runs at the input's
    spatial resolution; only the channel count changes.

    Bug fix: the original forward() stopped after ``dec4 = self.dec4``
    (assigning the module object without calling it) and returned None.
    The decoder path and the return statement are now implemented.
    """

    def __init__(self, num_classes=2):
        super(EnhancedMultiScaleUNet, self).__init__()

        # Encoder
        self.enc1 = ConvolutionalBlock(1, 64)
        self.enc2 = ConvolutionalBlock(64, 128)
        self.enc3 = ConvolutionalBlock(128, 256)
        self.enc4 = ConvolutionalBlock(256, 512)

        # Bottleneck
        self.bottleneck = ConvolutionalBlock(512, 1024)

        # Attention blocks applied to the encoder features used as skips
        self.attention1 = AttentionBlock(512)
        self.attention2 = AttentionBlock(256)
        self.attention3 = AttentionBlock(128)

        # Decoder
        self.dec4 = ConvolutionalBlock(1024, 512)
        self.dec3 = ConvolutionalBlock(512, 256)
        self.dec2 = ConvolutionalBlock(256, 128)
        self.dec1 = ConvolutionalBlock(128, 64)

        # Final 1x1 convolution producing per-class logits
        self.final = nn.Conv2d(64, num_classes, kernel_size=1)

    def forward(self, x):
        """Return per-pixel logits of shape (N, num_classes, H, W)."""
        # Encoding path (spatial size unchanged throughout)
        enc1 = self.enc1(x)
        enc2 = self.enc2(enc1)
        enc3 = self.enc3(enc2)
        enc4 = self.enc4(enc3)

        # Bottleneck
        bottleneck = self.bottleneck(enc4)

        # Decoding path: each stage reduces channels, then adds the
        # attention-gated encoder features (channel counts match: 512/256/128).
        dec4 = self.dec4(bottleneck) + self.attention1(enc4)
        dec3 = self.dec3(dec4) + self.attention2(enc3)
        dec2 = self.dec2(dec3) + self.attention3(enc2)
        dec1 = self.dec1(dec2)

        return self.final(dec1)


class UNet(nn.Module):
    """Standard U-Net: 4-level encoder with 2x max-pooling, a bottleneck,
    and a decoder that upsamples, concatenates the encoder skip, and fuses
    with a conv block. Output spatial size equals input size (H and W must
    be divisible by 16).

    Bug fixes vs. the original:
    - The encoder had no pooling while the decoder upsampled with stride 2,
      so ``torch.cat`` failed on mismatched spatial sizes.
    - Skip concatenation doubled the channels fed into the next decoder
      stage (e.g. 1024 into a 512-input block) and into ``final_conv``
      (128 into a 64-input conv), crashing the forward pass.
    """

    def __init__(self, in_channels, out_channels):
        super(UNet, self).__init__()

        # Encoder
        self.enc1 = self.conv_block(in_channels, 64)
        self.enc2 = self.conv_block(64, 128)
        self.enc3 = self.conv_block(128, 256)
        self.enc4 = self.conv_block(256, 512)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

        # Bottleneck
        self.bottleneck = self.conv_block(512, 1024)

        # Decoder: transposed conv halves channels and doubles H/W; the
        # following conv block fuses the concatenated skip connection,
        # hence its doubled input channel count.
        self.up4 = self.upconv_block(1024, 512)
        self.dec4 = self.conv_block(1024, 512)
        self.up3 = self.upconv_block(512, 256)
        self.dec3 = self.conv_block(512, 256)
        self.up2 = self.upconv_block(256, 128)
        self.dec2 = self.conv_block(256, 128)
        self.up1 = self.upconv_block(128, 64)
        self.dec1 = self.conv_block(128, 64)

        # Final 1x1 convolution producing per-class logits
        self.final_conv = nn.Conv2d(64, out_channels, kernel_size=1)

    def conv_block(self, in_channels, out_channels):
        """Two 3x3 convs with ReLU; preserves spatial size."""
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True)
        )

    def upconv_block(self, in_channels, out_channels):
        """Transposed conv that doubles H and W, followed by ReLU."""
        return nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        """Return per-pixel logits of shape (N, out_channels, H, W)."""
        # Contracting path: conv block, then pool before the next level.
        enc1 = self.enc1(x)
        enc2 = self.enc2(self.pool(enc1))
        enc3 = self.enc3(self.pool(enc2))
        enc4 = self.enc4(self.pool(enc3))

        bottleneck = self.bottleneck(self.pool(enc4))

        # Expanding path: upsample, concat the matching encoder skip, fuse.
        dec4 = self.dec4(torch.cat((self.up4(bottleneck), enc4), dim=1))
        dec3 = self.dec3(torch.cat((self.up3(dec4), enc3), dim=1))
        dec2 = self.dec2(torch.cat((self.up2(dec3), enc2), dim=1))
        dec1 = self.dec1(torch.cat((self.up1(dec2), enc1), dim=1))

        return self.final_conv(dec1)


# 示例的 2D 卷积 + BN 模块
class Cov3x3_BN(nn.Module):
    """3x3 conv -> optional BatchNorm -> ReLU, packaged as one Sequential."""

    def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, with_BN=True):
        super(Cov3x3_BN, self).__init__()
        ops = [nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding)]
        if with_BN:
            ops.append(nn.BatchNorm2d(out_channels))
        ops.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*ops)

    def forward(self, x):
        """Run the conv/BN/ReLU pipeline on x."""
        return self.conv(x)

# 主网络
class Neuron_WaveSNet_V3_2D(nn.Module):
    """
    Wavelet-based U-Net for 2D image segmentation.

    The encoder downsamples with a 2D DWT (keeping the high-frequency
    sub-bands LH/HL/HH of every level); the decoder upsamples with a 2D
    IDWT that recombines the current decoder features (as the LL band)
    with the saved high-frequency sub-bands.

    Bug fix: the original forward() computed the bottleneck
    ``self.cov2d_51(LL_4)`` and every intermediate decoder result, then
    immediately overwrote them — each IDWT re-consumed the encoder's own
    ``LL_k`` band, so the bottleneck and all decoder convolutions before
    the last pair had no effect on the output. The IDWT calls now take
    the current decoder features as their low-frequency input. Channel
    counts line up at every level because each ``cov2d_k2_de`` block
    outputs exactly the channel width of that level's sub-bands.
    """
    def __init__(self, num_class=2, with_BN=True, channel_width=4, wavename='db4'):
        super(Neuron_WaveSNet_V3_2D, self).__init__()
        # 512 x 512
        self.cov2d_11_en = Cov3x3_BN(in_channels=1, out_channels=1 * channel_width, kernel_size=3, padding=1, with_BN=with_BN)
        self.cov2d_12_en = Cov3x3_BN(in_channels=1 * channel_width, out_channels=1 * channel_width, kernel_size=3, padding=1, with_BN=with_BN)
        self.downsampling_1 = DWT_2D(wavename=wavename)
        # 256 x 256
        self.cov2d_21_en = Cov3x3_BN(in_channels=1 * channel_width, out_channels=2 * channel_width, kernel_size=3, padding=1, with_BN=with_BN)
        self.cov2d_22_en = Cov3x3_BN(in_channels=2 * channel_width, out_channels=2 * channel_width, kernel_size=3, padding=1, with_BN=with_BN)
        self.downsampling_2 = DWT_2D(wavename=wavename)
        # 128 x 128
        self.cov2d_31_en = Cov3x3_BN(in_channels=2 * channel_width, out_channels=4 * channel_width, kernel_size=3, padding=1, with_BN=with_BN)
        self.cov2d_32_en = Cov3x3_BN(in_channels=4 * channel_width, out_channels=4 * channel_width, kernel_size=3, padding=1, with_BN=with_BN)
        self.downsampling_3 = DWT_2D(wavename=wavename)
        # 64 x 64
        self.cov2d_41_en = Cov3x3_BN(in_channels=4 * channel_width, out_channels=8 * channel_width, kernel_size=3, padding=1, with_BN=with_BN)
        self.cov2d_42_en = Cov3x3_BN(in_channels=8 * channel_width, out_channels=8 * channel_width, kernel_size=3, padding=1, with_BN=with_BN)
        self.downsampling_4 = DWT_2D(wavename=wavename)

        # 32 x 32 (bottleneck)
        self.cov2d_51 = Cov3x3_BN(in_channels=8 * channel_width, out_channels=8 * channel_width, kernel_size=3, padding=1, with_BN=with_BN)

        self.upsampling_4 = IDWT_2D(wavename=wavename)
        # 64 x 64; in_channels doubled by the skip concatenation (8cw + 8cw)
        self.cov2d_41_de = Cov3x3_BN(in_channels=16 * channel_width, out_channels=8 * channel_width, kernel_size=3, padding=1, with_BN=with_BN)
        self.cov2d_42_de = Cov3x3_BN(in_channels=8 * channel_width, out_channels=4 * channel_width, kernel_size=3, padding=1, with_BN=with_BN)
        self.upsampling_3 = IDWT_2D(wavename=wavename)
        # 128 x 128
        self.cov2d_31_de = Cov3x3_BN(in_channels=8 * channel_width, out_channels=4 * channel_width, kernel_size=3, padding=1, with_BN=with_BN)
        self.cov2d_32_de = Cov3x3_BN(in_channels=4 * channel_width, out_channels=2 * channel_width, kernel_size=3, padding=1, with_BN=with_BN)
        self.upsampling_2 = IDWT_2D(wavename=wavename)
        # 256 x 256
        self.cov2d_21_de = Cov3x3_BN(in_channels=4 * channel_width, out_channels=2 * channel_width, kernel_size=3, padding=1, with_BN=with_BN)
        self.cov2d_22_de = Cov3x3_BN(in_channels=2 * channel_width, out_channels=1 * channel_width, kernel_size=3, padding=1, with_BN=with_BN)
        self.upsampling_1 = IDWT_2D(wavename=wavename)
        # 512 x 512
        self.cov2d_11_de = Cov3x3_BN(in_channels=2 * channel_width, out_channels=1 * channel_width, kernel_size=3, padding=1, with_BN=with_BN)
        self.cov2d_12_de = Cov3x3_BN(in_channels=1 * channel_width, out_channels=1 * channel_width, kernel_size=3, padding=1, with_BN=with_BN)

        self.cov_final = nn.Conv2d(in_channels=1 * channel_width, out_channels=num_class, kernel_size=1)

    def forward(self, input):
        """Return per-pixel class logits with the input's spatial size."""
        # Encoder: save high-frequency sub-bands (and skip features) per level.
        output_1 = self.cov2d_12_en(self.cov2d_11_en(input))
        LL_1, LH_1, HL_1, HH_1 = self.downsampling_1(output_1)

        output_2 = self.cov2d_22_en(self.cov2d_21_en(LL_1))
        LL_2, LH_2, HL_2, HH_2 = self.downsampling_2(output_2)

        output_3 = self.cov2d_32_en(self.cov2d_31_en(LL_2))
        LL_3, LH_3, HL_3, HH_3 = self.downsampling_3(output_3)

        output_4 = self.cov2d_42_en(self.cov2d_41_en(LL_3))
        LL_4, LH_4, HL_4, HH_4 = self.downsampling_4(output_4)

        # Bottleneck
        output = self.cov2d_51(LL_4)

        # Decoder: IDWT the current features (as the LL band) together with
        # the encoder's high-frequency sub-bands, then fuse with the skip.
        output = self.upsampling_4(output, LH_4, HL_4, HH_4)
        output = self.cov2d_42_de(self.cov2d_41_de(torch.cat((output, output_4), dim=1)))

        output = self.upsampling_3(output, LH_3, HL_3, HH_3)
        output = self.cov2d_32_de(self.cov2d_31_de(torch.cat((output, output_3), dim=1)))

        output = self.upsampling_2(output, LH_2, HL_2, HH_2)
        output = self.cov2d_22_de(self.cov2d_21_de(torch.cat((output, output_2), dim=1)))

        output = self.upsampling_1(output, LH_1, HL_1, HH_1)
        output = self.cov2d_12_de(self.cov2d_11_de(torch.cat((output, output_1), dim=1)))

        output = self.cov_final(output)

        return output

class MedSegDataset(Dataset):
    """Dataset yielding (image, mask) pairs for segmentation.

    Args:
        images: indexable collection of images.
        masks: optional indexable collection of masks; when omitted,
            each item's mask slot is None.
        transform: optional callable applied to the image (and, when
            present, to the mask as well).
    """

    def __init__(self, images, masks=None, transform=None):
        self.images = images
        self.masks = masks
        self.transform = transform

    def __len__(self):
        """Number of samples, taken from the image collection."""
        return len(self.images)

    def __getitem__(self, idx):
        """Return the (possibly transformed) image and its mask (or None)."""
        image = self.images[idx]
        mask = None if self.masks is None else self.masks[idx]
        if self.transform:
            image = self.transform(image)
            if mask is not None:
                mask = self.transform(mask)
        return image, mask

    # Load the data


train_images = np.load('datasets/images_medseg.npy')  # training images
train_masks = np.load('datasets/masks_medseg.npy')  # training masks
test_images = np.load('datasets/test_images_medseg.npy')  # test images

# Normalize pixel values to [0, 1]
train_images = train_images.astype('float32') / 255.0
test_images = test_images.astype('float32') / 255.0

# Keep only the "ground glass" and "consolidations" mask channels
train_masks = train_masks[:, :, :, :2]  # per original comment: (100, 512, 512, 2) — verify against the .npy files
ground_glass_mask = (train_masks[:, :, :, 0] > 0).astype(float)  # ground glass -> channel 0
consolidations_mask = (train_masks[:, :, :, 1] > 0).astype(float)  # consolidations -> channel 1

# Merge the two binary masks into one; a pixel is foreground if either class is present.
# NOTE(review): this collapses the two classes into a single binary label,
# while the model below outputs 2 logits — confirm the intended label scheme.
train_masks = np.maximum(ground_glass_mask, consolidations_mask)

# Add a channel dimension for PyTorch's [batch, channels, height, width] layout
train_images = np.expand_dims(train_images, axis=1)  # shape: (N, 1, 512, 512)
train_masks = np.expand_dims(train_masks, axis=1)  # shape: (N, 1, 512, 512)
# Convert masks back to class indices (0/1) by dropping the channel dim
train_masks = train_masks.squeeze()  # shape: (N, 512, 512)

# Data augmentation pipeline
# NOTE(review): `transform` is never passed to a Dataset/DataLoader below,
# so these augmentations are currently unused — confirm intent.
transform = Compose([
    RandomCrop(height=512, width=512),
    Flip(),
    Normalize()
])

# Build the data loaders; the train loader yields (image, mask) pairs
batch_size = 16
train_loader = DataLoader(list(zip(train_images, train_masks)), batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_images, batch_size=batch_size, shuffle=False)

# Initialize model, loss function, and optimizer
# NOTE(review): neither the model nor the batches are moved to a GPU device,
# so training runs on CPU as written — confirm intent.
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Neuron_WaveSNet_V3_2D()  # the wavelet U-Net defined above
criterion = nn.CrossEntropyLoss()  # cross-entropy over the 2 output classes
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# # Learning-rate scheduler: halve the LR every 20 epochs
scheduler = StepLR(optimizer, step_size=20, gamma=0.5)

# Training loop
num_epochs = 100
for epoch in range(num_epochs):
    model.train()
    running_loss = 0.0
    for images, masks in train_loader:
        # NOTE(review): the DataLoader already yields tensors; torch.tensor()
        # on a tensor copies it (and PyTorch warns) — .float()/.long() would do.
        images = torch.tensor(images, dtype=torch.float)
        masks = torch.tensor(masks, dtype=torch.long) # masks must be long for CrossEntropyLoss
        # NOTE(review): squeeze(-1) only removes a trailing size-1 dim; with
        # (B, 1, 512, 512) inputs this is a no-op — confirm the intended shape.
        images = images.squeeze(-1)  # Remove the last dimension if it's 1
        # Forward pass and loss
        outputs = model(images)

        # outputs: (N, 2, H, W); masks: (N, H, W)
        loss = criterion(outputs, masks)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

    scheduler.step()
    print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {running_loss / len(train_loader)}")

# Predict on the test set
model.eval()
predictions = []
with torch.no_grad():
    for images in test_loader:
        images = torch.tensor(images, dtype=torch.float)
        # Reorder to [batch_size, channels, height, width]
        # NOTE(review): assumes the test .npy is channels-last — confirm.
        images = images.permute(0, 3, 1, 2)
        outputs = model(images)
        preds = torch.argmax(outputs, dim=1)  # per-pixel predicted class
        predictions.append(preds.cpu().numpy())

    # Build the submission rows
submission_data = []
height, width = 512, 512  # assumed image size

# NOTE(review): each `pred` here is a whole BATCH (batch, H, W), so
# pred_flatten spans batch*H*W pixels while base_id only advances by
# height*width per `idx` — IDs can collide across batches. Verify against
# the expected submission format.
for idx, pred in enumerate(predictions):
    # Flatten the batch's predictions to one dimension
    pred_flatten = pred.flatten()

    for pixel_idx in range(len(pred_flatten)):
        # Unique integer ID for this pixel
        base_id = idx * height * width + pixel_idx

        # Ground-glass row (class 0)
        submission_data.append({
            'Id': base_id,  # 唯一 ID of int type
            'Predicted': 1 if pred_flatten[pixel_idx] == 0 else 0  # ground glass 预测
        })

        # Consolidation row (class 1), offset so IDs don't clash with the row above
        submission_data.append({
            'Id': base_id + len(pred_flatten),  # 确保与前一行的 ID 不重复
            'Predicted': 1 if pred_flatten[pixel_idx] == 1 else 0  # consolidation 预测
        })

    # Create the DataFrame and save it as CSV
submission = pd.DataFrame(submission_data)
submission.to_csv('submission.csv', index=False)

# Report the number of rows written
print(f'Submission file created with {len(submission_data)} rows: {len(submission)}')