from calendar import c
from select import select
import torch
from torch import nn
import torch
import torch
import torch.nn as nn
import torch.nn.functional as F
def calculate_pixel_center(grid_x, grid_y, image_size=512, grid_size=8):
    """Map a (grid_x, grid_y) cell index to the pixel coordinates of that cell's center.

    :param grid_x: column index of the grid cell
    :param grid_y: row index of the grid cell
    :param image_size: side length of the (square) image in pixels
    :param grid_size: number of cells along each image side
    :return: (center_x, center_y) pixel coordinates of the cell center
    """
    cell = image_size // grid_size
    half = cell // 2
    return grid_x * cell + half, grid_y * cell + half
    
def sample_indices(height, width):
    """Return the indices of every other row and column as an (N, 2) tensor.

    Each output row is a (row, col) pair, i.e. (y, x) in image terms.

    NOTE(review): this function is shadowed by a later definition of the same
    name in this file; consider renaming one of them.
    """
    rows = torch.arange(0, height, 2)
    cols = torch.arange(0, width, 2)
    # meshgrid (default 'ij' indexing) pairs every sampled row with every
    # sampled column, giving a (R, C, 2) grid. reshape(-1, 2) keeps each
    # (row, col) pair together; the previous reshape(2, -1).T flattened in
    # row-major order and interleaved y/x values, scrambling the pairs.
    grid = torch.stack(torch.meshgrid(rows, cols), dim=2)
    return grid.reshape(-1, 2)
import torch
import torch.nn as nn

import torch

def sample_indices(height, width, center_x=None, center_y=None, step=2, max_layers=None):
    """Sample (y, x) indices on concentric rectangles expanding from a center point.

    - height, width: image height and width
    - center_x, center_y: center point; defaults to the geometric image center
    - step: expansion step per rectangle layer (also the spacing along each edge)
    - max_layers: maximum number of layers to expand; None means "until the
      rectangle covers the whole image"
    :return: LongTensor of shape (N, 2), each row a (y, x) index pair
    """
    if center_x is None:
        center_x = width // 2
    if center_y is None:
        center_y = height // 2

    indices = []
    layer = 0

    while True:
        # Bounds of the current rectangle layer, clamped to the image.
        top = max(center_y - layer * step, 0)
        bottom = min(center_y + layer * step, height - 1)
        left = max(center_x - layer * step, 0)
        right = min(center_x + layer * step, width - 1)

        # Stop once the rectangle covers the whole image or the layer cap is
        # reached. Use an explicit None check so max_layers=0 is honored
        # (previously `max_layers and ...` treated 0 as "no cap").
        if (top == 0 and bottom == height - 1 and left == 0 and right == width - 1) \
                or (max_layers is not None and layer >= max_layers):
            break

        # Top edge.
        for x in range(left, right + 1, step):
            indices.append((top, x))
        # Bottom edge — skipped when it coincides with the top edge (layer 0,
        # or a rectangle fully clamped in y), which previously duplicated
        # those points.
        if bottom != top:
            for x in range(left, right + 1, step):
                indices.append((bottom, x))
        # Left and right edges, excluding the corners already covered above.
        for y in range(top + step, bottom, step):
            indices.append((y, left))
        if right != left:
            for y in range(top + step, bottom, step):
                indices.append((y, right))

        layer += 1

    if not indices:
        # e.g. a 1x1 image or max_layers=0: return an empty (0, 2) tensor
        # instead of a shapeless 1-D tensor from torch.tensor([]).
        return torch.empty((0, 2), dtype=torch.long)
    return torch.tensor(indices)

def calculate_sampling_accuracy(pred, label, height, width, step=2, max_layers=20):
    """Compute prediction accuracy over all center-expansion sample points.

    pred: model output logits, assumed (B, C, H, W) — indexed as pred[:, :, y, x].
    label: ground-truth labels with the same layout.
    height, width: image height and width.
    step: sampling step per layer.
    max_layers: maximum number of expansion layers.
    :return: fraction of sampled points whose rounded sigmoid(pred) equals label.
    """
    indices = sample_indices(height, width, step=step, max_layers=max_layers)
    # Vectorized gather of all sample points at once replaces the previous
    # per-point Python loop + torch.stack: (B, C, N) in a single indexing op.
    ys, xs = indices[:, 0], indices[:, 1]
    sampled_pred = torch.round(torch.sigmoid(pred[:, :, ys, xs])).reshape(-1)
    sampled_label = label[:, :, ys, xs].reshape(-1)

    correct = (sampled_pred == sampled_label).float().sum()
    total = sampled_label.numel()
    accuracy = correct / total

    return accuracy.item()


class CustomBCELoss(nn.Module):
    """BCE-with-logits loss computed at the image center and/or at points
    sampled on rectangles expanding outward from the center (via sample_indices)."""

    def __init__(self, step=2, max_layers=20, mode="both"):
        """
        :param step: expansion step per rectangle layer
        :param max_layers: maximum number of expansion layers
        :param mode: which loss terms to compute
                     - "center_only": loss at the center pixel only
                     - "sampling_only": loss at the sampled points only
                     - "both": 0.7 * center loss + 0.3 * sampled loss
        """
        super().__init__()
        self.step = step
        self.max_layers = max_layers
        self.mode = mode

    def forward(self, inputs, targets):
        batch_size, channels, height, width = inputs.shape
        assert channels == 1, "For binary classification, the number of channels must be 1."

        if self.mode == "center_only":
            return self._center_loss(inputs, targets, height, width)

        # Indices of points sampled outward from the image center.
        indices = sample_indices(height, width, step=self.step, max_layers=self.max_layers)
        # Vectorized gather replaces the previous per-point Python loop:
        # (B, 1, N) flattened to (B, N), same values the loop + stack produced.
        ys, xs = indices[:, 0], indices[:, 1]
        sampled_inputs = inputs[:, :, ys, xs].reshape(batch_size, -1)
        sampled_targets = targets[:, :, ys, xs].reshape(batch_size, -1)
        sampled_loss = F.binary_cross_entropy_with_logits(sampled_inputs, sampled_targets)

        if self.mode == "sampling_only":
            return sampled_loss
        if self.mode == "both":
            center_loss = self._center_loss(inputs, targets, height, width)
            # Fixed weighting between the two terms (values from the original
            # implementation — presumably tuned empirically).
            return 0.7 * center_loss + 0.3 * sampled_loss

        # Previously an unrecognized mode silently returned None; fail loudly.
        raise ValueError(f"Unknown mode: {self.mode!r}")

    def _center_loss(self, inputs, targets, height, width):
        """BCE-with-logits at the geometric center pixel."""
        center_y, center_x = height // 2, width // 2
        center_input = inputs[:, :, center_y, center_x]
        center_target = targets[:, :, center_y, center_x]
        return F.binary_cross_entropy_with_logits(center_input, center_target.float())
   

def multi_center_loss(inputs, targets, center_points):
    """Average BCE-with-logits loss over several center points.

    :param inputs: model output logits, indexed as inputs[:, :, y, x]
    :param targets: ground-truth labels with the same layout
    :param center_points: list of (y, x) center coordinates
    :return: mean of the per-center losses
    :raises ValueError: if center_points is empty (previously a bare
        ZeroDivisionError from the final division)
    """
    if not center_points:
        raise ValueError("center_points must contain at least one (y, x) pair")
    losses = [
        F.binary_cross_entropy_with_logits(
            inputs[:, :, y, x], targets[:, :, y, x].float()
        )
        for (y, x) in center_points
    ]
    # Mean of per-center losses == original sum / len(center_points).
    return torch.stack(losses).mean()

def calculate_sampling_accuracy(pred, label, height, width, step=2, max_layers=20):
    """Compute prediction accuracy over all center-expansion sample points.

    NOTE(review): this duplicates (and shadows) an earlier definition of the
    same name in this file; consider keeping only one copy.

    pred: model output logits, assumed (B, C, H, W) — indexed as pred[:, :, y, x].
    label: ground-truth labels with the same layout.
    height, width: image height and width.
    step: expansion step per layer.
    max_layers: maximum number of expansion layers.
    :return: fraction of sampled points whose rounded sigmoid(pred) equals label.
    """
    indices = sample_indices(height, width, step=step, max_layers=max_layers)
    # Debug print of the sampled indices removed.
    # Vectorized gather of every sample point at once replaces the previous
    # per-point loop + torch.stack.
    ys, xs = indices[:, 0], indices[:, 1]
    sampled_pred = torch.round(torch.sigmoid(pred[:, :, ys, xs])).reshape(-1)
    sampled_label = label[:, :, ys, xs].reshape(-1)

    correct = (sampled_pred == sampled_label).float().sum()
    total = sampled_label.numel()
    accuracy = correct / total

    return accuracy.item()

import torch
import torch.nn as nn

class PixelWiseCrossEntropyLoss(nn.Module):
    """Mean binary cross-entropy over every pixel of the prediction map."""

    def __init__(self):
        super().__init__()

    def forward(self, outputs, targets):
        """
        Compute the mean per-pixel BCE loss over the full map and the batch.

        :param outputs: raw network logits (batch_size, 1, H, W)
        :param targets: ground-truth labels (batch_size, 1, H, W), float in [0, 1]
        :return: scalar loss tensor
        """
        # binary_cross_entropy_with_logits fuses the sigmoid into the loss,
        # which is numerically stable for large-magnitude logits (the previous
        # torch.sigmoid + nn.BCELoss pair can saturate and produce inf/NaN).
        # Its default reduction='mean' averages over all pixels and the batch,
        # matching the old reduction='none' followed by .mean().
        return F.binary_cross_entropy_with_logits(outputs, targets)
