from calendar import c
from select import select
import torch
from torch import nn
import torch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch
import torch.nn as nn
from sklearn.metrics import roc_curve, auc
import numpy as np
from u_net_mf import *
def calculate_pixel_center(grid_x, grid_y, image_size=512, grid_size=8):
    """Map grid-cell indices to the pixel coordinates of the cell center.

    :param grid_x: column index of the grid cell
    :param grid_y: row index of the grid cell
    :param image_size: side length of the square image in pixels
    :param grid_size: number of grid cells per image side
    :return: (center_x, center_y) pixel coordinates of the cell center
    """
    cell = image_size // grid_size
    half = cell // 2
    return grid_x * cell + half, grid_y * cell + half
    
class CustomBCELossWithAuxiliaryPoints(nn.Module):
    """BCE-with-logits loss on a target pixel plus surrounding auxiliary pixels.

    The total loss is a weighted sum of:
      * the BCE loss at ``center_point``; and
      * the mean BCE loss over auxiliary points generated around it.
    """

    def __init__(self, center_point=(256, 256), step=16, max_layers=9, weight_center=0.7, weight_aux=0.3):
        """
        Loss function with auxiliary points.
        :param center_point: pixel coordinates of the center point (y, x)
        :param step: auxiliary-point expansion step size
        :param max_layers: maximum number of expansion layers
        :param weight_center: weight for the center-point loss term
        :param weight_aux: weight for the auxiliary-point loss term
        """
        super(CustomBCELossWithAuxiliaryPoints, self).__init__()
        self.center_point = center_point
        # NOTE(review): step is currently unused by forward(); the auxiliary
        # window size is derived from max_layers instead — confirm intent.
        self.step = step
        self.max_layers = max_layers
        self.weight_center = weight_center
        self.weight_aux = weight_aux

    def forward(self, inputs, targets):
        batch_size, channels, height, width = inputs.shape
        assert channels == 1, "Binary classification requires single-channel inputs."

        # Center-point loss.
        center_y, center_x = self.center_point
        center_input = inputs[:, :, center_y, center_x]
        center_target = targets[:, :, center_y, center_x]
        center_loss = F.binary_cross_entropy_with_logits(center_input, center_target.float())

        # Auxiliary-point loss.
        # BUG FIX: the previous call passed keywords (d=..., max_layers=...)
        # that generate_auxiliary_points() does not accept, raising TypeError
        # at runtime. Call it with its actual signature, and exclude the
        # center pixel so it is not counted in both loss terms.
        auxiliary_points = generate_auxiliary_points(
            center_x=center_x,
            center_y=center_y,
            current_layer=self.max_layers,
            image_size=height,
            exclude_point=(center_y, center_x),
        )
        aux_inputs = []
        aux_targets = []
        for y, x in auxiliary_points:
            aux_inputs.append(inputs[:, :, y, x])
            aux_targets.append(targets[:, :, y, x])
        aux_inputs = torch.stack(aux_inputs, dim=-1).view(batch_size, -1)
        # Cast targets to float for consistency with the center term (BCE
        # requires floating-point targets even if the mask is integer-typed).
        aux_targets = torch.stack(aux_targets, dim=-1).view(batch_size, -1).float()
        aux_loss = F.binary_cross_entropy_with_logits(aux_inputs, aux_targets)

        # Weighted combination of the two terms.
        total_loss = self.weight_center * center_loss + self.weight_aux * aux_loss
        return total_loss


import torch

import torch

def generate_auxiliary_points(center_x, center_y, current_layer, image_size=512, exclude_point=None, image=None,
                              threshold=0.1):
    """Generate the auxiliary-point list for the given expansion layer.

    Returns every integer pixel coordinate inside a square window of
    half-width ``d = 2**current_layer - 1`` around the center, clipped to the
    image bounds, optionally dropping one excluded point.

    :param center_x: x coordinate of the center point
    :param center_y: y coordinate of the center point
    :param current_layer: current expansion layer (1 to 9)
    :param image_size: image side length (default 512x512)
    :param exclude_point: coordinate (y, x) to drop from the result, if any
    :param image: unused — the original docstring promised black-border
        filtering but no such logic exists. TODO(review): implement or drop.
    :param threshold: unused (see ``image``)
    :return: tensor of auxiliary points for this layer, shape [N, 2], rows (y, x)
    """
    # Half-width of the window grows exponentially with the layer index.
    d = 2 ** current_layer - 1

    # Clip the layer window to the image bounds.
    top = max(center_y - d, 0)
    bottom = min(center_y + d, image_size - 1)
    left = max(center_x - d, 0)
    right = min(center_x + d, image_size - 1)

    # Enumerate every (y, x) pair in the window. indexing="ij" preserves the
    # historical default and silences the torch.meshgrid deprecation warning.
    y_coords, x_coords = torch.meshgrid(
        torch.arange(top, bottom + 1),
        torch.arange(left, right + 1),
        indexing="ij",
    )
    full_points = torch.stack([y_coords.flatten(), x_coords.flatten()], dim=1)

    # Drop the excluded point (typically the supervised center pixel).
    if exclude_point is not None:
        exclude_mask = (full_points[:, 0] == exclude_point[0]) & (full_points[:, 1] == exclude_point[1])
        full_points = full_points[~exclude_mask]
    return full_points
def evaluate_model1(model, dataloader):
    """Evaluate ``model`` at the global ``target_pixel`` and report the ROC
    operating point where sensitivity and specificity are closest.

    :param model: trained network producing (B, 1, H, W) outputs
    :param dataloader: yields (img, label) or (img, label, filename) batches
    :return: (best_sensitivity, best_specificity) at the balanced ROC point
    """
    # Hoisted out of the batch loop (was re-imported every iteration).
    from sklearn.preprocessing import MinMaxScaler

    # NOTE(review): device index 1 is hard-coded — confirm for deployment.
    device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
    # BUG FIX: the previous version replaced the supplied model with a freshly
    # constructed (untrained) U_Net(), so the evaluation never used the model
    # that was passed in. Evaluate the caller's model instead.
    model = model.to(device)
    model.eval()

    all_labels = []
    all_predictions = []
    with torch.no_grad():
        for data in dataloader:
            if len(data) == 3:  # test set: unpack three values, ignore filename
                img, label, _ = data
            else:  # train/validation set: unpack two values
                img, label = data
            img, label = img.to(device), label.to(device)
            pred = model(img)
            pred_np = pred.detach().cpu().numpy()
            label_np = label.detach().cpu().numpy()
            # NOTE(review): `target_pixel` is a module-level global that is not
            # defined in this file — confirm it is set before calling.
            pred_target = pred_np[:, :, target_pixel[0], target_pixel[1]]
            label_target = label_np[:, :, target_pixel[0], target_pixel[1]]

            # NOTE(review): min-max scaling is fit per batch, so scores are not
            # comparable across batches; consider a global sigmoid instead.
            pred_flat = pred_target.flatten()
            pred_normalized = MinMaxScaler().fit_transform(pred_flat.reshape(-1, 1)).flatten()

            all_predictions.extend(pred_normalized)       # normalized predictions
            all_labels.extend(label_target.flatten())     # ground-truth labels at target pixel

    # Compute sensitivity/specificity along the ROC curve and pick the
    # threshold where the two are closest to each other.
    fpr, tpr, thresholds = roc_curve(all_labels, all_predictions)
    sensitivities = tpr
    specificities = 1 - fpr
    best_index = np.abs(sensitivities - specificities).argmin()
    best_sensitivity = sensitivities[best_index]
    best_specificity = specificities[best_index]
    return best_sensitivity, best_specificity
def calculate_dynamic_weight(target_coord, aux_coords, aux_labels, min_positive_ratio=0.05, sigma=10):
    """Compute a dynamic weight for auxiliary supervision.

    The weight is (positive-sample ratio) * (mean normalized Gaussian spatial
    weight), so sparse-positive or far-away auxiliary sets contribute less.

    :param target_coord: (y, x) coordinate of the target point
    :param aux_coords: sequence of auxiliary-point coordinates
    :param aux_labels: tensor of 0/1 auxiliary-point labels
    :param min_positive_ratio: below this positive ratio the weight is 0
    :param sigma: Gaussian decay bandwidth for the spatial weights
    :return: scalar weight (0.0 when the positive ratio is too low)
    """
    # Fraction of positive labels among the auxiliary points.
    positive_ratio = torch.sum(aux_labels) / aux_labels.numel()

    # BUG FIX: the original code declared min_positive_ratio and commented the
    # intent ("return 0 if the positive ratio is too low") but never applied
    # it — the parameter was dead. Short-circuit when positives are too sparse.
    if positive_ratio < min_positive_ratio:
        return 0.0

    # Spatial weights: Gaussian decay with Euclidean distance to the target.
    aux_coords_np = np.array(aux_coords)
    distances = np.linalg.norm(aux_coords_np - target_coord, axis=1)
    spatial_weights = np.exp(-distances ** 2 / (2 * sigma ** 2))
    spatial_weights = spatial_weights / spatial_weights.sum()  # normalize to sum 1

    # Combined weight = positive ratio * mean spatial weight.
    dynamic_weight = positive_ratio * np.mean(spatial_weights)

    return dynamic_weight