
import os
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import numpy as np
import cv2
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2
from tqdm import tqdm
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt


# U-Net model definition
class U_Net(nn.Module):
    """Classic U-Net with a five-level encoder/decoder and skip connections.

    Input:  (B, 3, H, W) with H and W divisible by 16 (four 2x2 max-pools).
    Output: (B, 1, H, W) raw logits (no sigmoid) for binary segmentation.
    """

    def __init__(self):
        super(U_Net, self).__init__()
        # Encoder: two 3x3 convs per level, widths 32 -> 64 -> 128 -> 256 -> 512.
        self.conv11 = nn.Conv2d(3, 32, 3, padding=1)
        self.relu = nn.ReLU()
        self.conv12 = nn.Conv2d(32, 32, 3, padding=1)
        self.maxpool = nn.MaxPool2d(2)
        self.conv21 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv22 = nn.Conv2d(64, 64, 3, padding=1)
        self.conv31 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv32 = nn.Conv2d(128, 128, 3, padding=1)
        self.conv41 = nn.Conv2d(128, 256, 3, padding=1)
        self.conv42 = nn.Conv2d(256, 256, 3, padding=1)
        self.conv51 = nn.Conv2d(256, 512, 3, padding=1)
        self.conv52 = nn.Conv2d(512, 512, 3, padding=1)
        # Decoder: transposed-conv upsampling, concat with the matching skip,
        # then two 3x3 convs back down to the skip's width.
        self.upconv6 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2, padding=0)
        self.conv61 = nn.Conv2d(512, 256, 3, padding=1)
        self.conv62 = nn.Conv2d(256, 256, 3, padding=1)
        self.upconv7 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2, padding=0)
        self.conv71 = nn.Conv2d(256, 128, 3, padding=1)
        self.conv72 = nn.Conv2d(128, 128, 3, padding=1)
        self.upconv8 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2, padding=0)
        self.conv81 = nn.Conv2d(128, 64, 3, padding=1)
        self.conv82 = nn.Conv2d(64, 64, 3, padding=1)
        self.upconv9 = nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2, padding=0)
        self.conv91 = nn.Conv2d(64, 32, 3, padding=1)
        self.conv92 = nn.Conv2d(32, 32, 3, padding=1)
        # Final 1x1 conv collapses to a single logit channel.
        self.conv93 = nn.Conv2d(32, 1, 1, padding=0)

    def forward(self, x):
        # Shorthand: ReLU(conv(t)), used for every conv in the network
        # except the final 1x1 projection.
        def act(conv, t):
            return self.relu(conv(t))

        # ----- encoder: keep each level's activation for the skip path -----
        skip1 = act(self.conv12, act(self.conv11, x))
        skip2 = act(self.conv22, act(self.conv21, self.maxpool(skip1)))
        skip3 = act(self.conv32, act(self.conv31, self.maxpool(skip2)))
        skip4 = act(self.conv42, act(self.conv41, self.maxpool(skip3)))
        bottom = act(self.conv52, act(self.conv51, self.maxpool(skip4)))

        # ----- decoder: upsample, concat skip on the channel dim, refine -----
        up = torch.cat((skip4, self.upconv6(bottom)), 1)
        up = act(self.conv62, act(self.conv61, up))
        up = torch.cat((skip3, self.upconv7(up)), 1)
        up = act(self.conv72, act(self.conv71, up))
        up = torch.cat((skip2, self.upconv8(up)), 1)
        up = act(self.conv82, act(self.conv81, up))
        up = torch.cat((skip1, self.upconv9(up)), 1)
        up = act(self.conv92, act(self.conv91, up))
        return self.conv93(up)


# Dataset loading class
class mf_data_seg_A:
    """Paired image/mask dataset rooted at ``root_path``.

    Expects two sub-directories, ``img`` and ``label``, whose files are
    matched by sorted filename order (the i-th image pairs with the i-th
    label).  At most ``max_samples`` pairs are kept.
    """

    def __init__(self, root_path, transforms=None, flag=None, max_samples=10):
        self.img_path = os.path.join(root_path, 'img')
        self.label_path = os.path.join(root_path, 'label')
        self.transforms = transforms  # albumentations pipeline, or None
        self.flag = flag  # e.g. 'train' / 'val'; informational only
        self.max_samples = max_samples
        self.img_list = self._load_files(self.img_path)
        self.label_list = self._load_files(self.label_path)
        assert len(self.img_list) == len(self.label_list), \
            f"Number of images ({len(self.img_list)}) and labels ({len(self.label_list)}) do not match."
        # Truncate both lists in lockstep so pairs stay aligned.
        if len(self.img_list) > self.max_samples:
            self.img_list = self.img_list[:self.max_samples]
            self.label_list = self.label_list[:self.max_samples]

    def _load_files(self, path):
        """Return the sorted list of .jpg/.png/.jpeg files (any case) in *path*."""
        files = [os.path.join(path, f) for f in os.listdir(path) if f.lower().endswith(('.jpg', '.png', '.jpeg'))]
        files.sort()
        return files

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, idx):
        """Return (image tensor in [0, 1], mask tensor (1, H, W) in {0, 1}, label filename)."""
        img_path = self.img_list[idx]
        label_path = self.label_list[idx]
        img = cv2.imread(img_path)
        label = cv2.imread(label_path, cv2.IMREAD_GRAYSCALE)
        if img is None or label is None:
            raise ValueError(f"Failed to load image: {img_path} or label: {label_path}")
        print(f"加载标签 {label_path}, 原始值范围: min={label.min()}, max={label.max()}")

        # Force-binarize the raw label; threshold 127 maps grey masks to {0, 255}.
        label = (label > 127).astype(np.uint8) * 255
        print(f"二值化后原始标签值范围: min={label.min()}, max={label.max()}")

        if self.transforms:
            transformed = self.transforms(image=img, mask=label)
            img = transformed['image']
            label = transformed['mask']
        else:
            # Bugfix: without a transform pipeline the arrays stayed numpy and
            # the .float() calls below crashed; convert to tensors explicitly
            # (image HWC -> CHW to match ToTensorV2's layout).
            img = torch.from_numpy(img).permute(2, 0, 1)
            label = torch.from_numpy(label)
        img = img.float() / 255.0
        label = label.float() / 255.0
        label = label.unsqueeze(0)  # add channel dim -> (1, H, W)
        return img, label, os.path.basename(label_path)


class UNetTrainer:
    """Train a U-Net from sparse point supervision.

    Instead of dense masks, the loss and metrics are evaluated only at a
    fixed set of (row, col) pixel coordinates sampled once from a label
    image of the training loader.
    """

    def __init__(self, device='cuda', image_size=512):
        self.device = torch.device(device if torch.cuda.is_available() else 'cpu')
        self.image_size = image_size
        # Mean BCE on raw logits; class imbalance is handled by per-point
        # weights inside the training loop rather than via pos_weight here.
        self.criterion = nn.BCEWithLogitsLoss(reduction='mean')

    def reset_model(self):
        """Return a fresh (model, optimizer, scheduler) triple on self.device."""
        model = U_Net().to(self.device)
        # AdamW with weight decay; LR halves when the val loss plateaus.
        optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=1e-4)
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=5,
                                                               verbose=True)
        return model, optimizer, scheduler

    def select_balanced_points(self, loader, num_points=200, seed=42, image_idx=0, diversity_factor=0.5):
        """Pick a class-balanced set of supervision pixels from one label image.

        diversity_factor: 0 = purely random sampling, 1 = maximize spatial
        spread via farthest-point selection.

        Returns (points, pos_points, neg_points, binary_label); on failure
        returns (empty array, [], [], None).
        """
        np.random.seed(seed)

        # Walk the loader to the requested batch and take its first sample.
        # NOTE(review): with a shuffling loader, image_idx selects a
        # different underlying image on every call — confirm this is intended.
        try:
            for i, (_, labels, names) in enumerate(loader):
                if i == image_idx:
                    label = labels[0].numpy()[0]  # (H, W) mask of sample 0
                    image_name = names[0]
                    break
            else:
                print(f"错误：图像索引 {image_idx} 超出范围（长度：{len(loader)}）")
                return np.array([]), [], [], None
        except Exception as e:
            print(f"错误：无法获取标签，异常：{e}")
            return np.array([]), [], [], None

        # Binarize the (already [0, 1]-normalized) label.
        label_binary = (label > 0.5).astype(np.float32)

        # Coordinates of foreground / background pixels as (row, col) pairs.
        pos_idx = np.where(label_binary == 1.0)
        neg_idx = np.where(label_binary == 0.0)
        pos_coords = list(zip(pos_idx[0], pos_idx[1]))
        neg_coords = list(zip(neg_idx[0], neg_idx[1]))

        if len(pos_coords) == 0 or len(neg_coords) == 0:
            print(f"错误：图像 {image_name} 缺少正样本或负样本")
            return np.array([]), [], [], None

        # Balanced targets: at least one third positive points.
        target_pos = max(1, min(len(pos_coords), num_points // 3))
        target_neg = num_points - target_pos

        # Diverse sampling only when there are more candidates than targets.
        if diversity_factor > 0 and len(pos_coords) > target_pos:
            pos_points = self._diverse_sampling(pos_coords, target_pos, diversity_factor)
        else:
            pos_selected = np.random.choice(len(pos_coords), target_pos, replace=False)
            pos_points = [pos_coords[i] for i in pos_selected]

        if diversity_factor > 0 and len(neg_coords) > target_neg:
            neg_points = self._diverse_sampling(neg_coords, target_neg, diversity_factor)
        else:
            neg_selected = np.random.choice(len(neg_coords), min(target_neg, len(neg_coords)), replace=False)
            neg_points = [neg_coords[i] for i in neg_selected]

        points = pos_points + neg_points
        points = np.array(points, dtype=np.int32)

        print(f"选择点分布：正样本 {len(pos_points)}, 负样本 {len(neg_points)}, 总数 {len(points)}")

        return points, pos_points, neg_points, label_binary

    def _diverse_sampling(self, coords, n_samples, diversity_factor):
        """Sample n_samples coordinates mixing randomness and spatial spread.

        With probability (1 - diversity_factor) a point is picked at random;
        otherwise the point farthest from all already-selected points is
        taken (greedy farthest-point sampling).
        """
        if len(coords) <= n_samples:
            return coords

        coords_array = np.array(coords)
        selected = []
        remaining = list(range(len(coords)))

        # Seed the selection with one random point.
        first_idx = np.random.choice(remaining)
        selected.append(coords[first_idx])
        remaining.remove(first_idx)

        for _ in range(n_samples - 1):
            if not remaining:
                break

            if np.random.random() < (1 - diversity_factor):
                # Random pick.
                idx = np.random.choice(remaining)
            else:
                # Greedy pick: maximize the min distance to selected points.
                distances = []
                for r_idx in remaining:
                    min_dist = float('inf')
                    for sel_point in selected:
                        dist = np.sqrt((coords_array[r_idx][0] - sel_point[0]) ** 2 +
                                       (coords_array[r_idx][1] - sel_point[1]) ** 2)
                        min_dist = min(min_dist, dist)
                    distances.append(min_dist)
                idx = remaining[np.argmax(distances)]

            selected.append(coords[idx])
            remaining.remove(idx)

        return selected

    def train_strategy_1_improved(self, train_loader, val_loader, num_points=200, num_epochs=50, seed=42):
        """Strategy 1: train on a fixed point set with dynamic class weights.

        Returns (model, accuracy, sensitivity, specificity, miou), or
        (None, 0, 0, 0, 0) when point selection fails.
        """
        points, pos_points, neg_points, _ = self.select_balanced_points(
            train_loader, num_points, seed, image_idx=0, diversity_factor=0.3)

        if len(points) == 0:
            return None, 0, 0, 0, 0

        model, optimizer, scheduler = self.reset_model()

        # Inverse-frequency weight applied to positive points in the loss.
        pos_weight = len(neg_points) / max(len(pos_points), 1)
        print(f"使用正样本权重: {pos_weight:.2f}")

        best_val_loss = float('inf')
        # Bugfix: initialize so an all-NaN run can't hit a NameError below.
        best_model_state = None
        patience_counter = 0
        patience = 8

        for epoch in range(num_epochs):
            model.train()
            train_loss = 0

            # Shuffle point order each epoch for extra stochasticity.
            shuffled_points = points.copy()
            np.random.shuffle(shuffled_points)

            # NOTE(review): the point coordinates come from an un-augmented
            # label, while the training transforms flip/shift images — the
            # per-pixel loss stays consistent (labels are augmented too), but
            # the points no longer mark the originally chosen structures.
            for batch in tqdm(train_loader, desc=f"Strategy 1 Improved, Epoch {epoch + 1}/{num_epochs}"):
                images, labels, _ = batch
                images, labels = images.to(self.device), labels.to(self.device)

                optimizer.zero_grad()
                outputs = model(images)

                # Average the weighted BCE over all supervision points.
                total_loss = 0
                for point in shuffled_points:
                    x, y = point[0], point[1]  # (row, col) indexing
                    pred = outputs[:, 0, x, y]
                    true = labels[:, 0, x, y]

                    # Dynamic weight: positive-point weight grows with epoch.
                    if true.mean() > 0.5:
                        weight = pos_weight * (1 + 0.5 * epoch / num_epochs)
                    else:
                        weight = 1.0

                    total_loss += weight * self.criterion(pred, true) / len(shuffled_points)

                total_loss.backward()

                # Gradient clipping for stability.
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)

                optimizer.step()
                train_loss += total_loss.item()

            avg_train_loss = train_loss / len(train_loader)

            # Validation drives the LR scheduler and early stopping.
            val_loss = self.validate_improved(model, val_loader, points)
            scheduler.step(val_loss)

            print(f"Epoch {epoch + 1}, Train Loss: {avg_train_loss:.4f}, Val Loss: {val_loss:.4f}")

            if val_loss < best_val_loss:
                best_val_loss = val_loss
                patience_counter = 0
                # Bugfix: state_dict() tensors alias the live parameters, so a
                # shallow dict copy would keep mutating as training continues;
                # clone each tensor to freeze the checkpoint.
                best_model_state = {k: v.detach().clone() for k, v in model.state_dict().items()}
            else:
                patience_counter += 1
                if patience_counter >= patience:
                    print(f"Early stopping at epoch {epoch + 1}")
                    break

        # Bugfix: restore the best checkpoint on every exit path, not only
        # when early stopping fired.
        if best_model_state is not None:
            model.load_state_dict(best_model_state)

        # Final evaluation on the validation loader.
        acc, sensitivity, specificity, miou = self.evaluate_strategy_1_improved(model, val_loader, points)
        return model, acc, sensitivity, specificity, miou

    def validate_improved(self, model, val_loader, points):
        """Mean point-wise BCE loss over val_loader, weighted by batch size."""
        model.eval()
        val_loss = 0
        total_samples = 0

        with torch.no_grad():
            for batch in val_loader:
                images, labels, _ = batch
                images, labels = images.to(self.device), labels.to(self.device)
                outputs = model(images)

                # Loss is measured only at the supervision points.
                batch_loss = 0
                for point in points:
                    x, y = point[0], point[1]
                    pred = outputs[:, 0, x, y]
                    true = labels[:, 0, x, y]
                    batch_loss += self.criterion(pred, true) / len(points)

                val_loss += batch_loss.item() * images.size(0)
                total_samples += images.size(0)

        return val_loss / max(total_samples, 1)

    def evaluate_strategy_1_improved(self, model, test_loader, points):
        """Point-wise metrics with an F1-optimal decision threshold.

        NOTE(review): the threshold is tuned on the same data it is then
        scored on, so the reported metrics are optimistically biased.
        Returns (accuracy, sensitivity, specificity, miou).
        """
        model.eval()
        all_preds = []
        all_labels = []

        with torch.no_grad():
            for batch in test_loader:
                images, labels, _ = batch
                images, labels = images.to(self.device), labels.to(self.device)
                outputs = model(images)

                # Collect sigmoid probabilities and targets at each point.
                for point in points:
                    x, y = point[0], point[1]
                    preds = torch.sigmoid(outputs[:, 0, x, y])
                    true_labels = labels[:, 0, x, y]
                    all_preds.extend(preds.cpu().numpy())
                    all_labels.extend(true_labels.cpu().numpy())

        all_preds = np.array(all_preds)
        all_labels = np.array(all_labels)
        all_labels_binary = (all_labels > 0.5).astype(int)

        # Sweep thresholds 0.1..0.9 and keep the F1-best one.
        thresholds = np.linspace(0.1, 0.9, 9)
        best_f1 = 0
        best_threshold = 0.5

        for thresh in thresholds:
            preds_binary = (all_preds >= thresh).astype(int)
            tp = np.sum((preds_binary == 1) & (all_labels_binary == 1))
            fp = np.sum((preds_binary == 1) & (all_labels_binary == 0))
            fn = np.sum((preds_binary == 0) & (all_labels_binary == 1))

            precision = tp / (tp + fp) if (tp + fp) > 0 else 0
            recall = tp / (tp + fn) if (tp + fn) > 0 else 0
            f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0

            if f1 > best_f1:
                best_f1 = f1
                best_threshold = thresh

        # Final metrics at the selected threshold.
        preds_binary = (all_preds >= best_threshold).astype(int)

        tp = np.sum((preds_binary == 1) & (all_labels_binary == 1))
        tn = np.sum((preds_binary == 0) & (all_labels_binary == 0))
        fp = np.sum((preds_binary == 1) & (all_labels_binary == 0))
        fn = np.sum((preds_binary == 0) & (all_labels_binary == 1))

        accuracy = (tp + tn) / len(all_labels_binary) if len(all_labels_binary) > 0 else 0
        sensitivity = tp / (tp + fn) if (tp + fn) > 0 else 0
        specificity = tn / (tn + fp) if (tn + fp) > 0 else 0

        # Foreground IoU at the supervision points.
        intersection = tp
        union = tp + fp + fn
        miou = intersection / union if union > 0 else 0

        print(f"最佳阈值: {best_threshold:.3f}, F1: {best_f1:.4f}")

        return accuracy, sensitivity, specificity, miou

    def validate(self, model, val_loader):
        """Dense (full-mask) validation loss; returns 0.0 for an empty loader."""
        model.eval()
        val_loss = 0
        if len(val_loader) == 0:
            print("警告：验证集为空，返回默认损失值 0")
            return 0.0
        with torch.no_grad():
            for batch in val_loader:
                images, labels, _ = batch
                images, labels = images.to(self.device), labels.to(self.device)
                outputs = model(images)
                loss = self.criterion(outputs, labels)
                val_loss += loss.item()
        avg_val_loss = val_loss / len(val_loader)
        print(f"Validation Loss: {avg_val_loss:.4f}")
        return avg_val_loss


# Main training script
if __name__ == "__main__":
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    # Augmented transforms for training; validation only gets a resize.
    data_trans_train = A.Compose([
        A.Resize(height=512, width=512),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=45, p=0.5),
        A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.5),
        ToTensorV2()
    ])

    data_trans_val = A.Compose([
        A.Resize(height=512, width=512),
        ToTensorV2()
    ])

    # Dataset root (adjust to the actual local path).
    root_path = "D:/zhou/od1/data/Image Localization"
    train_data = mf_data_seg_A(root_path=os.path.join(root_path, 'train'),
                               flag='train', transforms=data_trans_train, max_samples=150)
    val_data = mf_data_seg_A(root_path=os.path.join(root_path, 'val'),
                             flag='val', transforms=data_trans_val, max_samples=10)

    train_loader = DataLoader(train_data, batch_size=16, shuffle=True, num_workers=4)
    val_loader = DataLoader(val_data, batch_size=16, shuffle=False, num_workers=4)

    print(f"训练集样本数: {len(train_data)}")
    print(f"验证集样本数: {len(val_data)}")

    trainer = UNetTrainer(device='cuda', image_size=512)

    # Sweep over the number of supervision points.
    point_range = [50, 75, 100, 125, 150, 175, 200, 250, 300]
    results_improved = []

    print("=== 运行改进的策略1 ===")
    for num_points in point_range:
        print(f"\n=== 测试点数量: {num_points} ===")

        model, acc, sensitivity, specificity, miou = trainer.train_strategy_1_improved(
            train_loader, val_loader, num_points=num_points, num_epochs=50, seed=42)

        if model is not None:
            print(f"点数: {num_points}, 准确率: {acc:.4f}, 敏感性: {sensitivity:.4f}, "
                  f"特异性: {specificity:.4f}, mIoU: {miou:.4f}")
            results_improved.append((num_points, acc, sensitivity, specificity, miou))
        else:
            print(f"点数: {num_points} 训练失败")

    # Visualize metric curves vs. point count.
    if results_improved:
        points_list = [r[0] for r in results_improved]
        accs = [r[1] for r in results_improved]
        sensitivities = [r[2] for r in results_improved]
        specificities = [r[3] for r in results_improved]
        mious = [r[4] for r in results_improved]

        # One subplot per metric; labels/titles mirror the metric name.
        metric_specs = [
            (accs, 'Accuracy', 'blue'),
            (sensitivities, 'Sensitivity', 'green'),
            (specificities, 'Specificity', 'red'),
            (mious, 'mIoU', 'purple'),
        ]

        plt.figure(figsize=(15, 10))
        for plot_idx, (values, metric_name, color) in enumerate(metric_specs, start=1):
            plt.subplot(2, 2, plot_idx)
            plt.plot(points_list, values, 'o-', label=metric_name, color=color)
            plt.xlabel('Number of Points')
            plt.ylabel(metric_name)
            plt.title(f'{metric_name} vs Number of Points')
            plt.grid(True)
            plt.legend()

        plt.tight_layout()
        plt.savefig('improved_performance_vs_points.png', dpi=300, bbox_inches='tight')
        plt.show()

        # Print a per-run summary table.
        print("\n=== 结果总结 ===")
        for num_points, acc, sens, spec, miou in results_improved:
            print(f"点数: {num_points:3d} | 准确率: {acc:.4f} | 敏感性: {sens:.4f} | "
                  f"特异性: {spec:.4f} | mIoU: {miou:.4f}")

        # Linear-fit slopes summarize how metrics trend with more points.
        if len(results_improved) > 2:
            miou_trend = np.polyfit(points_list, mious, 1)[0]
            sens_trend = np.polyfit(points_list, sensitivities, 1)[0]
            print(f"\nmIoU 趋势斜率: {miou_trend:.6f}")
            print(f"敏感性趋势斜率: {sens_trend:.6f}")

            if miou_trend > 0:
                print("✓ mIoU 随点数增加呈上升趋势")
            else:
                print("✗ mIoU 随点数增加呈下降趋势")