import os
import numpy as np
from PIL import Image
from collections import defaultdict
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter

class MVSDataset(Dataset):
    """Contrastive-learning dataset that generates positive/negative patch pairs on the fly.

    The raw data is a folder of BMP sheets plus an ``info.txt`` whose i-th row
    starts with the 3D point id of patch i.  Patch i lives on sheet ``i // 256``
    at grid cell ``((i % 256) // 16, (i % 256) % 16)`` — a 16x16 grid of 64x64
    patches per sheet.  Two patches form a positive pair when they share a
    point id, a negative pair otherwise.
    """

    def __init__(self, root_dir, mode='train', pairs_per_epoch=10000,
                 val_ratio=0.2, seed=42):
        """
        Args:
            root_dir: folder containing ``info.txt`` and the ``*.bmp`` sheets.
            mode: 'train' or 'val'; selects which point-id partition is active.
            pairs_per_epoch: virtual dataset length (pairs generated per epoch).
            val_ratio: fraction of 3D points held out for validation.
            seed: seed for the sampling RNG (also drives the train/val split).
        """
        self.mode = mode
        self.pairs_per_epoch = pairs_per_epoch
        # NOTE(review): this single Generator is copied into every DataLoader
        # worker process, so with num_workers > 1 workers can emit identical
        # pairs — consider a worker_init_fn that reseeds per worker. TODO confirm.
        self.rng = np.random.default_rng(seed)

        # Load the raw metadata.
        self.all_samples = []
        self.point_dict = defaultdict(list)
        self._load_folder(root_dir)

        # Split by 3D point so no point's patches straddle train and val.
        self.train_points, self.val_points = self._split_datasets(val_ratio, seed)

        # Activate the requested partition and build the sampling indices.
        self._set_mode(mode)

    def _load_folder(self, folder):
        """Read ``info.txt`` and register one sample record per patch.

        Raises:
            ValueError: if there are fewer BMP sheets than the metadata needs.
        """
        # ndmin=2 keeps a (1, 2) shape even when info.txt has a single row,
        # so the row unpacking below never sees bare scalars.
        metadata = np.loadtxt(os.path.join(folder, "info.txt"),
                              dtype=np.int64, ndmin=2)
        img_files = sorted(f for f in os.listdir(folder) if f.endswith(".bmp"))

        # Fail early with a clear message instead of an IndexError mid-loop.
        needed_sheets = (len(metadata) + 255) // 256
        if len(img_files) < needed_sheets:
            raise ValueError(
                f"info.txt describes {len(metadata)} patches "
                f"({needed_sheets} sheets) but only {len(img_files)} .bmp files found")

        for i, (point_id, _) in enumerate(metadata):
            pic_idx = i // 256          # which BMP sheet
            row = (i % 256) // 16       # grid row on that sheet
            col = (i % 256) % 16        # grid column on that sheet

            sample = {
                "img_path": os.path.join(folder, img_files[pic_idx]),
                "grid_pos": (row, col),
                "point_id": point_id,
                "patch_id": i
            }

            self.point_dict[point_id].append(sample)
            self.all_samples.append(sample)

    def _split_datasets(self, val_ratio, seed):
        """Partition the 3D point ids into (train, val) sets.

        ``seed`` is accepted for interface stability but unused here: the
        shuffle is driven by ``self.rng``, already seeded in ``__init__``.
        """
        all_points = list(self.point_dict.keys())
        self.rng.shuffle(all_points)

        split_idx = int(len(all_points) * (1 - val_ratio))
        return set(all_points[:split_idx]), set(all_points[split_idx:])

    def _set_mode(self, mode):
        """Select the active partition and (re)build the sampling indices."""
        self.mode = mode
        self.active_points = self.train_points if mode == 'train' else self.val_points

        # idx is a position in all_samples so _load_patch can address it directly.
        self.active_samples = []
        self.point_to_indices = defaultdict(list)
        for idx, sample in enumerate(self.all_samples):
            if sample["point_id"] in self.active_points:
                self.active_samples.append(sample)
                self.point_to_indices[sample["point_id"]].append(idx)

        # Hoisted out of __getitem__: these are invariant per mode; the
        # original recomputed them for every pair, making each sample
        # O(#points) instead of O(1).
        self._active_point_ids = list(self.point_to_indices.keys())
        self._positive_points = [p for p, indices in self.point_to_indices.items()
                                 if len(indices) >= 2]

    def __len__(self):
        # Virtual length: pairs are synthesized on demand, not enumerated.
        return self.pairs_per_epoch

    def __getitem__(self, _):
        """Return one positive pair and one negative pair (index is ignored)."""
        pos_idx1, pos_idx2 = self._generate_positive_pair()
        neg_idx1, neg_idx2 = self._generate_negative_pair()

        pos1 = self._load_patch(pos_idx1)
        pos2 = self._load_patch(pos_idx2)

        neg1 = self._load_patch(neg_idx1)
        neg2 = self._load_patch(neg_idx2)

        return pos1, pos2, neg1, neg2

    def _generate_positive_pair(self):
        """Pick two distinct patches of one 3D point (same point => positive).

        Raises:
            ValueError: if no active point has at least two patches.
        """
        if not self._positive_points:
            raise ValueError(
                f"no 3D point in mode '{self.mode}' has >= 2 patches; "
                "cannot form a positive pair")
        point_id = self.rng.choice(self._positive_points)
        idx1, idx2 = self.rng.choice(self.point_to_indices[point_id], 2,
                                     replace=False)
        return idx1, idx2

    def _generate_negative_pair(self):
        """Pick one patch from each of two different 3D points.

        Raises:
            ValueError: if the active split has fewer than two 3D points.
        """
        if len(self._active_point_ids) < 2:
            raise ValueError(
                f"mode '{self.mode}' contains fewer than two 3D points; "
                "cannot form a negative pair")
        p1, p2 = self.rng.choice(self._active_point_ids, 2, replace=False)
        idx1 = self.rng.choice(self.point_to_indices[p1])
        idx2 = self.rng.choice(self.point_to_indices[p2])
        return idx1, idx2

    def _load_patch(self, idx):
        """Load one 64x64 grayscale patch as a (1, 64, 64) float tensor in [0, 1].

        Assumes each BMP sheet is at least 1024x1024 so every 16x16 grid cell
        exists — TODO confirm against the data generator.
        """
        sample = self.all_samples[idx]
        img = Image.open(sample["img_path"]).convert('L')
        img_array = np.array(img)

        row, col = sample["grid_pos"]
        patch = img_array[row * 64:(row + 1) * 64, col * 64:(col + 1) * 64]
        return torch.from_numpy(patch).unsqueeze(0).float() / 255.0


# Usage example: sanity-check the pipeline by logging a few batches to TensorBoard.
if __name__ == "__main__":
    # Build the training dataset; pairs are generated on the fly, so the
    # index passed by the DataLoader is ignored by __getitem__.
    train_dataset = MVSDataset(
        root_dir=r"D:\project\FeatureExtration\datasets\output",
        mode='train',
        pairs_per_epoch=10000,
        val_ratio=0.2,
        seed=88
    )

    train_loader = DataLoader(
        train_dataset,
        batch_size=30,
        shuffle=False,  # __getitem__ ignores its index, so shuffling changes nothing
        num_workers=1,
        pin_memory=True
    )

    writer = SummaryWriter("logs")
    try:
        # Visually inspect the first six batches (global steps 0..5).
        for step, (pos1, pos2, neg1, neg2) in enumerate(train_loader):
            writer.add_images("pos1", pos1, global_step=step)
            writer.add_images("pos2", pos2, global_step=step)
            writer.add_images("neg1", neg1, global_step=step)
            writer.add_images("neg2", neg2, global_step=step)
            print(f"Batch size: {len(pos1)}")
            if step == 5:
                break
    finally:
        # The writer was never closed before, risking unflushed event files.
        writer.close()


# import os
# import numpy as np
# from PIL import Image
# import torch
# from torch.utils.data import Dataset, DataLoader
# from collections import defaultdict
#
#
# class MVTDataset(Dataset):
#     """处理非连续3D点索引和动态负样本生成的数据集"""
#
#     def __init__(self, root_dir, val_ratio=0.2, seed=42):
#         self.point_dict = defaultdict(list)  # {point_id: [样本元数据列表]}
#         self.all_samples = []  # 所有样本的线性存储
#         self.val_ratio = val_ratio
#         self.seed = seed
#
#         self._load_folder(root_dir)
#
#         # 划分训练/验证集（确保同一3D点的样本在同一个集合中）
#         self._split_datasets()
#
#     def _load_folder(self, folder):
#         """加载单个文件夹的数据"""
#         # 读取元数据文件
#         metadata = np.loadtxt(os.path.join(folder, "info.txt"), dtype=np.int64)
#         # 获取关联的BMP图像文件列表
#         img_files = sorted([f for f in os.listdir(folder) if f.endswith(".bmp")])
#
#         for i, (point_id, _) in enumerate(metadata):
#             pic_idx = i // 256
#             row = (pic_idx % 256) // 16
#             col = (pic_idx % 256) % 16
#             img_path = os.path.join(folder, img_files[pic_idx])
#
#             sample = {
#                 "img_path": img_path,
#                 "grid_pos": (row, col),
#                 "point_id": point_id,
#                 "patch_id": i
#             }
#
#             self.point_dict[point_id].append(sample)
#             self.all_samples.append(sample)
#
#     def _split_datasets(self):
#         """基于3D点进行数据集划分"""
#         np.random.seed(self.seed)
#
#         # 获取所有唯一的3D点ID
#         all_points = list(self.point_dict.keys())
#         np.random.shuffle(all_points)
#
#         # 计算分割点
#         split_idx = int(len(all_points) * (1 - self.val_ratio))
#         train_points = set(all_points[:split_idx])
#         val_points = set(all_points[split_idx:])
#
#         # 建立索引映射
#         self.train_indices = []
#         self.val_indices = []
#
#         for idx, sample in enumerate(self.all_samples):
#             if sample["point_id"] in train_points:
#                 self.train_indices.append(idx)
#             else:
#                 self.val_indices.append(idx)
#
#     def __len__(self):
#         return len(self.all_samples)
#
#     def __getitem__(self, idx):
#         """返回单个补丁及其元数据"""
#         sample = self.all_samples[idx]
#
#         # 加载图像并提取补丁
#         img = Image.open(sample["img_path"]).convert('L')
#         img_array = np.array(img)
#
#         # 计算补丁坐标
#         row, col = sample["grid_pos"]
#         patch = img_array[row * 64:(row + 1) * 64, col * 64:(col + 1) * 64]
#
#         # 转换为张量
#         patch_tensor = torch.from_numpy(patch).unsqueeze(0).float() / 255.0
#
#         return {
#             "patch": patch_tensor,
#             "point_id": sample["point_id"],
#             "patch_id": sample["patch_id"]
#         }
#
#
# class ContrastiveLoader:
#     """对比学习数据加载器，动态生成正负样本对"""
#
#     def __init__(self, dataset, mode='train', pairs_per_epoch=1e4):
#         self.dataset = dataset
#         self.mode = mode
#         self.indices = dataset.train_indices if mode == 'train' else dataset.val_indices
#         self.pairs_per_epoch = int(pairs_per_epoch)
#
#         # 建立快速查询结构
#         self.idx_to_point = {idx: self.dataset.all_samples[idx]["point_id"]
#                              for idx in self.indices}
#         self.point_to_indices = defaultdict(list)
#         for idx in self.indices:
#             point_id = self.idx_to_point[idx]
#             self.point_to_indices[point_id].append(idx)
#
#     def _generate_positive_pair(self):
#         """生成正样本对（同一3D点的不同贴片）"""
#         # 随机选择一个包含多个样本的3D点
#         valid_points = [p for p, indices in self.point_to_indices.items()
#                         if len(indices) >= 2]
#         point_id = np.random.choice(valid_points)
#
#         # 随机选择该点的两个不同样本
#         candidates = self.point_to_indices[point_id]
#         idx1, idx2 = np.random.choice(candidates, 2, replace=False)
#
#         return (idx1, idx2), 1  # 1表示正样本对
#
#     def _generate_negative_pair(self):
#         """生成负样本对（不同3D点的样本）"""
#         # 随机选择两个不同的3D点
#         point_ids = list(self.point_to_indices.keys())
#         p1, p2 = np.random.choice(point_ids, 2, replace=False)
#
#         # 从每个点中随机选择一个样本
#         idx1 = np.random.choice(self.point_to_indices[p1])
#         idx2 = np.random.choice(self.point_to_indices[p2])
#
#         return (idx1, idx2), 0  # 0表示负样本对
#
#     def __iter__(self):
#         self.count = 0
#         return self
#
#     def __next__(self):
#         if self.count >= self.pairs_per_epoch:
#             raise StopIteration
#
#         # 动态生成正负样本对（1:1比例）
#         if np.random.rand() < 0.5:
#             pair, label = self._generate_positive_pair()
#         else:
#             pair, label = self._generate_negative_pair()
#
#         self.count += 1
#
#         # 获取实际数据
#         sample1 = self.dataset[pair[0]]
#         sample2 = self.dataset[pair[1]]
#
#         return (
#             (sample1["patch"], sample2["patch"]),
#             torch.tensor(label, dtype=torch.float)
#         )
#
#
# # 使用示例
# if __name__ == "__main__":
#     # 初始化数据集
#     dataset = MVTDataset(
#         root_dirs=["data/folder1"],
#         val_ratio=0.2,
#         seed=42
#     )
#
#     # 创建数据加载器
#     train_loader = ContrastiveLoader(dataset, mode='train', pairs_per_epoch=10000)
#     val_loader = ContrastiveLoader(dataset, mode='val', pairs_per_epoch=2000)
#
#     # 训练循环示例
#     for epoch in range(10):
#         # 训练阶段
#         for (anchor, pos_neg), labels in train_loader:
#             # 此处添加模型训练逻辑
#             # anchor和pos_neg可能是正样本对或负样本对
#             pass
#
#         # 验证阶段
#         with torch.no_grad():
#             for (anchor, pos_neg), labels in val_loader:
#                 # 此处添加模型验证逻辑
#                 pass


# from torch.utils.data import Dataset, DataLoader
# import numpy as np
#
#
# class MVSDataset(Dataset):
#     def __init__(self, patch_dir):
#         self.patches = np.load(patch_dir + "/patches.npy")  # 形状(N, 1, 64, 64)
#         self.point_ids = np.load(patch_dir + "/point_ids.npy")  # 形状(N,)
#
#     def __len__(self):
#         return len(self.patches)
#
#     def __getitem__(self, idx):
#         return self.patches[idx], self.point_ids[idx]
#
#
# def generate_pairs(dataset, s_p=1024, s_n=1024):
#     """动态生成正负样本对"""
#     pos_pairs = []
#     neg_pairs = []
#
#     # 生成正样本对
#     unique_ids = np.unique(dataset.point_ids)
#     selected_ids = np.random.choice(unique_ids, s_p, replace=True)
#     for pid in selected_ids:
#         indices = np.where(dataset.point_ids == pid)[0]
#         if len(indices) >= 2:
#             i, j = np.random.choice(indices, 2, replace=False)
#             pos_pairs.append((dataset[i][0], dataset[j][0]))
#
#     # 生成负样本对
#     for _ in range(s_n):
#         i, j = np.random.choice(len(dataset), 2, replace=False)
#         if dataset.point_ids[i] != dataset.point_ids[j]:
#             neg_pairs.append((dataset[i][0], dataset[j][0]))
#
#     return pos_pairs, neg_pairs