"""
-------------------------------------------------
   File Name:       data_loader.py
   Project Name:    beamform_AYnet
   Author :         Chunshan YANG
   Date:            2025/2/2
   Device:          GTX2070
-------------------------------------------------
   Change Activity:
                   2025/2/2:
-------------------------------------------------
"""
from pathlib import Path
from functools import partial
from tqdm import tqdm

import numpy as np
import scipy.io as scio
import scipy.ndimage
import torch
import torch.nn.functional as F
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image
from torch.utils.data import DataLoader

from utils.sensors import read_sensor_data


### Photoacoustic and ultrasound dataset
class PaUsDataset(data.Dataset):
    """Dataset pairing raw sensor recordings with beamformed images.

    Each sample consists of:
      * ``sensor_data`` — raw RF data loaded from ``<stem>_rf.mat`` via
        ``read_sensor_data``;
      * ``gt_image``    — optional ground-truth image ``<stem>_gt.png``
        (``None`` when the file is absent);
      * ``das_image``   — delay-and-sum image ``<stem>_das.png`` (required;
        samples without it are skipped).

    All samples are loaded eagerly into memory during ``__init__``.

    Args:
        root_filepath: Either a directory (searched recursively for
            ``*.mat``) or a text file listing one path stem per line; each
            stem is expanded to ``<stem>_rf.mat``.
        image_transform: Optional transform applied to both PNG images.
            When absent, PIL images are converted with ``ToTensor``.
        sensor_transform: Optional transform forwarded to
            ``read_sensor_data``.
        return_filestem: When True, items additionally include the file
            stem as the last element.

    Raises:
        FileNotFoundError: ``root_filepath`` is neither a file nor a
            directory.
        ValueError: no valid sample could be loaded.
    """

    def __init__(self, root_filepath, image_transform=None, sensor_transform=None, return_filestem=False):
        self.root_filepath = Path(root_filepath)
        self.image_transform = image_transform
        self.sensor_transform = sensor_transform
        self.return_filestem = return_filestem

        # Collect candidate .mat paths either from a list file or by
        # recursively scanning a directory.
        sensor_file_paths = []
        if self.root_filepath.is_file():
            with self.root_filepath.open('r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if line:
                        sensor_file_paths.append(Path(line + "_rf.mat"))
        elif self.root_filepath.is_dir():
            sensor_file_paths = list(self.root_filepath.rglob('*.mat'))
        else:
            raise FileNotFoundError(f"'{self.root_filepath}' is not a valid file or directory.")

        self.names = []
        self.datasets = []
        loaded_count = 0
        for sensor_file_path in tqdm(sensor_file_paths, total=len(sensor_file_paths)):
            # Strip the trailing "_rf" from the stem — assumes every .mat
            # file is named "<stem>_rf.mat" (TODO confirm for list files).
            stem = sensor_file_path.stem[:-3]
            # Companion images live next to the sensor file.
            gt_img_file_path = sensor_file_path.parent / f"{stem}_gt.png"
            das_img_file_path = sensor_file_path.parent / f"{stem}_das.png"

            if das_img_file_path.exists():
                # Read the .mat file and preprocess the sensor data.
                sensor_data = read_sensor_data(sensor_file_path, transform=self.sensor_transform)

                if gt_img_file_path.exists():
                    # Read the optional ground-truth PNG.
                    gt_image = Image.open(str(gt_img_file_path))
                    if self.image_transform:
                        gt_image = self.image_transform(gt_image)
                    elif not isinstance(gt_image, torch.Tensor):
                        gt_image = transforms.ToTensor()(gt_image)  # convert to tensor
                else:
                    gt_image = None

                # Read the required DAS PNG.
                das_image = Image.open(str(das_img_file_path))
                if self.image_transform:
                    das_image = self.image_transform(das_image)
                elif not isinstance(das_image, torch.Tensor):
                    das_image = transforms.ToTensor()(das_image)  # convert to tensor

                # Collapse 3-channel (RGB) images to a single channel.
                # NOTE: the original code compared `tensor.shape == 3`,
                # which is never true (shape is a Size, not an int), so
                # the reduction silently never ran; `.dim()` is correct.
                if gt_image is not None and gt_image.dim() == 3 and gt_image.shape[0] == 3:
                    gt_image = gt_image.mean(dim=0, keepdim=True)
                if das_image.dim() == 3 and das_image.shape[0] == 3:
                    das_image = das_image.mean(dim=0, keepdim=True)

                self.datasets.append({
                    'sensor_data': sensor_data,
                    'gt_image': gt_image,
                    'das_image': das_image,
                })
                self.names.append(stem)
                loaded_count += 1

                if loaded_count <= 3:  # print value ranges for the first few samples
                    print(f"Loaded sample {loaded_count} - Sensor data range: {sensor_data.min().item():.4f} to {sensor_data.max().item():.4f}")
                    if isinstance(gt_image, torch.Tensor):
                        print(f"Loaded sample {loaded_count} - Ground truth image range: {gt_image.min().item():.4f} to {gt_image.max().item():.4f}")
                    if isinstance(das_image, torch.Tensor):
                        print(f"Loaded sample {loaded_count} - DAS image range: {das_image.min().item():.4f} to {das_image.max().item():.4f}")
            else:
                print(f"No valid sample found for {stem}")

        print(f"Total files found: {len(sensor_file_paths)}")
        print(f"Valid samples loaded: {loaded_count}")

        if len(self.datasets) == 0:
            raise ValueError(f"No valid samples found in {self.root_filepath}. Please check that .mat files have corresponding _gt.png and _das.png files.")

    def __len__(self):
        """Number of loaded samples."""
        return len(self.datasets)

    def __getitem__(self, idx):
        """Return one sample, or category-grouped batches for a slice.

        For an integer index, returns
        ``(sensor_data, gt_image, das_image[, stem])``.
        For a slice (e.g. ``dataset[:3]``), returns the same tuple where
        each element is a list — stacked into a batch tensor when every
        entry is a tensor (a ``None`` gt_image keeps the list form).
        """
        if isinstance(idx, slice):
            indices = range(*idx.indices(len(self)))

            sensor_data_batch = []
            gt_image_batch = []
            das_image_batch = []
            stems_batch = []

            # Collect the per-index samples, grouped by category.
            for i in indices:
                sample = self._get_single_item(i)
                sensor_data_batch.append(sample[0])
                gt_image_batch.append(sample[1])
                das_image_batch.append(sample[2])
                if self.return_filestem:
                    stems_batch.append(sample[3])

            # Stack homogeneous tensor lists into batch tensors.
            if all(isinstance(x, torch.Tensor) for x in sensor_data_batch):
                sensor_data_batch = torch.stack(sensor_data_batch)
            if all(isinstance(x, torch.Tensor) for x in gt_image_batch):
                gt_image_batch = torch.stack(gt_image_batch)
            if all(isinstance(x, torch.Tensor) for x in das_image_batch):
                das_image_batch = torch.stack(das_image_batch)

            if self.return_filestem:
                return sensor_data_batch, gt_image_batch, das_image_batch, stems_batch
            return sensor_data_batch, gt_image_batch, das_image_batch
        return self._get_single_item(idx)

    def _get_single_item(self, idx):
        """Return the sample at ``idx``; raises IndexError when out of range."""
        if idx >= len(self) or idx < 0:
            raise IndexError(f"Index {idx} out of range for dataset of size {len(self)}")

        record = self.datasets[idx]
        if self.return_filestem:
            return (
                record['sensor_data'],
                record['gt_image'],
                record['das_image'],
                self.names[idx]
            )
        return (
            record['sensor_data'],
            record['gt_image'],
            record['das_image']
        )
    

if __name__ == "__main__":
    # Smoke test: load the dataset and inspect the first batch.
    dataset_path = r'E:\code\work\pabone\datasets\pa_dataset/'

    dataset = PaUsDataset(dataset_path)
    train_loader = DataLoader(dataset, batch_size=1, shuffle=True)
    # Take only the first batch. The original list(enumerate(train_loader))[0]
    # iterated and materialized the ENTIRE loader just to read one batch.
    # NOTE(review): this 3-tuple unpack assumes every sample has a gt_image;
    # default collate would fail on a None gt — confirm against the data.
    batch_idx, (rawdata, reimage, bfim) = next(enumerate(train_loader))
    print(rawdata.size())
    print(rawdata.max())
    print(rawdata.min())
    print(len(dataset))






