from torch.utils.data import DataLoader, Dataset, random_split
from torch.utils.data.distributed import DistributedSampler

import pytorch_lightning as pl

from torch.utils.data import Dataset
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
import torch
from utils import logger
from utils.galaxy_data_utils.transform_util import *

img_size = 256
overlap_ratio = 0


def average_pool_2d(image, pool_size):
    """
    Average-pool a single 2-D image (compatible with older NumPy versions).

    :param image: 2-D array of shape (H, W)
    :param pool_size: (pool_h, pool_w); H and W must divide evenly by these
    :return: pooled array of shape (H // pool_h, W // pool_w)
    """
    height, width = image.shape
    pool_h, pool_w = pool_size

    # Both dimensions must split exactly into pooling windows.
    assert height % pool_h == 0 and width % pool_w == 0, "图像尺寸不能被池化大小整除"

    # Group pixels into (pool_h x pool_w) windows, then average each window.
    windows = image.reshape(height // pool_h, pool_h, width // pool_w, pool_w)
    return windows.mean(axis=(1, 3))

def batch_average_pool(dataset, pool_size):
    """
    Average-pool every image in a batch.

    Generalized: accepts any (N, H, W) batch whose H and W are divisible by
    the pool size (previously hard-coded to 2048x2048 inputs), and pools the
    whole batch with one vectorized reshape+mean instead of a Python loop.

    :param dataset: array of shape (N, H, W)
    :param pool_size: (pool_h, pool_w)
    :return: array of shape (N, H // pool_h, W // pool_w), same dtype as input
    """
    N, H, W = dataset.shape
    ph, pw = pool_size
    assert H % ph == 0 and W % pw == 0, "图像尺寸不能被池化大小整除"

    # One reshape + mean over the window axes pools the entire batch at once.
    pooled = dataset.reshape(N, H // ph, ph, W // pw, pw).mean(axis=(2, 4))
    # Match the original behavior of writing means into a dataset.dtype buffer.
    return pooled.astype(dataset.dtype, copy=False)

def sliding_window_crop(dataset, patch_size, overlap_ratio):
    """
    Crop each image in the batch into square patches with a sliding window.

    Generalized: works for any square (N, H, W) input. Previously the patch
    count was computed from a hard-coded 2048, so any other image size would
    have produced a wrong-sized output buffer.

    :param dataset: array of shape (N, H, W) with H == W
    :param patch_size: side length of each square patch
    :param overlap_ratio: overlap fraction between neighbouring patches (0 <= r < 1)
    :return: array of shape (N * num_patches, patch_size, patch_size)
    """
    N, H, W = dataset.shape
    assert H == W, "images must be square"
    patch_size = int(patch_size)

    stride = int(patch_size * (1 - overlap_ratio))
    # Guard: overlap_ratio close to 1 would make stride 0 and loop forever.
    assert stride > 0, "overlap_ratio too large: stride must be positive"

    # Bug fix: derive the window count from H, not from the literal 2048.
    num_patches_per_side = (H - patch_size) // stride + 1
    total_patches = num_patches_per_side * num_patches_per_side

    patches = np.empty((N * total_patches, patch_size, patch_size),
                       dtype=dataset.dtype)

    idx = 0
    for i in range(N):
        img = dataset[i]
        for y in range(0, H - patch_size + 1, stride):
            for x in range(0, W - patch_size + 1, stride):
                patches[idx] = img[y:y + patch_size, x:x + patch_size]
                idx += 1

    return patches

def load_fits(file_path):
    """
    Load a FITS file and return its primary HDU data as a NumPy array.

    :param file_path: path to the FITS file
    :return: data array, or None when the file holds no data or reading fails
    """
    print("---Loading fits file from SDC3a dataset...")
    try:
        with fits.open(file_path) as hdul:
            data = hdul[0].data
            if data is None:
                # Bug fix: data.shape used to be printed before this check,
                # so an empty HDU raised AttributeError instead of reporting.
                print(f"错误：文件 {file_path} 中没有有效的数据！")
                return None
            print(data.shape)
            if data.dtype.byteorder == '>':
                print("[Warning] Big-endian detected, converting to native byte order.")
                # ndarray.newbyteorder() was removed in NumPy 2.0; the
                # byteswap + dtype view keeps values while going native-endian.
                data = data.byteswap().view(data.dtype.newbyteorder())
            data_array = np.array(data)
            print(data_array.shape)
            return data_array
    except Exception as e:
        # Broad catch kept deliberately: astropy raises a variety of errors
        # and callers treat any failure as "no data".
        print(f"错误：读取文件 {file_path} 时发生异常：{e}")
        return None
        

class SDC3aDataset(Dataset):
    """
    SDC3a dataset loaded from FITS files (dirty images and their PSFs).

    Each item is a (img, {"psf": psf}) pair where both tensors have shape
    (1, H, W); the PSF is optionally re-centred and L1-normalized.
    """
    def __init__(self,
            dset_name,
            data_path_imgs,
            data_path_psf,
            img_res,
            cut_rate,
            pre_normalize,
            need_process,
            ):
        # Patch resolution: raw images (e.g. 2048x2048) are cropped into
        # patches of side img_res / cut_rate.
        self.img_res = img_res / cut_rate
        self.cut_rate = cut_rate
        # Explicit defaults so missing paths fail clearly later, not with
        # AttributeError on an attribute that was never created.
        self.img_dataset = None
        self.psf_dataset = None

        if data_path_psf:
            psf_fits_data = load_fits(data_path_psf)
            self.psf_dataset = psf_fits_data
            if need_process:
                # Downsample the PSF to match the cropped patch resolution.
                self.psf_dataset = batch_average_pool(psf_fits_data, (cut_rate, cut_rate))
            # Bug fix: was print("psf dataset: {}", shape) — the "{}" was
            # never formatted; use an f-string.
            print(f"psf dataset: {self.psf_dataset.shape}")

        if data_path_imgs:
            img_fits_data = load_fits(data_path_imgs)
            self.img_dataset = img_fits_data
            if need_process:
                self.img_dataset = sliding_window_crop(img_fits_data, self.img_res, overlap_ratio)
            print(f"img dataset: {self.img_dataset.shape}")

        # Whether __getitem__ applies L1 normalization to the PSF.
        self.pre_normalize = pre_normalize

    def __getitem__(self, idx):
        """
        Return the (img, {"psf": psf}) pair at *idx*; both tensors (1, H, W).

        :raises IndexError: when idx is outside [0, len(self)).
        """
        if idx < 0 or idx >= len(self):
            raise IndexError("Index out of range")
        img = self.img_dataset[idx]
        # cut_rate**2 consecutive patches are crops of the same original
        # image, so they all share one PSF entry.
        idx_psf = int(idx / (self.cut_rate * self.cut_rate))
        psf = self.psf_dataset[idx_psf]

        # Convert to tensors with an explicit channel dimension.
        img = torch.tensor(img, dtype=torch.float32).unsqueeze(0)  # (1, H, W)
        psf = torch.tensor(psf, dtype=torch.float32).unsqueeze(0)  # (1, K, K)

        # Re-centre the PSF peak via an FFT phase shift when it sits more
        # than one pixel away from the geometric centre.
        _, H, W = psf.shape
        flat_psf = psf.flatten()
        max_idx = torch.argmax(flat_psf)
        py = max_idx // W
        px = max_idx % W
        cy, cx = H // 2, W // 2
        if abs(py - cy) > 1 or abs(px - cx) > 1:
            PSF_fft = torch.fft.fft2(psf)
            # Bug fix: the frequency grids were shaped (1,1,H,1)/(1,1,1,W),
            # which broadcast the (1,H,W) PSF up to 4-D (1,1,H,W). Use 3-D
            # views so the shifted PSF keeps its (1,H,W) shape.
            yfreq = torch.fft.fftfreq(H).view(1, -1, 1)
            xfreq = torch.fft.fftfreq(W).view(1, 1, -1)
            phase = torch.exp(-2j * torch.pi * (yfreq * (cy - py) + xfreq * (cx - px)))
            psf = torch.fft.ifft2(PSF_fft * phase).real

        if self.pre_normalize:
            # L1-normalize the PSF so its entries sum to 1.
            psf_sum = psf.sum()
            if psf_sum < 1e-6:
                logger.log(f"PSF at index {idx} has near-zero sum, applying zero normalization.")
                psf = torch.zeros_like(psf)
            else:
                psf = psf / (psf_sum + 1e-8)  # guard against division by zero

        data_dict = {
            "psf": psf
        }

        return img, data_dict

    def __len__(self):
        """
        Number of image patches in the dataset.
        """
        return len(self.img_dataset)


def load_data(
        data_dir,
        data_info_list_path,
        batch_size,
        random_flip=False,
        is_distributed=False,
        is_train=False,
        mask_type=None,
        center_fractions=None,
        accelerations=None,
        post_process=None,
        num_workers=0,
):
    """
    Build the SDC3a dataset and yield batches from it forever.

    Training mode loads the full 2048x2048 cubes and crops them on the fly;
    test mode loads pre-cropped 256x256 cubes. Many parameters (random_flip,
    mask_type, ...) are accepted for interface compatibility but unused here.

    :return: an infinite generator of DataLoader batches
    """
    pl.seed_everything(42)  # fixed global seed for reproducibility
    if is_train:
        dataset = SDC3aDataset(dset_name="SDC3a",
                           data_path_imgs="/data/mlc/SKA-SDC-dataset/ZW3.msw_image_without_zero.fits",
                           data_path_psf="/data/mlc/SKA-SDC-dataset/ZW3.msw_psf_without_zero.fits",
                           img_res=2048,
                           cut_rate=8,
                           pre_normalize=True,
                           need_process=True)
    else:
        print("batch_size:", batch_size)
        dataset = SDC3aDataset(dset_name="SDC3a",
                           data_path_imgs="/data/mlc/SKA-SDC-dataset/dirty_3.fits",
                           data_path_psf="/data/mlc/SKA-SDC-dataset/psf_3.fits",
                           img_res=256,
                           cut_rate=1,
                           pre_normalize=True,
                           need_process=False)

    # Bug fix: data_sampler was never initialized in the non-distributed
    # test branch, raising NameError at DataLoader construction. The loader
    # setup is also identical in both modes, so build it once here.
    data_sampler = None
    if is_distributed:
        data_sampler = DistributedSampler(dataset)
    loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=(data_sampler is None) and is_train,  # sampler handles shuffling when distributed
        sampler=data_sampler,
        num_workers=num_workers,
        drop_last=is_train,
        pin_memory=True,
    )
    # Infinite generator: cycle through the loader forever.
    while True:
        yield from loader

if __name__ == "__main__":
    # Smoke test: load the small dirty/psf cubes and report the dataset size.
    # Bug fix: need_process is a required positional parameter of
    # SDC3aDataset.__init__; omitting it raised TypeError. The 256x256 cubes
    # here need no cropping/pooling, so pass False.
    SDC3a = SDC3aDataset(dset_name="SDC3a",
                         data_path_imgs="/data/mlc/SKA-SDC-dataset/dirty.fits",
                         data_path_psf="/data/mlc/SKA-SDC-dataset/psf.fits",
                         img_res=256,
                         cut_rate=1,
                         pre_normalize=True,
                         need_process=False)

    print(len(SDC3a))
