# Copyright 2021 Zhongyang Zhang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from lightning import LightningDataModule
from torch.utils.data import random_split, DataLoader
import torchvision.transforms as transforms

from data.pa_us_dataset import PaUsDataset
from utils.sensors import preprocess_sensor_data

class DInterface(LightningDataModule):
    """Lightning data module for paired PA/US image + sensor data.

    Wraps two pre-split ``PaUsDataset`` instances (one training file, one
    validation file) and exposes the corresponding dataloaders. Images are
    resized to 256x128 and normalized to [-1, 1]; sensor data is normalized
    via ``preprocess_sensor_data`` to a fixed (2560, 64) target shape.
    """

    def __init__(self, train_file, val_file,
                 modality='US',
                 train_val_ratio=0.8,
                 num_workers=2,
                 batch_size=32,
                 pin_memory=True,
                 persistent_workers=True):
        """Store configuration and build the image/sensor transforms.

        Args:
            train_file: Path to the training split file consumed by PaUsDataset.
            val_file: Path to the validation split file consumed by PaUsDataset.
            modality: Either 'US' or 'PA'. Validated here but not currently
                forwarded to PaUsDataset (kept for backward compatibility).
            train_val_ratio: Fraction in (0, 1]. Currently unused — the split
                is provided via the two explicit files; kept for backward
                compatibility with the previous random_split workflow.
            num_workers: DataLoader worker count.
            batch_size: DataLoader batch size.
            pin_memory: Forwarded to DataLoader.
            persistent_workers: Forwarded to DataLoader (only honored when
                num_workers > 0, since DataLoader rejects the combination
                persistent_workers=True with num_workers=0).

        Raises:
            ValueError: If ``train_val_ratio`` is outside (0, 1] or
                ``modality`` is not 'US'/'PA'. (Previously an ``assert``,
                which is stripped under ``python -O``.)
        """
        super().__init__()
        self.train_file = train_file
        self.val_file = val_file

        self.modality = modality
        self.train_val_ratio = train_val_ratio

        self.num_workers = num_workers
        self.batch_size = batch_size

        # Validate eagerly with real exceptions instead of assert statements,
        # so misconfiguration is caught even when assertions are disabled.
        if not 0 < self.train_val_ratio <= 1:
            raise ValueError(f"train_val_ratio must be in (0, 1], got {train_val_ratio}")
        if modality not in ('US', 'PA'):
            raise ValueError(f"modality must be 'US' or 'PA', got {modality!r}")

        self.image_transform = transforms.Compose([
            transforms.Resize((256, 128)),
            transforms.ToTensor(),  # scales pixel values to [0, 1]
            transforms.Normalize(mean=[0.5], std=[0.5])  # then to [-1, 1]
        ])
        self.sensor_transform = transforms.Compose([
            partial(preprocess_sensor_data, target_shape=(2560, 64))  # normalize to [-1, 1]
        ])

        self.pin_memory = pin_memory
        self.persistent_workers = persistent_workers

    def setup(self, stage: str):
        """Instantiate datasets for the given Lightning stage.

        Only the "fit" stage is implemented; "test" and "predict" raise
        NotImplementedError.
        """
        if stage == "fit":
            # Train/val splits come from two explicit files; the old
            # random_split-based workflow has been removed.
            self.train_dataset = PaUsDataset(self.train_file,
                                             image_transform=self.image_transform,
                                             sensor_transform=self.sensor_transform)
            self.val_dataset = PaUsDataset(self.val_file,
                                           image_transform=self.image_transform,
                                           sensor_transform=self.sensor_transform)

            # Log dataset sizes for a quick sanity check.
            print(f"训练集大小：{len(self.train_dataset)}")
            print(f"验证集大小：{len(self.val_dataset)}")

        if stage == "test":
            raise NotImplementedError("Not implemented")

        if stage == "predict":
            raise NotImplementedError("Not implemented")

    def _dataloader_kwargs(self):
        """Common DataLoader keyword arguments shared by train/val loaders."""
        return dict(
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=self.pin_memory,
            # DataLoader raises if persistent_workers=True while
            # num_workers == 0, so only enable it when workers exist.
            persistent_workers=self.persistent_workers and self.num_workers > 0,
        )

    def train_dataloader(self):
        """Return the shuffled training DataLoader."""
        return DataLoader(self.train_dataset, shuffle=True, **self._dataloader_kwargs())

    def val_dataloader(self):
        """Return the (unshuffled) validation DataLoader."""
        return DataLoader(self.val_dataset, shuffle=False, **self._dataloader_kwargs())
