# coding=utf-8
# Copyright (C) idata team - All Rights Reserved
#
# @Version:   3.10.9
# @Software:  PyCharm
# @FileName:  datamodule.py
# @CTime:     2023/6/1 10:36   
# @Author:    yhy
# @Email:     yhy@cyber.com
# @UTime:     2023/6/1 10:36
#
# @Description:
#     Skeleton PyTorch Lightning DataModule: defines the standard hooks
#     (prepare_data / setup / *_dataloader / teardown) to be filled in
#     with project-specific dataset logic.
#
import logging
from typing import Any, NewType, Optional

import pytorch_lightning as pl
from pytorch_lightning.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS
logger = logging.getLogger(__name__)


class DataModule(pl.LightningDataModule):
    """Skeleton ``LightningDataModule``.

    Captures every keyword argument as a hyperparameter (available afterwards
    as ``self.hparams.<key>``). Fill in the dataset download, split and
    dataloader logic for your project in the hooks below.
    """

    def __init__(self, **cfg: Any) -> None:
        # The base initializer must run first: Lightning sets up its internal
        # state (hparams container, trainer references) there. The original
        # code skipped this call, which breaks LightningDataModule.
        super().__init__()
        # Persist all keyword arguments under self.hparams (also stored in
        # checkpoints automatically).
        self.save_hyperparameters()

    def prepare_data(self) -> None:
        # Download, tokenize, etc...
        # Called only within a single process on CPU, so you can safely add
        # your downloading logic within. Do NOT assign instance state here —
        # in distributed training it will not be visible to other processes.
        pass

    def setup(self, stage: str) -> None:
        # Per-process (per-GPU) data preparation. Use setup() to do things like:
        #   - count number of classes / build vocabulary
        #   - perform train/val/test splits
        #   - create datasets
        #   - apply transforms (defined explicitly in your datamodule)
        #
        # Example — requires `from torchvision.datasets import MNIST` and
        # `from torch.utils.data import random_split` (neither is imported by
        # this file, so the example stays commented out):
        #
        #   if stage == "fit":
        #       mnist_full = MNIST(self.data_dir, train=True, transform=self.transform)
        #       self.mnist_train, self.mnist_val = random_split(mnist_full, [55000, 5000])
        #
        #   # Assign test dataset for use in dataloader(s)
        #   if stage == "test":
        #       self.mnist_test = MNIST(self.data_dir, train=False, transform=self.transform)
        #
        #   if stage == "predict":
        #       self.mnist_predict = MNIST(self.data_dir, train=False, transform=self.transform)
        pass

    def train_dataloader(self) -> TRAIN_DATALOADERS:
        # Consumed by trainer.fit(model, datamodule=dm).
        pass

    def val_dataloader(self) -> EVAL_DATALOADERS:
        # Consumed by trainer.validate(datamodule=dm).
        pass

    def test_dataloader(self) -> EVAL_DATALOADERS:
        # Consumed by trainer.test(datamodule=dm).
        pass

    def predict_dataloader(self) -> EVAL_DATALOADERS:
        # Consumed by trainer.predict(datamodule=dm).
        pass

    def teardown(self, stage: str) -> None:
        # Clean-up when the run is finished.
        # Called at the end of fit (train + validate), validate, test, or predict.
        pass




if __name__ == '__main__':
    # Smoke-test: construct the datamodule with an empty configuration.
    datamodule = DataModule()