import os
import numpy as np
import torch.nn as nn
from typing import *
import torch
from torch.utils.data import TensorDataset,DataLoader
from .transform import UnitGaussianNormalizer
from utils import register_class
from sklearn.model_selection import train_test_split
import pytorch_lightning as pl


class MixNormalizer_8in(nn.Module):
    """Normalizer for inputs with 8 channels in the last dimension.

    The first 3 channels are positions, min-max scaled to roughly [0, 1];
    the last 5 are features, standardized to zero mean / unit variance.

    Statistics are registered as buffers so they follow the module across
    ``.to(device)`` calls and are captured in ``state_dict``.
    """

    def __init__(self,
                 pos_min,
                 pos_max,
                 feature_mean,
                 feature_std,
                 eps=1e-06):
        """
        Args:
            pos_min: per-axis minimum of the 3 position channels (len-3).
            pos_max: per-axis maximum of the 3 position channels (len-3).
            feature_mean: per-channel mean of the 5 feature channels (len-5).
            feature_std: per-channel std of the 5 feature channels (len-5).
            eps: small constant guarding against division by zero.
        """
        super().__init__()
        # Buffers (not plain attributes): they move with the module on
        # .to(device)/.cuda() and avoid a host->device copy on every call.
        self.register_buffer('pos_min', torch.as_tensor(pos_min, dtype=torch.float32))
        self.register_buffer('pos_max', torch.as_tensor(pos_max, dtype=torch.float32))
        self.register_buffer('feature_mean', torch.as_tensor(feature_mean, dtype=torch.float32))
        self.register_buffer('feature_std', torch.as_tensor(feature_std, dtype=torch.float32))
        self.register_buffer('eps', torch.as_tensor(eps, dtype=torch.float32))

    def encode(self, x):
        """Return a normalized copy of ``x`` (last dim = 3 pos + 5 feature).

        Bug fix vs. the original: the input tensor is cloned first, so the
        caller's data is no longer mutated in place (the old version
        corrupted tensors that were encoded twice).
        """
        x = x.clone()
        device = x.device
        pos_range = self.pos_max.to(device) - self.pos_min.to(device) + self.eps.to(device)
        x[..., :3] = (x[..., :3] - self.pos_min.to(device)) / pos_range
        x[..., 3:] = (x[..., 3:] - self.feature_mean.to(device)) / (self.feature_std.to(device) + self.eps.to(device))
        return x

    def decode(self, x):
        """Return a denormalized copy of ``x`` (inverse of :meth:`encode`).

        Also fixes the original's missing ``.to(device)`` on ``eps`` in the
        position branch, which was inconsistent with every other statistic.
        """
        x = x.clone()
        device = x.device
        pos_range = self.pos_max.to(device) - self.pos_min.to(device) + self.eps.to(device)
        x[..., :3] = x[..., :3] * pos_range + self.pos_min.to(device)
        x[..., 3:] = x[..., 3:] * (self.feature_std.to(device) + self.eps.to(device)) + self.feature_mean.to(device)
        return x


@register_class(name=['TrainingDataModule8in'])
class TrainingDataModule8in(pl.LightningDataModule):
    """LightningDataModule for the 8-input-channel dataset (datasetV45).

    Loads pre-saved feature/label ``.npy`` arrays, splits off a validation
    fraction, and fits the input/output normalizers on the training split.
    """

    def __init__(self,
                 data_dir: str = "dataset/datasetV45",
                 batch_size: int = 32,
                 val_size: float = 0.1,
                 no_cache: bool = False):
        """
        Args:
            data_dir: directory containing the ``.npy`` feature/label files.
            batch_size: batch size used by both train and val loaders.
            val_size: fraction of the data held out for validation.
            no_cache: caching flag; the original accepted it but silently
                dropped it — it is now stored on the instance.
        """
        super().__init__()
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.val_size = val_size
        # Bug fix: ``no_cache`` was previously discarded.
        self.no_cache = no_cache

    def setup(self, stage: Optional[str] = None):
        """Load arrays, split train/val, build datasets and normalizers."""
        # NOTE(review): the feature file is 'v45_*' but the label file is
        # 'v4_*' — confirm this mismatch is intentional.
        self.train_features = torch.from_numpy(
            np.load(os.path.join(self.data_dir, 'v45_train_feature.npy'))).to(torch.float32)
        self.train_labels = torch.from_numpy(
            np.load(os.path.join(self.data_dir, 'v4_train_label.npy'))).to(torch.float32)

        # Fixed seed keeps the split reproducible across runs/processes.
        self.train_features, self.val_features, self.train_labels, self.val_labels = train_test_split(
            self.train_features, self.train_labels, test_size=self.val_size, random_state=42
        )

        print('dataset shape')  # typo fixed: was 'datset shape'
        print('train_features: ', self.train_features.shape)
        print('train_labels: ', self.train_labels.shape)
        print('val_features: ', self.val_features.shape)
        print('val_labels: ', self.val_labels.shape)

        self.train_dataset = TensorDataset(self.train_features, self.train_labels)
        self.val_dataset = TensorDataset(self.val_features, self.val_labels)

        self.setup_normalizer()

    def setup_normalizer(self):
        """Fit input/output normalizers on the *training* split only."""
        # Inputs: min-max over the 3 position channels, z-score over the
        # 5 feature channels; statistics reduce over batch and point dims.
        self.data_normalizer_x = MixNormalizer_8in(
            pos_min=np.min(self.train_features[..., :3].numpy(), axis=(0, 1)),
            pos_max=np.max(self.train_features[..., :3].numpy(), axis=(0, 1)),
            feature_mean=np.mean(self.train_features[..., 3:].numpy(), axis=(0, 1)),
            feature_std=np.std(self.train_features[..., 3:].numpy(), axis=(0, 1)),
        )

        # Outputs: scalar z-score normalizer over all label values.
        self.data_normalizer_y = UnitGaussianNormalizer(
            mean=np.mean(self.train_labels.numpy()),
            std=np.std(self.train_labels.numpy()))

        # Aliases kept for callers that use the shorter attribute names.
        self.normalizer_x = self.data_normalizer_x
        self.normalizer_y = self.data_normalizer_y

    def train_dataloader(self):
        """Shuffled training loader."""
        return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=4)

    def val_dataloader(self):
        """Deterministic (unshuffled) validation loader."""
        return DataLoader(self.val_dataset, batch_size=self.batch_size, shuffle=False, num_workers=4)
