#!/usr/bin/env python3
# Author: Armit
# Create Time: 2023/10/09

from pathlib import Path
from typing import *

import numpy as np
from numpy import ndarray
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt

import torch
from torch.utils.data import DataLoader, Dataset, random_split

BASE_PATH = Path(__file__).parent.absolute()
DATA_PATH = BASE_PATH / 'data' ; DATA_PATH.mkdir(exist_ok=True)
DATA_FILE = DATA_PATH / 'data.npy'


def load_data(show_hist:bool=False) -> Tuple[ndarray, ndarray]:
  ''' Load the packed dataset from DATA_FILE and split it into features X
  (first 1024 columns) and targets Y (remaining columns), log-transforming both.

  Args:
    show_hist: when True, show value histograms of X and Y for debugging.
      (The original gated this behind an always-false `if not 'debug':`;
      it is now reachable via this flag, defaulting to the old behavior.)
  Returns:
    (X, Y) ndarrays in log domain.
  '''
  XY = np.load(DATA_FILE)
  X: ndarray = XY[:, :1024]
  Y: ndarray = XY[:, 1024:]

  # value range sanitize: compress dynamic range with a log transform
  # (original `if 'log':` toggle was always true, so this is unconditional)
  # NOTE(review): np.log yields -inf/nan for non-positive entries — assumes
  # all stored values are strictly positive; TODO confirm against data.npy
  X = np.log(X)
  Y = np.log(Y)

  if show_hist:
    plt.subplot(121) ; plt.hist(X.flatten()) ; plt.title('X')
    plt.subplot(122) ; plt.hist(Y.flatten()) ; plt.title('Y')
    plt.show()

  return X, Y

def process_data(split_ratio:float=0.25, pca_dim:Optional[int]=None, random_state:Optional[int]=None) -> Tuple[ndarray, ndarray, ndarray, ndarray]:
  ''' Load the dataset, optionally PCA-reduce the features, split into
  train/test sets, and standardize features using train-set statistics.

  Args:
    split_ratio: fraction of samples held out as the test set.
    pca_dim: if truthy, reduce features to this many PCA components.
    random_state: optional seed for the train/test split; None (default)
      keeps the original non-deterministic behavior.
  Returns:
    (train_x, test_x, train_y, test_y) — features standardized, targets raw.
  '''
  X, Y = load_data()

  if pca_dim:   # optional PCA dimensionality reduction
    # NOTE(review): PCA is fitted on the FULL dataset before the split, which
    # leaks test-set statistics into the features; kept as-is to preserve
    # behavior — consider fitting PCA on the train split only.
    pca = PCA(n_components=pca_dim)
    newX = pca.fit_transform(X)
    print('sum(explained_variance_ratio_):', sum(pca.explained_variance_ratio_))
  else:
    newX = X

  # split first, then standardize — scaler sees train-set statistics only
  train_X, test_X, train_y, test_y = train_test_split(newX, Y, test_size=split_ratio, random_state=random_state)
  stand = StandardScaler()
  stand.fit(train_X)
  train_x = stand.transform(train_X)
  test_x  = stand.transform(test_X)
  return train_x, test_x, train_y, test_y


class MyDataset(Dataset):
  ''' Minimal torch Dataset over a pair of aligned numpy arrays. '''

  def __init__(self, x:ndarray, y:ndarray):
    # from_numpy shares memory with the source arrays (no copy, dtype kept)
    self.x, self.y = torch.from_numpy(x), torch.from_numpy(y)

  def __len__(self):
    # number of samples = size of the leading axis
    return self.x.shape[0]

  def __getitem__(self, idx):
    return self.x[idx], self.y[idx]


def get_dataloaders(batch_size:int=128, split_ratio=0.25, pca_dim=None) -> Tuple[DataLoader, DataLoader]:
  ''' Build shuffled train/test DataLoaders over the processed dataset.

  Args:
    batch_size: samples per batch for both loaders.
    split_ratio: fraction of samples held out as the test set.
    pca_dim: optional PCA target dimension, forwarded to process_data.
  Returns:
    (train_loader, test_loader)
  '''
  train_x, test_x, train_y, test_y = process_data(split_ratio, pca_dim)
  def make_loader(x:ndarray, y:ndarray) -> DataLoader:
    # both loaders shuffle, matching the original construction
    return DataLoader(MyDataset(x, y), batch_size, shuffle=True)
  return make_loader(train_x, train_y), make_loader(test_x, test_y)


if __name__ == '__main__':
  # smoke check: print the shapes of the four processed splits
  # (same order as the return: train_x, test_x, train_y, test_y)
  for split in process_data():
    print(split.shape)
