#!/usr/bin/env python3
# Author: Armit
# Create Time: 2022/11/18 

import pickle as pkl
from pathlib import Path
from typing import Tuple, List

import numpy as np
import pandas as pd

DATA_FILE = '22D_UR.csv' # newly adds the Prop3 dimension

DATA_PATH = 'data'
OUT_PATH  = 'out'
IMG_PATH  = 'img'
LOG_PATH  = 'log'

FEATURE_CAT = [
  'Prop', 'Prop3', 
  'Kind', 'Dist', 'Pair',
]
FEATURE_NUM = [
  'Ave_s1', 'Max_s1', 'Min_s1', 'RMSD_s1', 'CV_s1', 
  'Ave_s2', 'Max_s2', 'Min_s2', 'RMSD_s2', 'CV_s2', 'Nump_s', 
  'Ave_v',  'Max_v',  'Min_v',  'RMSD_v',  'CV_v',  'Nump_v', # conformation count is now a probability
]
FEATURE_ALL = FEATURE_CAT + FEATURE_NUM
TARGET      = 'Prop3'

DTYPE_STORAGE = {
  'num': np.float16,
  'cat': np.uint8,
}
DTYPE_PROCESS = {
  'num': np.float32,
  'cat': np.int32,
}


def get_df() -> pd.DataFrame:
  '''Load the preprocessed dataset pickled under OUT_PATH by the data-prep step.'''
  fp = Path(OUT_PATH) / 'data.pkl'
  print(f'[get_df] load data from {fp}')  # fixed typo: "form" -> "from"
  df = pd.read_pickle(fp)
  return df


def normalize(df:pd.DataFrame) -> pd.DataFrame:
  '''Normalize columns in place: cast categorical features to int, and
  z-score numerical features in log-space.

  Numerical pipeline: log-transform (small offset keeps log(0) finite),
  clip to mean +/- 3*std to limit outliers, then standardize to ~N(0,1).
  NOTE(review): np.log yields NaN for values <= -1e-5 — presumably all
  numeric features are non-negative; confirm upstream.
  '''
  for col in df.columns:
    if col in FEATURE_CAT:
      df[col] = df[col].astype(DTYPE_PROCESS['cat'])

    elif col in FEATURE_NUM:
      v = df[col].astype(DTYPE_PROCESS['num'])

      # log transform; +1e-5 avoids log(0)
      v_log = np.log(v + 1e-5)

      # dynamic limit: clip outliers to mean +/- 3*std
      EX = v_log.mean()
      DX = v_log.std()
      v_clip = v_log.clip(EX - 3 * DX, EX + 3 * DX)

      # N(0,1) normalize; guard DX == 0 (constant column) so the
      # division does not turn the whole column into NaN
      if DX == 0: DX = 1
      df[col] = (v_clip - EX) / DX

  return df


def get_data(limit:int=-1, features:List[str]=FEATURE_NUM, target:str=TARGET, 
             sample_method:str='same', df:pd.DataFrame=None, shuffle=True) -> Tuple[pd.DataFrame, pd.Series]:
  '''Load, subsample, normalize and split the dataset into (X, Y).

  limit:         keep at most this many rows (<= 0 means keep all)
  features:      feature columns for X (target is excluded automatically)
  target:        label column for Y
  sample_method: 'naive'    = uniform random sampling
                 'stratify' = stratified sampling (preserve class distribution)
                 'same'     = equal-count sampling (same size per class)
  df:            optional pre-loaded dataframe (skips get_df())
  shuffle:       shuffle rows with a fixed seed before normalization

  raises ValueError for an unrecognized sample_method.
  '''
  # load data
  df = df if df is not None else get_df()

  # filter columns; build a NEW list instead of features.remove(target) —
  # the default argument is the module-level FEATURE_NUM, and removing in
  # place would permanently corrupt it (and any caller-passed list) across calls
  features = [f for f in features if f != target]
  df = df[features + [target]]

  # subset sampling
  print(f'  total samples: {len(df)}')
  if limit > 0:
    n_values = cat_dict.get_cat_ord(target)
    # per-class row counts of the full set, used for the sampled-fraction report
    cat_cnt = { c: sum(df[target] == c) for c in range(n_values) }

    if sample_method == 'naive':        # uniform random sampling
      df = df.sample(limit)
      for c in range(n_values):
        len_df_cat = sum(df[target] == c)
        print(f'  subset samples on {cat_dict.get_cat_name(target, c)}: {len_df_cat} ({len_df_cat / cat_cnt[c]:.3%})')
    elif sample_method == 'stratify':   # stratified sampling (preserve class distribution)
      ls = []
      for c in range(n_values):
        n = int(sum(df[target] == c) / len(df) * limit)
        df_cat = df[df[target] == c].sample(n=n)
        ls.append(df_cat)
        print(f'  subset samples on {cat_dict.get_cat_name(target, c)}: {len(df_cat)} ({len(df_cat) / cat_cnt[c]:.3%})')
      df = pd.concat(ls)
    elif sample_method == 'same':       # equal-count sampling (same size per class)
      ls = []
      for c in range(n_values):
        n = limit // n_values
        df_cat = df[df[target] == c].sample(n=n)
        ls.append(df_cat)
        print(f'  subset samples on {cat_dict.get_cat_name(target, c)}: {len(df_cat)} ({len(df_cat) / cat_cnt[c]:.3%})')
      df = pd.concat(ls)
    else:
      # fixed typo in the message: "unkonwn" -> "unknown"
      raise ValueError(f'unknown sample_method {sample_method}, should be one of ["naive", "same", "stratify"]')
    print(f'  sample by method {sample_method!r} in total: {len(df)}')

  # shuffle (fixed seed for reproducibility)
  if shuffle:
    df = df.sample(frac=1.0, random_state=42)

  # value norm
  df = normalize(df)

  # split df => (X, Y)
  X, Y = df[features], df[target]

  print('X.shape:', X.shape)
  print('Y.shape:', Y.shape)

  return X, Y


class CatDict:
  '''Bidirectional lookup between category value names and integer ids.

  Tables are loaded from the pickled dict at <OUT_PATH>/cat_dict.pkl, whose
  entries map a category column name to its ordered list of value names.
  '''

  def __init__(self):
    self.fp = Path(OUT_PATH) / 'cat_dict.pkl'
    self.name2id = { }
    self.id2name = { }

    self.load()

  def load(self):
    '''Populate both lookup tables from the pickle file.'''
    with open(self.fp, 'rb') as fh:
      raw = pkl.load(fh)
    for cat, values in raw.items():
      pairs = list(enumerate(values))
      self.id2name[cat] = dict(pairs)
      self.name2id[cat] = { name: idx for idx, name in pairs }

  def get_cat_ord(self, cat:str) -> int:
    '''Number of distinct values of category `cat`.'''
    return len(self.name2id[cat])

  def get_cat_name(self, cat:str, id:int) -> str:
    '''Value name for integer `id` of category `cat`.'''
    return self.id2name[cat][id]

  def get_cat_id(self, cat:str, name:str) -> int:
    '''Integer id for value `name` of category `cat`.'''
    return self.name2id[cat][name]


cat_dict = CatDict()


if __name__ == '__main__':
  # quick sanity check: print the per-class sample count for the target column
  df = get_df()
  n_cls = cat_dict.get_cat_ord(TARGET)
  for i in range(n_cls):
    name = cat_dict.get_cat_name(TARGET, i)
    cnt = sum(df[TARGET] == i)
    print(f'  {i} => {name}: {cnt}')
