#!/usr/bin/env python3
# Author: Armit
# Create Time: 2022/11/18 

import pickle as pkl
from pathlib import Path

import pandas as pd
from pandas_profiling import ProfileReport

from data import DATA_PATH, OUT_PATH, DATA_FILE, DTYPE_STORAGE, DTYPE_PROCESS
from data import FEATURE_NUM, FEATURE_CAT, FEATURE_ALL
from plot import plot_stats

# ensure the data / output directories exist before anything tries to read or write them
for _dir in (DATA_PATH, OUT_PATH):
  Path(_dir).mkdir(parents=True, exist_ok=True)


def df_memory_usage(df:pd.DataFrame) -> float:
  '''Return the deep memory footprint of *df* in MiB.'''
  n_bytes = df.memory_usage(deep=True).sum()
  return n_bytes / 2 ** 20


def preprocess():
  '''Load the raw csv, optionally de-duplicate, profile it, shrink dtypes for storage
  and save the result (plus the categorical label lists) as pickles under OUT_PATH.'''
  # log open: `with` guarantees the stats file is flushed/closed even on error
  # (the original closed it manually, but a later `with open(...) as fh:` shadowed
  # the name, so the log handle was leaked and the wrong file was closed)
  with open(Path(OUT_PATH) / 'stats.txt', 'w', encoding='utf-8') as fh:
    def log(s):
      # echo to console and persist to the stats file
      print(s)
      fh.write(s + '\n')

    # load data
    df = pd.read_csv(Path(DATA_PATH) / DATA_FILE)
    log(f'len(df): {len(df)}')
    log(f'df.columns({len(df.columns)}): {list(df.columns)}')
    log(f'df.memory_usage: {df_memory_usage(df):.3f} MB')
    print(df.head(n=5))

    # filter & sort columns; .copy() so the later per-column dtype assignments
    # operate on an owned frame, not a view (avoids SettingWithCopyWarning)
    df = df[FEATURE_ALL].copy()
    columns = list(df.columns)
    log(f'filtered df.columns({len(columns)}): {columns}')

    # ask for de-duplicate
    df_dedup = df.drop_duplicates()
    len_df, len_dfd = len(df), len(df_dedup)
    if len_df != len_dfd:
      print(f'duplicated lines detected {len_df} => {len_dfd} ({1 - len_dfd / len_df:.3%})')
      while True:
        opt = input('>> remove duplicates? (enter y or n): ').strip().lower()
        if opt == 'y':
          df = df_dedup
          log(f'de-duplicated len(df): {len(df)}')
          break
        if opt == 'n':
          break

    # make report (skipped if a previous run already produced it)
    fp = Path(OUT_PATH) / 'data.html'
    if not fp.exists():
      ProfileReport(df, minimal=True).to_file(fp)

    # dtype convert for storage
    cat_dict = {}          # 'feat_name': ['cat1', 'cat2', ...]
    for ft in columns:
      if ft in FEATURE_NUM:
        # measure the precision lost by down-casting to the storage dtype
        v_h = df[ft].astype(DTYPE_PROCESS['num'])
        v_l = df[ft].astype(DTYPE_STORAGE['num'])
        avg_d = (v_h - v_l.astype(DTYPE_PROCESS['num'])).abs().mean()
        avg_v = v_h.abs().mean()
        log(f'  {ft} is numerical: avg={v_h.mean()}, std={v_h.std()}; precision loss: {avg_d} ({avg_d / avg_v:.3%})')
        df[ft] = v_l
      elif ft in FEATURE_CAT:
        # ordinal-encode the column; keep the sorted label list so it can be decoded later
        cat_dict[ft] = cats = sorted(set(df[ft].to_list()))
        mapping = { c: i for i, c in enumerate(cats) }
        log(f'  {ft} is categorical: ord={len(cats)}')
        # map(dict) is equivalent to the lambda lookup here: every value is a key of `mapping`
        df[ft] = df[ft].map(mapping).astype(DTYPE_STORAGE['cat'])
      else:
        log(f'unknown column: {ft}')
        breakpoint()   # deliberate debug hook: stop on an unexpected schema

    log(f'reduced df.memory_usage: {df_memory_usage(df):.3f} MB')

    # save data (NOTE: the pickle handle must NOT reuse the name `fh` — that is the open log file)
    df.to_pickle(Path(OUT_PATH) / 'data.pkl')
    with open(Path(OUT_PATH) / 'cat_dict.pkl', 'wb') as fh_cat:
      pkl.dump(cat_dict, fh_cat)


def _main():
  '''Run the preprocessing pipeline, then plot the summary statistics.'''
  preprocess()
  plot_stats()


if __name__ == '__main__':
  _main()
