from concurrent.futures import ThreadPoolExecutor
import math
import logging
import traceback

import pandas as pd
from sqlalchemy import create_engine


def _gen_accounting_1_engine():
  """Build a SQLAlchemy engine for the coin_accounting_20180302 database.

  NOTE(review): credentials are hard-coded in the DSN — consider moving
  them to environment variables or a secrets store.
  """
  return create_engine(
      'mysql+mysqldb://presto:cRQb#3rBS%WvE3cz@coin-db.cc4jwamep7jm.ap-northeast-2.rds.amazonaws.com/coin_accounting_20180302',
      echo=False)


def _gen_accounting_2_engine():
  """Build a SQLAlchemy engine for the coin_accounting_20180508 database.

  NOTE(review): credentials are hard-coded in the DSN — consider moving
  them to environment variables or a secrets store.
  """
  return create_engine(
      'mysql+mysqldb://coin_importer:cgxt4!%W$2f31QW$@coin-db.cc4jwamep7jm.ap-northeast-2.rds.amazonaws.com/coin_accounting_20180508',
      echo=False)


def cleanup_table_by_account(engine, table, start_dt, end_dt, account_id):
  """Thin out one account's history rows to at most one row per minute.

  Pages through the table in primary-key order (10k rows at a time),
  resamples each page's `query_timestamp` at a 60-second interval, keeps
  the first row of every interval, and deletes the rest.

  Args:
    engine: SQLAlchemy engine connected to the accounting database.
    table: one of AccountBalanceHistory, AccountPositionHistory,
        BalanceEstimateHistory, PositionEstimateHistory.
    start_dt: pandas Timestamp; pages ending before this are skipped.
    end_dt: pandas Timestamp; paging stops once a page starts after this.
    account_id: the account whose rows are thinned.

  Returns:
    int: total number of rows deleted for this account.

  Raises:
    ValueError: if `table` is not one of the known history tables.
  """
  logging.info('account_id: %s, table: %s', account_id, table)
  dropped = 0

  # Each table names its auto-increment primary key differently.
  if table == 'AccountBalanceHistory':
    primary_key = 'balance_hid'
  elif table == 'AccountPositionHistory':
    primary_key = 'position_hid'
  elif table in ('BalanceEstimateHistory', 'PositionEstimateHistory'):
    primary_key = 'hid'
  else:
    raise ValueError('Unknown table: %s' % table)

  sql1 = """
SELECT COUNT(*) As count FROM %s WHERE account_id = %s
""" % (table, '%s')

  sql2 = """
SELECT %s, query_timestamp FROM %s
WHERE account_id = %s AND %s > %s
ORDER BY %s LIMIT %s
""" % (primary_key, table, '%s', primary_key, '%s', primary_key, '%s')

  sql3 = """
DELETE FROM %s WHERE %s in (%s);
""" % (table, primary_key, '%s')

  rows_to_read = 10000
  interval_sec = 60
  interval = '%dS' % interval_sec

  sql = sql1 % (account_id)
  df_account_cnt = pd.read_sql(sql, engine)
  row_cnt = df_account_cnt['count'][0]
  iter_cnt = math.ceil(row_cnt / float(rows_to_read))
  last_max_hid = 0
  for _ in range(iter_cnt):
    sql = sql2 % (account_id, last_max_hid, rows_to_read)
    df = pd.read_sql(sql, engine)
    if df.empty:
      # COUNT(*) over-reported (e.g. rows deleted since the count was
      # taken); without this guard the .iloc calls below would raise.
      break
    df['query_timestamp'] = pd.to_datetime(df['query_timestamp'])
    first_dt = df['query_timestamp'].iloc[0]
    last_dt = df['query_timestamp'].iloc[-1]
    last_max_hid = df[primary_key].iloc[-1]
    # Assumes primary-key order roughly tracks query_timestamp — TODO
    # confirm; otherwise early pages past end_dt terminate paging too soon.
    if first_dt > end_dt:
      break
    if last_dt < start_dt:
      continue  # entire page precedes the cleanup window
    df = df.set_index('query_timestamp')
    # Keep only the first row per 60s bucket; everything else is deleted.
    # NOTE(review): a page straddling start_dt/end_dt is thinned in full,
    # including rows just outside the window — preserved original behavior.
    resampled_df = df[primary_key].resample(interval).first()
    orig_hid = set(df[primary_key])
    kept_hid = set(int(h) for h in resampled_df.dropna())
    hid_to_drop = list(orig_hid.difference(kept_hid))
    if hid_to_drop:
      # Renamed from `account_id_str`: this is a CSV of primary-key ids,
      # not account ids.
      hid_csv = ','.join(str(hid) for hid in hid_to_drop)
      engine.execute(sql3 % (hid_csv))
      logging.info('account_id: %s, dropped: %s', account_id, len(hid_to_drop))
      dropped += len(hid_to_drop)
  return dropped


def cleanup_table(engine, table, start_dt, end_dt):
  """Thin out `table` for every distinct account, fanning out over threads.

  Submits one cleanup_table_by_account task per account_id to a thread
  pool, waits for completion, and logs the total number of deleted rows.
  Per-account failures are logged and do not abort the other accounts.

  Args:
    engine: SQLAlchemy engine connected to the accounting database.
    table: history table name, forwarded to cleanup_table_by_account.
    start_dt, end_dt: pandas Timestamps bounding the cleanup window.
  """
  sql0 = """
SELECT DISTINCT(account_id) AS account_id FROM %s
""" % table

  max_workers = 16
  df = pd.read_sql(sql0, engine)
  # Use ThreadPoolExecutor instead of ProcessPoolExecutor for 2 reasons:
  # 1. sqlalchemy engine is thread-safe, but not fork-safe
  # 2. most time is spent on waiting IO
  # https://stackoverflow.com/questions/52537741/multiprocessing-psycopg2-typeerror-cant-pickle-thread-rlock-objects
  with ThreadPoolExecutor(max_workers=max_workers) as executor:
    future_list = []
    for account_id in df['account_id']:
      future = executor.submit(cleanup_table_by_account, engine, table,
                               start_dt, end_dt, account_id)
      future_list.append((future, account_id))
  # Exiting the `with` block waits for every submitted task to finish.

  dropped = 0
  for future, account_id in future_list:
    try:
      dropped += future.result()
    except Exception:
      # BUG FIX: the old call passed 4 args to a 2-placeholder format
      # string, which made the logging call itself fail. logging.exception
      # logs the message plus the full traceback (type, value, stack).
      logging.exception('Fail to clean up account_id: %s, table: %s.',
                        account_id, table)
  logging.info('Total dropped entries: %s', dropped)


def main():
  """Thin out all four history tables on the accounting-2 database."""
  engine1 = _gen_accounting_1_engine()  # NOTE(review): unused — verify before removing
  engine2 = _gen_accounting_2_engine()
  start_dt = pd.to_datetime('2021-09-01')
  end_dt = pd.to_datetime('2021-12-31')
  # BUG FIX: the original asserted against datetime.datetime(2022, 7, 21)
  # but the `datetime` module was never imported, so this line raised
  # NameError. Use pandas (already imported) instead. Also raise
  # explicitly rather than assert, which is stripped under `python -O`.
  if not (end_dt > start_dt and start_dt <= pd.Timestamp(2022, 7, 21)):
    raise ValueError('Invalid cleanup window: %s - %s' % (start_dt, end_dt))
  cleanup_table(engine2, 'AccountBalanceHistory', start_dt, end_dt)
  cleanup_table(engine2, 'BalanceEstimateHistory', start_dt, end_dt)
  cleanup_table(engine2, 'AccountPositionHistory', start_dt, end_dt)
  cleanup_table(engine2, 'PositionEstimateHistory', start_dt, end_dt)


if __name__ == '__main__':
  # Configure the root logger's level and format before any cleanup work
  # starts, so all logging calls in this module use this format.
  logging.basicConfig(level='INFO', format='%(levelname)s %(asctime)s %(name)s] %(message)s')
  main()
