#!/usr/bin/env xonsh
from concurrent.futures import ThreadPoolExecutor
from fire import Fire
from multiprocessing import cpu_count
from os.path import basename
from sqlalchemy import create_engine,types
import pandas as pd

def parse(filepath):
  """Decompress one ``*.csv.zst`` file and bulk-load it into Postgres.

  The CSV is decompressed next to the source file, streamed into a table
  named after the file's base name (columns lowercased), then the
  decompressed copy is removed — even if loading fails partway.

  :param filepath: path to a ``*.csv.zst`` file.
  """
  csvfile = filepath[:-4]            # strip the ".zst" suffix -> plain .csv path
  filename = basename(csvfile)[:-4]  # table name: base name minus ".csv"
  # xonsh subprocess mode: decompress alongside the source file.
  zstdcat @(filepath) > @(csvfile)
  # NOTE(review): credentials are hard-coded in the DSN — consider moving
  # them to environment variables.
  engine = create_engine('postgresql://root:hy7gzy@172.18.0.3:5432/db')
  chunksize = 500000
  try:
    # Stream in chunks so arbitrarily large CSVs never sit in memory whole.
    # (renamed from `iter`, which shadowed the builtin)
    reader = pd.read_csv(csvfile, chunksize=chunksize)
    for pos, df in enumerate(reader):
      print(filepath, pos*chunksize)
      df.columns = [c.lower() for c in df.columns]
      # First chunk replaces any existing table; subsequent chunks append.
      if_exists = 'append' if pos else 'replace'
      df.to_sql(
        filename, engine, index=False, if_exists=if_exists,
        dtype=dict(
          model=types.String,
          sub_model=types.String,
          mcc=types.String,
          phone=types.BigInteger,
          create_time=types.Integer
        )
      )
  finally:
    engine.dispose()  # release pooled DB connections even on failure
    rm @(csvfile)     # always clean up the decompressed copy

@Fire
def main(filepath=None):
  """CLI entry point (the ``@Fire`` decorator runs it at import time).

  With no argument, load every ``*.csv.zst`` under /root/zst concurrently
  using a thread pool; with a path, load just that one file.

  :param filepath: optional single ``*.csv.zst`` file to load.
  """
  if filepath is None:
    with ThreadPoolExecutor(max_workers=cpu_count()) as executor:
      futures = [
        executor.submit(parse, path)
        for path in $(find /root/zst/*.csv.zst -type f).split()
      ]
      # Surface worker exceptions: a bare submit() silently swallows any
      # error raised inside parse().
      for future in futures:
        future.result()
  else:
    parse(filepath)
