#!/bin/bash
#
# do.sh
# Author: Andy Chu
#
# Benchmark/test shell scripts
#
# TODO:
# 
# - make all the function names match?
#
# - write JS
# - write Go -- meh, don't think it's worth it
# - write Julia (interesting type system)
#
# benchmarks?
# - test performance of R append() vs  [length+1] ?

# Write a message to stderr (keeps stdout clean for real output).
log() {
  echo "$@" >&2
}

# Log a fatal error to stderr and abort the script with status 1.
# Uses "$*" rather than "$@": mixing "$@" into a string is
# ill-defined (ShellCheck SC2145); output is unchanged.
die() {
  log "FATAL: $*"
  exit 1
}

# Print the message framed by horizontal rules, followed by a blank line.
banner() {
  local rule='--------------------------------------------------------------'
  echo "$rule"
  echo "$@"
  echo "$rule"
  echo
}

#
# Util
#

# Count source lines of all R and Python files, skipping _tmp artifacts.
line-count() {
  find . \( -name '*.R' -o -name '*.py' \) | grep -v _tmp | xargs wc -l
}

# Run an R script non-interactively.
# $1   - path to the R script
# rest - arguments forwarded to the script after --args
# Should probably change to using a shebang in the scripts instead.
run() {
  local prog=$1  # 'local' so we don't clobber a global 'prog'
  shift
  R --vanilla --slave -f "$prog" --args "$@"
}

# Run a unit test command inside a directory and report PASS/FAIL.
# $1 - directory to enter
# $2 - test command to execute there
unit-test() {
  local dir=$1
  local name=$2
  # Silence pushd's stack printout (popd already was silenced); bail
  # out rather than running the test in the wrong directory.
  pushd "$dir" > /dev/null || return 1
  "$name"
  local exit_code=$?  # capture immediately, before echo resets $?
  echo
  if test "$exit_code" -eq 0; then
    echo PASS
  else
    echo FAIL
  fi
  popd >/dev/null
}


#
# JavaScript
#

# TODO: Change this to run node.js and phantom.js, not the v8 shell.
# Sibling json-template checkout; provides jsunity-0.6.js and the
# prebuilt v8 shell used by the JS test runners below.
readonly JS_DIR=../json-template/javascript

# Run the prebuilt v8 shell bundled with json-template.
v8shell() {
  # Quote the path expansion (ShellCheck SC2086).
  "$JS_DIR/v8shell/linux-i686/shell" "$@"
}

# Run a locally-installed phantomjs 1.6.0 binary.
phantomjs() {
  # "$HOME" instead of an unquotable ~ so the whole path can be quoted.
  "$HOME/dev/phantomjs/phantomjs-1.6.0-linux-i686-dynamic/bin/phantomjs" "$@"
}

# Run the JS unit tests under node.js.
js-unit-node() {
  # Module search path: the JSON Template dir plus our javascript/
  # dir (which holds tnet.js).  The module is evaluated first, then
  # the test.
  NODE_PATH="$JS_DIR:javascript"
  export NODE_PATH

  node javascript/tnet_test.js
}

# Run the JS unit tests under the v8 shell.
js-unit-shell() {
  # Quote the $JS_DIR expansion (ShellCheck SC2086).
  v8shell "$JS_DIR/jsunity-0.6.js" javascript/tnet.js javascript/tnet_test.js
}

# Run the JS unit tests under phantomjs.
js-unit-phantomjs() {
  # Hack for phantomjs: inject the library scripts after the test file.
  # Quote the $JS_DIR expansion (ShellCheck SC2086).
  phantomjs javascript/tnet_test.js "$JS_DIR/jsunity-0.6.js" javascript/tnet.js
}

# Run the JS unit tests under every interpreter, stopping at the
# first failure.
js-unit-all() {
  set -e  # same as 'set -o errexit'
  js-unit-shell
  js-unit-node
  js-unit-phantomjs
}

#
# R
#

# Run the R unit test (R/tnet_test.R) via the generic unit-test helper.
r-unit-test() {
  unit-test R ./tnet_test.R
}

# Dump the same data with each R JSON library, then compare the
# resulting file sizes in bytes.
compare-json-size() {
  #run benchmark.R > benchmark.txt
  run R/rjsonio.R > _tmp/rjsonio.txt
  run R/rjson.R > _tmp/rjson.txt
  wc -c _tmp/*.txt
}

# Run benchmark.R, forwarding all arguments after --args.
# NOTE: uses a relative path, so callers pushd into R/ first
# (see r-bench / load-bench / dump-bench).
benchmark() {
  ./benchmark.R --args "$@"
}

# Compare RJSONIO, csv, TNET.  RJSONIO must be installed.

# Dump the airquality data set with each serializer (for the size
# comparison at the end), then run timed benchmarks for each.
# The quoted strings like 'time=T; transpose=F;' are presumably R
# snippets evaluated by benchmark.R -- verify against R/benchmark.R.
# NOTE(review): "tranpose" in the first two filenames is a typo, but
# the 'wc -c *.json *.tnet *.csv' glob below picks them up regardless.
r-bench() {
  pushd R

  # First use RJSONIO to create the text files.

  benchmark RJSONIO airquality '' > ../_tmp/airquality.tranpose.json
  benchmark tnet.R airquality '' > ../_tmp/airquality.tranpose.tnet
  benchmark RJSONIO airquality 'transpose=F' > ../_tmp/airquality.json
  benchmark tnet.R airquality 'transpose=F' > ../_tmp/airquality.tnet
  benchmark csv airquality 'transpose=F' > ../_tmp/airquality.csv

  # Then do a benchmark
  echo
  echo 'RJSONIO.toJSON airquality'
  benchmark RJSONIO airquality 'time=T;'

  echo
  echo 'tnet.dump airquality'
  benchmark tnet.R airquality 'time=T;'

  echo
  echo 'RJSONIO airquality (column order)'
  benchmark RJSONIO airquality 'time=T; transpose=F;'

  echo
  echo 'tnet airquality by (column order)'
  benchmark tnet.R airquality 'time=T; transpose=F;'

  echo
  echo 'csv airquality by (native)'
  benchmark csv airquality 'time=T; transpose=F'

  # Compare the sizes of the files written above.
  echo
  pushd ../_tmp
  wc -c *.json *.tnet *.csv
  popd

  popd
}

# 4/2013: don't think this is working
# Benchmark loading (do.load=T) via benchmark.R.
load-bench() {
  # NOTE(review): set -x is never turned back off, so command tracing
  # stays enabled for the rest of the shell session after this runs.
  set -x
  pushd R
  benchmark tnet.R airquality 'time=T; do.load=T'
  popd
}

# Run benchmark.R's 'dump-bench' action from within R/.
dump-bench() {
  pushd R
  benchmark dump-bench
  popd
}

# Experiment: feed tnet-encoded data to R through a fifo.
rcmd() {
  # NOTE(review): the fifo 'req' is never removed, so a second run
  # fails on mkfifo.  Nothing here visibly connects R/tnet.R to the
  # fifo -- presumably tnet.R reads ./req itself; verify.
  mkfifo req
  # Background writer: a tnet payload with trailing garbage ('EXTRA'),
  # presumably to exercise error handling.
  echo -n '16:1:a,1:3#1:b,1:4#}EXTRA' > req &
  R/tnet.R --args load
}

#
# Python:
#

# Run the Python unit test (python/tnet_test.py) via the generic helper.
py-unit-test() {
  unit-test python ./tnet_test.py
}

# Benchmark the Python tnet implementation on testdata/test.json.
# Historical numbers:
# 153 rows
# 100 iterations, 500ms
# 5ms per iteration.  R was 93ms, or almost 20x slower
py-bench() {
  python/benchmark.py testdata/test.json "$@"
}

# Smoke-test the 'tnet to-json' tool: one clean payload, then one with
# trailing garbage ('EXTRA'), presumably to exercise error handling.
pycmd() {
  echo -n '16:1:a,1:3#1:b,1:4#}' | tnet to-json
  echo -n '16:1:c,1:3#1:d,1:4#}EXTRA' | tnet to-json
}


#
# Multi-language
#

# Scratch directory for the multi-language correctness/perf tests.
WORK_DIR=_tmp/multi_test

# Generate the multi-language test scripts.  Presumably writes the
# tnet_test_cases.* that multi-test runs -- verify.
gen-test-code() {
  # Generate code.  Needs jsontemplate from ./BUILD.sh deps.
  PYTHONPATH=../json-template/python ./tnet_multi_test.py
}


# Test correctness of Python vs R.

# Dump every test case with both implementations, diff the two output
# trees, then load the dumps back into each language.  errexit makes
# any failing step abort the script.
multi-test() {
  set -o errexit
  mkdir -p $WORK_DIR
  mkdir -p $WORK_DIR/R
  mkdir -p $WORK_DIR/py

  gen-test-code

  # Then run it
  pushd $WORK_DIR

  # Clear stale outputs; the dirs may be empty, so tolerate rm failing.
  set +e  # opposite of errexit
  rm R/*
  rm py/*
  set -o errexit

  ./tnet_test_cases.py dump
  ./tnet_test_cases.R --args dump

  banner "Comparing dumps"

  # Get rid of noise
  diff -u --recursive R py | grep -v ^diff | grep -v ^@@

  local num_diffs=$(diff -u --recursive R py | grep '+++' | wc -l)
  echo "Number of differences dumping: $num_diffs"

  # These load steps report their own errors.

  banner "Loading into Python"
  ./tnet_test_cases.py load

  banner "Loading into R"
  ./tnet_test_cases.R --args load

  # If any failed, we wouldn't get here because of errexit
  echo
  echo PASS

  popd
}

# Test performance of Python vs R.

# Run the generated dump/load perf cases for both languages, then a
# heavier benchmark on one big file generated from testdata/test.json.
multi-perf() {
  set -o errexit

  local root_dir=$PWD

  pushd "$WORK_DIR"
  banner "Dumping from Python"
  ./tnet_test_cases.py dump-perf
  banner "Loading into Python"
  ./tnet_test_cases.py load-perf
  banner "Dumping from R"
  ./tnet_test_cases.R --args dump-perf
  banner "Loading into R"
  ./tnet_test_cases.R --args load-perf

  # Generate the big test case (redirect instead of a useless 'cat |').
  local BIG=airquality.tnet
  "$root_dir/python/tnet.py" dump < "$root_dir/testdata/test.json" > "$BIG"

  banner "Big test case (Python)"
  ./tnet_test_cases.py perf "$BIG" 10
  banner "Big test case (R)"
  ./tnet_test_cases.R --args perf "$BIG" 10
  popd
}

# Row counts for the generated a<N>.tnet test data files.
SIZES="2 6 10 14"

# Generate test data files of several sizes into $WORK_DIR.
gen-testdata() {
  # Create a few different sizes of rows
  set -o errexit

  pushd R  # to source tnet.R
  local i
  for i in $SIZES; do
    log "Writing a${i}.tnet"
    ./testdata.R --args "$i" > "../$WORK_DIR/a${i}.tnet"
  done
  popd
  ls -al "$WORK_DIR"/a*.tnet
}

# Wow, in R, dumping is a lot faster than loading?

# Time loading each generated a<N>.tnet file with one implementation.
# $1 - "py" for Python; anything else runs the R implementation.
time-frames() {
  # Use a local array instead of an unquoted word-split string
  # (and avoid a stray global named 'command').
  local -a runner
  if test "$1" == py; then
    runner=(./tnet_test_cases.py)
    banner "Loading with Python"
  else
    runner=(./tnet_test_cases.R --args)
    banner "Loading with R"
  fi

  pushd "$WORK_DIR" > /dev/null
  local i
  for i in $SIZES; do
    log "Testing with file a$i.tnet"
    # 1 means 1 replica
    "${runner[@]}" perf "a$i.tnet" 1
  done
  popd > /dev/null
}

# Uses custom join-lines tool.
# Convert a benchmark log ($1) into CSV on stdout.
make-csv() {
  echo '"rows.153","load.ms","dump.ms"'
  # Redirect instead of a useless 'cat |'; quote the filename.
  join-lines 3 ' ' < "$1" | grepy '(\d+) .* ([\d.]+) ms .* ([\d.]+) ms' -t '$1,$2,$3'
}

#
# Haskell
#

# Compile and run the Haskell test suite, then clean up build artifacts.
haskell-tests() {
  set -o errexit  # fail on first error
  pushd haskell/tests > /dev/null
  ghc -package test-framework -package test-framework-hunit -threaded tests.hs -o test_haskell
  ./test_haskell

  rm *.o *.hi test_haskell
  # BUG FIX: popd takes no directory argument; 'popd haskell' was an
  # invalid-argument error that left the directory stack unpopped.
  popd > /dev/null
}

#
# tnet tool
#

# Command-line entry point for the tnet tool.
tnet() {
  # tnet.py is in python/, various actions are in tool/
  PYTHONPATH=.:tool bin/tnet.py "$@"
}

#
# Raw demo
#

# Demo: stream UTF-8 bytes through a fifo into R's raw_demo.R.
raw() {
  pushd R
  # Remove any fifo left by a previous run: mkfifo fails if the path
  # already exists, which made this function single-use before.
  rm -f raw-fifo
  mkfifo raw-fifo
  # Background writer: 20 copies of U+00B5 encoded as UTF-8 (Python 2).
  python -c 'print u"\u00B5".encode("utf-8") * 20;' > raw-fifo &
  ./raw_demo.R raw-fifo
  rm -f raw-fifo
  popd > /dev/null
}

# Dispatch: run the function named by the first argument, passing the
# rest along, e.g. "./do.sh r-unit-test".
"$@"
