// This is adapted from a benchmark written by John Ellis and Pete Kovac
// of Post Communications.
// It was modified by Hans Boehm of Silicon Graphics.
//
// 	This is no substitute for real applications.  No actual application
//	is likely to behave in exactly this way.  However, this benchmark was
//	designed to be more representative of real applications than other
//	Java GC benchmarks of which we are aware.
//	It attempts to model those properties of allocation requests that
//	are important to current GC techniques.
//	It is designed to be used either to obtain a single overall performance
//	number, or to give a more detailed estimate of how collector
//	performance varies with object lifetimes.  It prints the time
//	required to allocate and collect balanced binary trees of various
//	sizes.  Smaller trees result in shorter object lifetimes.  Each cycle
//	allocates roughly the same amount of memory.
//	Two data structures are kept around during the entire process, so
//	that the measured performance is representative of applications
//	that maintain some live in-memory data.  One of these is a tree
//	containing many pointers.  The other is a large array containing
//	double precision floating point numbers.  Both should be of comparable
//	size.
//
//	The results are only really meaningful together with a specification
//	of how much memory was used.  It is possible to trade memory for
//	better time performance.  This benchmark should be run in a 32 MB
//	heap, though we don't currently know how to enforce that uniformly.
//
//	Unlike the original Ellis and Kovac benchmark, we do not attempt
// 	to measure pause times.  This facility should eventually be added back
//	in.  There are several reasons for omitting it for now.  The original
//	implementation depended on assumptions about the thread scheduler
//	that don't hold uniformly.  The results really measure both the
//	scheduler and GC.  Pause time measurements tend to not fit well with
//	current benchmark suites.  As far as we know, none of the current
//	commercial Java implementations seriously attempt to minimize GC pause
//	times.
//
//	Known deficiencies:
//		- No way to check on memory use
//		- No cyclic data structures
//		- No attempt to measure variation with object size
//		- Results are sensitive to locking cost, but we don't
//		  check for proper locking

import java.util.Random;  // @jsinger

import org.mmtk.utility.heap.HeapGrowthManager;

/**
 * Binary tree node
 * leaf nodes have null left and right pointers
 * Each node has two integer values as its payload
 * (Note we don't touch these at all in the benchmark.)
 */
class Node {
	Node left, right;
	int i, j;
	Node(Node l, Node r) { left = l; right = r; }
	Node() { }
}

/**
 * The Ellis GC benchmark, originally a GC stress test.
 * @jsinger - modified to use as a tuning load for
 * PID tuning of heap resizing mechanism
 *
 * The benchmark has three distinct phases:
 * 1) create a 'stretch' tree to warm up the heap (expand it to the right 
 *     ballpark size). throw this tree away once constructed
 * 2) create some long-lived data - a long-lived tree and an integer array
 * 3) create some short-lived data (aka churn data). Lots of trees of 
 *     differing sizes. Throw these away some time shortly after creation.
 */
public class GCBench {

  /**
   * depth of tree for initial stretch phase. EllisGC default was 18, which gives a 
   * stretch tree size of 16MB. 
   * @jsinger - now set this to kLongLivedTreeDepth+1
   */
  public static int kStretchTreeDepth;

  /**
   * depth of long-lived tree. EllisGC default was 16, which gives a 
   * long-lived tree size of 4MB.
   * Now the long-lived tree size (in MB) is set using the -m 
   * cmd line parameter
   */
  public static int kLongLivedTreeDepth;

  /**
   * size of long-lived int array. Ellis GC is 500000, which 
   * is around 4MB
   */
  public static final int kArraySize  = 500000; 

  /**
   * min and max depths of short-lived trees created in
   * phase 3. 
   * @jsinger - retain EllisGC defaults here.
   */
  public static final int kMinTreeDepth = 4;
  public static final int kMaxTreeDepth = 16; 

  /**
   * Use this array of Node references to store
   * enough of the short-lived trees to fill the short-lived
   * region of memory.
   */
  static Node[] gaussianLiveNodes;  // use to retain some proportion of churn-generated trees as live

  /**
   * We require random numbers for Gaussian sizing of live nodes array
   * Seed is set to 0.
   */
  static Random rng;


  //////////////////////
  // utility methods
  //////////////////////


  // Nodes used by a tree of a given depth i
  // == 2^(i+1) - 1
  static int TreeSize(int i) {
    return ((1 << (i + 1)) - 1);
  }

  // Number of iterations to use for a given tree depth
  // @jsinger - i.e. how many short-lived trees to create of depth i.
  // This will ensure that, for each tree depth, we allocate
  // twice as much memory as in the stretching phase.
  static int NumIters(int i) {
    return 2 * TreeSize(kStretchTreeDepth) / TreeSize(i);
  }

  // Build tree top down, assigning to older objects. 
  static void Populate(int iDepth, Node thisNode) {
    if (iDepth<=0) {
      return;
    } else {
      iDepth--;
      thisNode.left  = new Node();
      thisNode.right = new Node();
      Populate (iDepth, thisNode.left);
      Populate (iDepth, thisNode.right);
    }
  }

  // Build tree bottom-up
  static Node MakeTree(int iDepth) {
    if (iDepth<=0) {
      return new Node();
    } else {
      return new Node(MakeTree(iDepth-1),
		      MakeTree(iDepth-1));
    }
  }

  static void PrintDiagnostics() {
    long lFreeMemory = Runtime.getRuntime().freeMemory();
    long lTotalMemory = Runtime.getRuntime().totalMemory();
    
    System.out.print(" Total memory available="
		     + lTotalMemory + " bytes");
    System.out.println("  Free memory=" + lFreeMemory + " bytes");
  }

  /**
   * create a number of short-lived trees of a specific depth,
   * and retain enough references to fill the short-lived memory region
   */
  static void TimeConstruction(int depth, int varyingMemMax, boolean gaussianHighWaterMark) {
    Node    root;
    long    tStart, tFinish;
    int 	iNumIters = NumIters(depth);
    Node	tempTree;
    
    // first setup array
    // (1) work out how much mem we want in this iteration
    // Mem measurements given in MB
    // use Gaussian: X = mu + sigma Z,
    // where X is mem size, Z is random value from unit gaussian distr
    int varyingMem;
    if (gaussianHighWaterMark) {
      varyingMem = (int)(((double)varyingMemMax/2) + (rng.nextGaussian()*((double)varyingMemMax/6)));
    }
    else {
      varyingMem = varyingMemMax;
    }
    
    if (varyingMem <0) {
      varyingMem = 0;
    }
    else if (varyingMem > varyingMemMax) {
      varyingMem = varyingMemMax;
    }
    System.out.println("*********** varyingMem size is : " + varyingMem + " MB");
    // (2) work out how many trees we need to reference
    // to achieve this live mem level
    int numSlots = varyingMem * TreeSize(14) / TreeSize(depth);
    // allocate new array of this size
    gaussianLiveNodes = new Node[numSlots];
    System.out.println("numSlots is: " + numSlots);
    System.out.println("numIters is: " + iNumIters);
    
    System.out.println("Creating " + iNumIters +
		       " trees of depth " + depth);
    tStart = System.currentTimeMillis();
    for (int i = 0; i < iNumIters; ++i) {
      tempTree = new Node();
      Populate(depth, tempTree);
      
      // @jsinger - store tree into array
      if (numSlots > 0) {
	gaussianLiveNodes[i%numSlots] = tempTree;
      }
      
      tempTree = null;
    }
    tFinish = System.currentTimeMillis();
    System.out.println("\tTop down construction took "
		       + (tFinish - tStart) + "msecs");
    tStart = System.currentTimeMillis();
    for (int i = 0; i < iNumIters; ++i) {
      tempTree = MakeTree(depth);
      tempTree = null;
    }
    tFinish = System.currentTimeMillis();
    System.out.println("\tBottom up construction took "
		       + (tFinish - tStart) + "msecs");
    
  }
  
  /**
   * EllisGC execution
   */
  public static void main(String args[]) {
    Node	root;
    Node	longLivedTree;
    Node	tempTree;
    long	tStart, tFinish;
    long	tElapsed;

    long churnAlloc;  // @jsinger - amount of short-lived data constructed, specify in MB on cmd line with -t
    int baselineMem;  // @jsinger - guideline value for long-lived data structure size, specify in MB on cmd line with -m
    double highWaterMarkProportion = 0.0; // @jsinger - (proportion of baselineMem MB), as region for short-lived objects 
    boolean gaussianHighWaterMark = false;  // @jsinger - should we resize heap using Gaussian distr probability for size of short-lived region

    rng = new Random(0);
    
    // args processing
    // default args
    churnAlloc = 100; // 100 MB
    baselineMem = 10; // 10 MB
    
    try {
      if (args.length > 0) {
	for (int i=0; i<args.length; i++) {
	  if (args[i].equals("-help") ||
	      args[i].equals("-?")) {
	    usage();
	    System.exit(0);
	  }
	  else if (args[i].equals("-t")) {
	    // -t CHURNALLOC_IN_MB
	    churnAlloc = Integer.parseInt(args[++i]);
	  }
	  else if (args[i].equals("-m")) {
	    // -m BASELINEMEM_IN_MB
	    baselineMem = Integer.parseInt(args[++i]);
	  }
	  else if (args[i].equals("-h")) {
	    // -h HIGH_WATERMARK_PROPORTION
	    highWaterMarkProportion = Double.parseDouble(args[++i]);
	    if (highWaterMarkProportion<0) {
	      System.err.println("-h value must be non-negative");
	      throw new Exception();
	    }
	  }
	  else if (args[i].equals("-g")) {
	    gaussianHighWaterMark = true;
	  }
	  else {
	    usage();
	    System.exit(-1);
	  }
	}
      }
    }
    catch (Exception e) {
      usage();
      System.exit(-1);
    }
    
    // @jsinger
    // calculate kLongLivedTreeDepth from baselineMem
    // (I hacked this equation by playing with the
    // benchmark and fitting a function. Good values
    // of baselineMem are: 25, 50, 100, 200, 400MB)
    kLongLivedTreeDepth = 
      (int)(Math.ceil((Math.log(baselineMem) / Math.log(2.0)) + 14.35));
    kStretchTreeDepth = kLongLivedTreeDepth +1;
    
    System.out.println("Garbage Collector Test");
    System.out.println(
		       " Stretching memory with a binary tree of depth "
		       + kStretchTreeDepth);
    PrintDiagnostics();
    tStart = System.currentTimeMillis();
    
    // Stretch the memory space quickly
    tempTree = MakeTree(kStretchTreeDepth);
    tempTree = null;
    
    // Create a long lived object
    System.out.println(
		       " Creating a long-lived binary tree of depth " +
		       kLongLivedTreeDepth);
    longLivedTree = new Node();
    Populate(kLongLivedTreeDepth, longLivedTree);
    
    // Create long-lived array, filling half of it
    System.out.println(
		       " Creating a long-lived array of "
		       + kArraySize + " doubles");
    double array[] = new double[kArraySize];
    for (int i = 0; i < kArraySize/2; ++i) {
      array[i] = 1.0/i;
    }
    PrintDiagnostics();
    
    // calculate how much mem we allocate per churn iteration
    int churnAllocPerIter = (kStretchTreeDepth/2) * ((kMaxTreeDepth-kMinTreeDepth)/2 +1);
    System.out.println("++++++ we are allocating " + churnAllocPerIter + "MB of data per churn iteration");
    boolean impulse = false;  // have we called the HeapGrowthManager impulse() function yet?
    // @jsinger - start of churn
    do {
      System.out.println("new churn iteration...");
      for (int d = kMinTreeDepth; d <= kMaxTreeDepth; d += 2) {
	int varyingMemMaxSize = (int)(highWaterMarkProportion*baselineMem);
	TimeConstruction(d, varyingMemMaxSize, gaussianHighWaterMark);
      }
      
      if (longLivedTree == null || array[1000] != 1.0/1000)
	System.out.println("Failed");
      // fake reference to LongLivedTree
      // and array
      // to keep them from being optimized away
      
      // @jsinger - now we are ready for an impule
      if (!impulse) {
	HeapGrowthManager.readyForImpulse();
	impulse = true;
      }

      churnAlloc -=churnAllocPerIter;
    } while (churnAlloc > 0);
    
    tFinish = System.currentTimeMillis();
    tElapsed = tFinish-tStart;
    PrintDiagnostics();
    System.out.println("Completed in " + tElapsed + "ms.");
  } // main()

  /**
   * command line usage message.
   * Invoke with java GCBench -? 
   * or java GCBench -help
   */
  public static void usage() {
    System.err.println("Ellis GC benchmark, modified by Jeremy Singer\n");
    System.err.println("usage: java GCBench [-t alloc_mb -m longlived_mb -h shortlived_proportion -g]");
    System.err.println(" -t alloc_mb : int \n\t specifies amount of MB of short-lived (churn) data to allocate"); 
    System.err.println(" -m longlived_mb : int \n\t specifies size of long-lived tree data structure (ideally a multiple of 25MB)");
    System.err.println(" -h shortlived_proportion : float >=0.0 \n\t specifies max amount of heap mem occupied by short-lived (churn) data");
    System.err.println("     as a multiple of the long-lived data size");
    System.err.println(" -g : \n\t turns on the Gaussian sizing for the short-lived data amount, max size still specified by -h\n");
    System.err.println("** Note that the actual live data on the heap of Jikes RVM will be more than the sum of the");
    System.err.println("    long-lived data and the short-lived data created by the benchmark.");
    System.err.println("    There is also around");
    System.err.println("       9MB is for Jikes RVM internal data structures");
    System.err.println("       4MB is for a long-lived integer array");
    System.err.println("      and some overhead for the array of references required to hold the short-lived data\n\n");
    System.err.println("GCBench (aka EllisGC) is based on the Java source code available from:\n   http://www.hpl.hp.com/personal/Hans_Boehm/gc/gc_bench.html\n");
  } // usage()
  
} // class JavaGC
