/* Copyright (c) CERN
 * Artistic License 2.0
 */

import java.io.*;
import java.util.*;
import java.lang.Math;

/**
 * Analyses a trace and generates a file containing prediction results for various runs.
 * Predictions are made for various cache sizes and sample set sizes. Each prediction is 
 * made a number of times using different sample sets. The mean and standard deviation are given.
 * Results may be plotted with gnuplot. An example is provided in 'plot/estimate.pl'.
 * <p>
 * Usage: java Analysis trace_file out_file distro min [refinements]
 *    trace_file : a file containing a trace of memory accesses. Each line is a memory address.
 *    out_file : a generated file containing classified mean and std of prediction results,
 *               and additional information for plots. See 'plot/estimate.pl' for display.
 *    distro : distribution used for the estimation. GPD, GPD-biased, or GPD-truncated.
 *             It stands for Generalized Pareto Distribution, biased to fit values higher
 *             than 'min', or truncated at 'min' (lower values discarded).
 *    min : value under which the quality of the fit is not important.
 *    refinements : (only with the biased distro) number of refinement steps for the biased fit.
 *
 * @author Xavier Grehant
 */
public class Analysis {

/**
 * Maximum number of samples used for a prediction.
 * For a given cache size, a prediction is done for sample sets whose size range from 'minnsamples'
 * to 'maxnsamples', and two successive sizes differ by a factor 'nsamplesfactor'.
 */
   static int maxnsamples = 100000000;

/**
 * Minimum number of samples used for a prediction.
 * For a given cache size, a prediction is done for sample sets whose size range from 'minnsamples'
 * to 'maxnsamples', and two successive sizes differ by a factor 'nsamplesfactor'.
 */
   static int minnsamples = 8;

/**
 * Size ratio between two successive sample sets.
 * For a given cache size, a prediction is done for sample sets whose size range from 'minnsamples'
 * to 'maxnsamples', and two successive sizes differ by a factor 'nsamplesfactor'.
 */
   static int nsamplesfactor = 2;

/**
 * Number of samples considered from the trace, starting from the first sample.
 * Kept as a long because the default exceeds Integer.MAX_VALUE.
 * Check that nscanned / (maxnsamples * iterations) > 1
 * otherwise you will always get the same samples set in the end, and a variance = 0.
 */
   static long nscanned = 2800000000L;

/**
 * Number of times a prediction is made for a given cache size and sample set size,
 * using different sample sets to calculate its mean and std.
 */
   static int iterations = 20;

/**
 * Smallest cache size for cache misses prediction (in bytes).
 * Overwritten in 'main' from the user-supplied minimum distance.
 */
   static int mincachesize = 32768;

/**
 * Size ratio between two successive caches for which cache misses prediction is made.
 */
   static int cachesizefactor = 2;

/**
 * Number of caches on which cache misses prediction is made.
 */
   static int nbcaches = 3;

/**
 * Instruction size in Bytes.
 */
   static int instrsize = 8;

/**
 * The mean of an array of integers.
 * Each term is divided by the length individually (instead of dividing the sum
 * once) to reproduce the historical floating-point results exactly.
 * @param array an array of integers.
 * @return the mean, or 0 for an empty array.
 */
   static double mean(int[] array) {
      double mean = 0d;
      for (int i = 0; i < array.length; i++) {
         mean += (double) array[i] / (double) array.length;
      }
      return mean;
   }

/**
 * The variance of an array of integers.
 * @param array an array of integers.
 * @param mean the mean of the array.
 * @return the variance, or 0 for an empty array.
 */
   static double variance(int[] array, double mean) {
      double variance = 0d;
      for (int i = 0; i < array.length; i++) {
         variance += Math.pow((double) array[i] - mean, 2d) / (double) array.length;
      }
      return variance;
   }

/**
 * The mean of an array of doubles.
 * @param array an array of doubles.
 * @return the mean, or 0 for an empty array.
 */
   static double mean(double[] array) {
      double mean = 0d;
      for (int i = 0; i < array.length; i++) {
         mean += array[i] / (double) array.length;
      }
      return mean;
   }

/**
 * The max of an array of doubles.
 * NOTE: the accumulator starts at 0, so this assumes non-negative values
 * (it is only used on probabilities here). An all-negative array yields 0.
 * @param array an array of doubles.
 * @return the max, or 0 for an empty array.
 */
   static double max(double[] array) {
      double max = 0d;
      for (int i = 0; i < array.length; i++) {
         if (max < array[i]) max = array[i];
      }
      return max;
   }

/**
 * The standard deviation of an array of doubles.
 * @param array an array of doubles.
 * @param mean the mean of the array.
 * @return the standard deviation, or 0 for an empty array.
 */
   static double std(double[] array, double mean) {
      double variance = 0d;
      for (int i = 0; i < array.length; i++) {
         variance += Math.pow(array[i] - mean, 2d) / (double) array.length;
      }
      return Math.sqrt(variance);
   }

/**
 * The variance of an array of doubles.
 * @param array an array of doubles.
 * @param mean the mean of the array.
 * @return the variance, or 0 for an empty array.
 */
   static double var(double[] array, double mean) {
      double variance = 0d;
      for (int i = 0; i < array.length; i++) {
         variance += Math.pow(array[i] - mean, 2d) / (double) array.length;
      }
      return variance;
   }

/**
 * This method skips a number of samples and returns false if we're done with the file.
 * True doesn't mean we're not done. We may just have eaten the last line by chance.
 * A non-positive offset skips nothing and returns true.
 * @param rinput a BufferedReader as input.
 * @param offset the number of samples to skip.
 * @return whether or not the expected number of samples were skipped before the file end.
 */
   static boolean skip(BufferedReader rinput, long offset) throws IOException {
      if (rinput == null) return false;
      for (long i = 0; i < offset; i++) {
         if (rinput.readLine() == null) return false;
      }
      return true;
   }

/**
 * This method checks if a memory access yields a cache miss, for each cache size considered.
 * If yes, it increments the corresponding counter.
 * @param distance stack distance read on the trace.
 * @param mcounts cache miss counter list. One counter per cache size.
 */
   public static void account(int distance, int[] mcounts) {
      // Start one factor below so the first loop turn lands on mincachesize.
      int cachesize = mincachesize / cachesizefactor;
      for (int i = 0; i < nbcaches; i++) {
         cachesize *= cachesizefactor;
         // A miss occurs when the reuse distance exceeds the cache capacity
         // expressed in instructions.
         if (distance > cachesize / instrsize) mcounts[i]++;
      }
   }

/**
 * This method logs user information both to the output file and to the screen.
 * In the output file the information is commented with gnuplot syntax.
 * @param explanation user information.
 * @param wout output file as a FileWriter.
 */
   static void log(String explanation, FileWriter wout) throws IOException {
      wout.write("# " + explanation + "\n");
      System.out.println(explanation);
   }

/**
 * This method prints initial information on the output file, used to draw plots.
 * It displays and comments with gnuplot syntax:
 *     - the number of non-infinite stack distances in the trace (re-uses)
 *     - the number of first uses (compulsory misses)
 *     - the number of conflict misses for each cache size
 *     - the sample set sizes
 * This information is not automatically retrieved by gnuplot. It has to be manually
 * retrieved by the user who plots the data. We make sure here that this information
 * is there along with the corresponding plot data.
 */
   static void preprint(FileWriter woutput, File input) throws IOException {
      long dcount = 0L; // number of samples
      long ccount = 0L; // number of compulsory misses
      int[] mcounts = new int[nbcaches]; // one miss counter per cache size
      BufferedReader rinput = new BufferedReader(new FileReader(input));
      try {
         String read;
         while ((read = rinput.readLine()) != null) {
            dcount++;
            int value = Integer.parseInt(read);
            if (value == 0) ccount++;             // first use: compulsory miss
            else account(value - 1, mcounts);     // re-use: distance is value - 1
         }
      } finally {
         // Fix: the reader was previously never closed (resource leak).
         rinput.close();
      }
      log("re-uses: " + (dcount - ccount) + ", first uses: " + ccount, woutput);

      StringBuilder explanation = new StringBuilder();
      int cachesize = mincachesize / cachesizefactor;
      for (int value : mcounts) {
         cachesize *= cachesizefactor;
         explanation.append(" ").append(cachesize).append(":").append(value);
      }
      log("cache size:related misses", woutput);
      log(explanation.toString(), woutput);

      explanation = new StringBuilder();
      for (int nsamples = minnsamples; nsamples < maxnsamples; nsamples *= nsamplesfactor) {
         explanation.append(" ").append(nsamples);
      }
      log("sizes of sample sets", woutput);
      log(explanation.toString(), woutput);
   }

/**
 * This method makes the predictions. For one sample set, cache misses are predicted for all cache sizes.
 * For one sample set size, a number (iterations) of different sample sets are taken.
 * Here is what the method does. It loops over the sample set sizes and inside that it loops over the
 * iterations. For each iteration it picks samples in the trace, estimates the distribution and predicts
 * the number of cache misses for each cache size, registers it in a list.
 * Each iteration produces a list for each cache size. When the iterations are completed for a given sample set
 * size, the mean and std are calculated across iterations, for each cache size, and printed.
 * @param woutput output file as a FileWriter.
 * @param input trace file as a File.
 * @param args user arguments passed through 'main' defining the prediction type.
 */
   static void printProbs(FileWriter woutput, File input, String[] args) throws IOException {
      Random rand = new Random();
      for (int nsamples = minnsamples; nsamples < maxnsamples; nsamples *= nsamplesfactor) {
         System.out.println("xag-iteration: " + nsamples);
         // Average distance (in trace lines) between two samples of one run.
         // Fix: divide in long arithmetic first; the old '(int) nscanned / nsamples'
         // cast nscanned (2800000000L) to int BEFORE dividing, which overflows
         // to a negative value.
         int sstep = (int) (nscanned / nsamples);
         // Offset between the starting points of two adjacent runs.
         int iterstep = sstep / iterations;

         // For all iterations, and all cache sizes, keep the probability found.
         double[][] probabilities = new double[nbcaches][iterations];

         for (int iter = 0; iter < iterations; iter++) {

            // Keep all sample values for the run.
            int[] distances = new int[nsamples];

            // Re-open the file for each run.
            BufferedReader rinput = new BufferedReader(new FileReader(input));
            try {
               // Offset between sample sets; negative for iter == 0, which
               // skip() treats as "skip nothing".
               skip(rinput, (long) iterstep * iter - 1);
               for (int sample = 0; sample < nsamples; sample++) {
                  String read = rinput.readLine();
                  if (read == null) break;
                  distances[sample] = Integer.parseInt(read);
                  // Skip a randomized number of lines averaging sstep - 1
                  // (plus the line just read, about sstep per sample) so
                  // successive runs do not systematically pick the same samples.
                  // Fix: guard the bound; Random.nextInt throws for bounds <= 0,
                  // which happened whenever sstep <= 1.
                  int bound = (sstep - 1) * 2 - 1;
                  int randomized = (bound > 0) ? 1 + rand.nextInt(bound) : 1;
                  if (!skip(rinput, randomized)) break;
               }
            } finally {
               // We don't need the file any longer for this iteration.
               rinput.close();
            }

            // Remove the discrete mass below half the first cache capacity
            // before fitting the continuous tail.
            Discrete discrete = Discrete.createDefault(distances, mincachesize / (2 * instrsize));
            log(discrete.toString(), woutput);
            distances = discrete.cleanOut(distances);

            GPD gpd;
            // Fix: test "biased"/"trunc" BEFORE the plain "GPD" match, so the
            // documented values "GPD-biased" and "GPD-truncated" select the
            // intended fit instead of always matching the plain GPD branch.
            if (args[2].contains("biased")) {
               int refinements = Integer.parseInt(args[4]);
               gpd = GPD.biased(distances, mincachesize / instrsize, refinements);
            } else if (args[2].contains("trunc")) {
               gpd = new GPD(distances, mincachesize / instrsize);
            } else if (args[2].contains("GPD")) {
               gpd = new GPD(distances);
            } else {
               throw new IllegalArgumentException("Wrong estimation type");
            }

            // First value will be multiplied back.
            int cachesize = mincachesize / cachesizefactor;
            for (int cacheindex = 0; cacheindex < nbcaches; cacheindex++) {
               cachesize *= cachesizefactor;
               // Divide by average size of an instruction in bytes.
               int cachecapacity = cachesize / instrsize;

               // P(miss) = P(stack distance > cache capacity).
               double probability = 1d - gpd.cdf(cachecapacity);
               probabilities[cacheindex][iter] = probability;
            }
         }

         // One output line per sample set size: nsamples then, for each cache,
         // the mean, max and std of the predicted miss probability.
         StringBuilder outputline = new StringBuilder();
         for (int cacheindex = 0; cacheindex < nbcaches; cacheindex++) {
            float m = (float) mean(probabilities[cacheindex]);
            float x = (float) max(probabilities[cacheindex]);
            float s = (float) std(probabilities[cacheindex], m);
            outputline.append(" ").append(m).append(" ").append(x).append(" ").append(s);
         }
         woutput.write(nsamples + outputline.toString() + "\n");
      }
   }

/**
 * This method cleans a trace file so that it is readable by the program.
 * Lines starting with '#' (comments) or '0' (first uses) are dropped.
 * NOTE(review): startsWith("0") also drops any value with a leading zero
 * such as "01" — presumably the trace never contains those; verify.
 * @param cleanTmpFile a generated temporary file.
 * @param input initial trace file to be cleaned.
 */
   static void clean(File cleanTmpFile, File input) throws IOException {
      long newSize = 0;
      BufferedReader rinput = new BufferedReader(new FileReader(input));
      FileWriter rfile = new FileWriter(cleanTmpFile);
      try {
         String read;
         while ((read = rinput.readLine()) != null) {
            if (!read.startsWith("0") && !read.startsWith("#")) {
               rfile.write(read + "\n");
               // Fix: count only the lines actually kept. The old code counted
               // every input line, so reviewAttributes received an overestimate
               // of the cleaned file's size.
               newSize++;
            }
         }
      } finally {
         rinput.close();
         rfile.close();
      }
      reviewAttributes(newSize);
   }

/**
 * This method checks static attributes in case they are changed by the user.
 * It may reduce the user-defined 'maxnsamples' and 'nscanned'.
 * @param newSize the maximum number of samples that can be considered.
 */
   static void reviewAttributes(long newSize) {
      if (nscanned > newSize) {
         nscanned = newSize;
         maxnsamples = (int) (newSize / iterations);
         // Fix: only report when the attributes actually changed; the message
         // used to be printed unconditionally.
         System.out.println("attributes changed: nscanned = " + nscanned + ", maxnsamples = " + maxnsamples);
      }
   }

/**
 * This method is the main method called by the user.
 * It defines some parameters, and prints the output file.
 * @param args user-defined parameters (see class documentation for usage).
 */
   public static void main(String[] args) {
      try {
         if (args.length < 4) throw new IllegalArgumentException(
                     "args: inputfile, outputfile, estimation (GPD, trunc or biased), mindistance, [refinements]");
         // Where is our input file?
         File input = new File(args[0]);
         // Where do we write plot data?
         File output = new File(args[1]);
         FileWriter woutput = new FileWriter(output);
         try {
            int mindistance = Integer.parseInt(args[3]);
            mincachesize = mindistance * instrsize;

            // Print out useful stuff.
            preprint(woutput, input);

            File cleanTmpFile = new File("cleanTmp");
            clean(cleanTmpFile, input);

            // Calculate and print estimated probabilities, with their var and stuff.
            printProbs(woutput, cleanTmpFile, args);
         } finally {
            // Fix: close the output file even when an intermediate step throws.
            woutput.close();
         }
      } catch (Exception e) {
         e.printStackTrace();
      }
   }
}
