// predictor.h
// Daniel A. Jimenez's entry to the CBP contest
// This file implements idealized piecewise linear branch prediction

#include <stdio.h>
#include <cassert>
#include <string.h>
#include <inttypes.h>
#include <stdlib.h>

//#include <cstddef>
//#include <inttypes.h>
//#include <vector>
//#include <string.h>

using namespace std;
#include "cbp3_def.h"
#include "cbp3_framework.h"

// a weight is a 7-bit signed integer (range MIN_WEIGHT..MAX_WEIGHT,
// i.e. -64..63); it will fit in a signed char

typedef signed char weight_t;

// an "addr_t" is used to make path correlations.  it is the lower 8 bits
// of a branch PC, represented as an unsigned char

typedef unsigned char addr_t;

// a local history is up to 16 bits long, fitting in an unsigned short int

typedef unsigned short int local_history_t;

// these constants are parameters to the prediction algorithms.
// they have all been determined empirically to yield high accuracy
// on the distributed traces.  their names have been chosen to be self-
// documenting.
//
// the INIT_* values are in effect from startup; after 300000 branches
// maybe_change_history_lengths() switches each tunable to either its
// HIGH_* or its LOW_* variant depending on the estimated branch
// working-set size.  note that the HIGH_/LOW_ prefix names the regime
// being selected, not the magnitude of the constant (e.g.
// LOW_EXTRA_BIAS_LENGTH is larger than HIGH_EXTRA_BIAS_LENGTH).

#define NUM_WEIGHTS 8590
#define NUM_BIASES 599
#define INIT_GLOBAL_HISTORY_LENGTH 30
#define HIGH_GLOBAL_HISTORY_LENGTH 48
#define LOW_GLOBAL_HISTORY_LENGTH 18
#define INIT_LOCAL_HISTORY_LENGTH 4
#define HIGH_LOCAL_HISTORY_LENGTH 16
#define LOW_LOCAL_HISTORY_LENGTH 1
#define EXTRA_BIAS_LENGTH 6
#define HIGH_EXTRA_BIAS_LENGTH 2
#define LOW_EXTRA_BIAS_LENGTH 7
#define EXTRA_HISTORY_LENGTH 5
#define HIGH_EXTRA_HISTORY_LENGTH 7
#define LOW_EXTRA_HISTORY_LENGTH 4
#define INVERTED_BIAS_LENGTH 8
#define HIGH_INVERTED_BIAS_LENGTH 4
#define LOW_INVERTED_BIAS_LENGTH 9
#define NUM_HISTORIES 55
#define WEIGHT_WIDTH 7
#define MAX_WEIGHT 63
#define MIN_WEIGHT -64

// thresholds on the perceptron output magnitude that gate training;
// the INIT pair is replaced by the HIGH or LOW pair together with the
// history lengths

#define INIT_THETA_UPPER 70
#define INIT_THETA_LOWER -70
#define HIGH_THETA_UPPER 139
#define HIGH_THETA_LOWER -136
#define LOW_THETA_UPPER 50
#define LOW_THETA_LOWER -46

// large primes used by hash() to scramble indices

#define HASH_PRIME_1 511387U
#define HASH_PRIME_2 660509U
#define HASH_PRIME_3 1289381U

// predict taken when the perceptron output is at least this value

#define TAKEN_THRESHOLD 3

	// here's all the state for the predictor.  no local variables
	// are declared to make accounting for the bits easier.
	// parameters to functions are not counted against the hardware
	// budget because they are never modified and represent data
	// flow, not state.

	// an array of the lower 8 bits of the last several addresses
	// seen by the predictor; sized for the largest history length
	// ever used (HIGH_GLOBAL_HISTORY_LENGTH)

	addr_t global_addresses[HIGH_GLOBAL_HISTORY_LENGTH];

	// a vector of global history outcomes i.e. taken or not taken

	bool global_history_bits[HIGH_GLOBAL_HISTORY_LENGTH];

	// an array of local i.e. per-branch history outcome vectors;
	// each holds at most HIGH_LOCAL_HISTORY_LENGTH (16) bits, so a
	// 16-bit unsigned integer suffices

	local_history_t local_histories[NUM_HISTORIES];

	// a special pool just for 7-bit bias weights indexed by a hash function

	weight_t biases[NUM_BIASES];

	// a general pool of 7-bit neural weights indexed by a hash function

	weight_t weights[NUM_WEIGHTS];

	// the 32-bit signed output of the neural computation; not strictly
	// necessary since it can be recomputed but I count it anyway

	int output;

	// an index variable for various loops; deliberately global so
	// its bits are charged to the hardware budget

	short int i;

	// five 8-bit variables to hold various history lengths.
	// since these values can change over the course of the program,
	// they must be counted in the hardware budget.

	unsigned char
		local_history_length,
		global_history_length,
		extra_bias_length,
		extra_history_length,
		inverted_bias_length;

	// 9-bit signed upper and lower thresholds for determining whether 
	// to trigger the training algorithm.  since they can change they 
	// must be charged to the hardware budget

	short int theta_upper, theta_lower;

	// a single 16-bit local-history scratch register used while
	// walking a branch's local history in compute_output/train

	local_history_t lh;

	// a 32-bit count of the number of predictions made so far;
	// used to determine when to change history lengths

	unsigned int ntimes;

	// hash three unsigned integers into one, scattering the bits of
	// i, j, and k so the result is close to uniformly distributed

	unsigned int hash (unsigned int i, unsigned int j, unsigned int k) {
		// multiply each key by its own large prime, then fold the
		// three products together with exclusive-or
		unsigned int a = i * HASH_PRIME_1;
		unsigned int b = j * HASH_PRIME_2;
		unsigned int c = k * HASH_PRIME_3;
		return a ^ b ^ c;
	}

	// return a pointer to the bias weight for the given index

	weight_t *access_bias (unsigned int i) {
		// scramble the index (with three related keys) and reduce
		// the result into [0, NUM_BIASES) to pick a table slot
		unsigned int slot = hash (i, i+1, i+2) % NUM_BIASES;
		return biases + slot;
	}

	// return a pointer to a general weight given three indices

	weight_t *access_weight (unsigned int i, unsigned int j, unsigned int k) {
		// map the hashed triple into [0, NUM_WEIGHTS) and hand
		// back the address of the selected weight
		unsigned int slot = hash (i, j, k) % NUM_WEIGHTS;
		return weights + slot;
	}

	// record the low 8 bits of a branch PC in the path-history array

	void shift_address (addr_t addr) {
		// slide every remembered address one slot toward the old
		// end; only the current global_history_length entries are
		// touched.  the global index 'i' is used (not a local) so
		// its bits stay charged to the hardware budget.

		for (i = global_history_length - 1; i > 0; i--)
			global_addresses[i] = global_addresses[i-1];

		// the newest address always lives at position 0

		global_addresses[0] = addr;
	}

	// record one taken/not-taken outcome in the global history

	void shift_history (bool taken) {
		// age every outcome bit by one position, exactly as
		// shift_address does for the path addresses

		for (i = global_history_length - 1; i > 0; i--)
			global_history_bits[i] = global_history_bits[i-1];

		// the newest outcome always lives at position 0

		global_history_bits[0] = taken;
	}

	// a common operation in 'train': saturating increment (inc true)
	// or decrement (inc false) of a weight

	void satincdec (bool inc, weight_t *c) {
		// move the weight one step in the requested direction,
		// never past MAX_WEIGHT (63) or below MIN_WEIGHT (-64)

		if (inc && *c < MAX_WEIGHT)
			(*c)++;
		else if (!inc && *c > MIN_WEIGHT)
			(*c)--;

		// the weight must still fit in a 7-bit signed integer

		assert (*c >= MIN_WEIGHT && *c <= MAX_WEIGHT);
	}

	// a common operation in 'compute_output': widen a weight to int,
	// negating it when 'negate' is false

	int negate_if_false (bool negate, weight_t x) {
		int value = (int) x;
		if (!negate) value = -value;
		return value;
	}

	// compute the dot product of the history registers and a dynamically
	// formed set of weights chosen by the path leading to this branch.
	// the result is left in the global 'output' variable.
	// 'address' is the branch PC with its top bit masked off.
	// NOTE: the global loop index 'i' and scratch register 'lh' are
	// clobbered here; they are global only for hardware-bit accounting.

	void compute_output (unsigned int address) {

		// initially the output is the branch bias from the
		// pool of bias weights

		output = *access_bias (address);

		// add some extra bias weights from the general pool of weights

		for (i=0; i<extra_bias_length; i++)
			output += *access_weight (address, i, 0);

		// inverted bias weights: train() pushes these toward the
		// OPPOSITE of the outcome, so they are subtracted here.
		// the +100 offset keeps these indices distinct from the
		// local-history weights, which use (address, 0, i) below.

		for (i=1; i<=inverted_bias_length; i++)
			output -= *access_weight (address, 0, i+100);

		// add weights (or their negations) for global history;
		// position i is correlated with outcome bit i-1 and the
		// path address recorded alongside it.
		// NOTE(review): the loop covers positions 1..length-1,
		// i.e. one fewer bit than global_history_length; train()
		// walks the same range, so predict and update agree.

		for (i=1; i<global_history_length; i++) {
			output += negate_if_false (global_history_bits[i-1],
			*access_weight (address, global_addresses[i-1], i));
		}

		// use two other sources for the first few global weights;
		// this reduces the effect of aliasing on these most important
		// history positions

		for (i=1; i<=extra_history_length; i++) {
			// add 100 to the array index to get a different weight

			output += negate_if_false (global_history_bits[i-1],
			*access_weight (address, global_addresses[i-1], i+100));

			// add 101 to the array index to get another
			// different weight

			output += negate_if_false (global_history_bits[i-1],
			*access_weight (address, global_addresses[i-1], i+101));
		}

		// lh is the local history used for this branch

		lh = local_histories[address%NUM_HISTORIES];

		// add weights (or their negations) for local history

		for (i=1; i<=local_history_length; i++) {

			// the lowest bit of lh is the i'th bit in
			// the local history

			output += negate_if_false (lh & 1, 
			*access_weight (address, 0, i));

			// shift out the lowest bit of lh

			lh >>= 1;
		}
	}

	// update the weights using a variant of perceptron learning.
	// 'address' is the masked branch PC, 'taken' the actual outcome.
	// every weight that compute_output summed is nudged (saturating)
	// toward agreement with the outcome it correlates with, so the
	// index expressions below must mirror compute_output exactly.

	void train (unsigned int address, bool taken) {

		// if the prediction was correct...

		if (taken == (output >= TAKEN_THRESHOLD)

		// and the output was already confident, i.e. outside the
		// (theta_lower, theta_upper) band -- theta_lower is negative...

		&& (output >= theta_upper || output <= theta_lower))

		// then we don't need to update the weights
			return;

		// increment the bias weight if the branch is taken,
		// decrement otherwise

		satincdec (taken, access_bias (address));

		// similarly update the extra bias weights

		for (i=0; i<extra_bias_length; i++)
			satincdec (taken, access_weight (address, i, 0));

		// inverted bias weights: trained toward the OPPOSITE of
		// the outcome; compute_output subtracts them to compensate

		for (i=1; i<=inverted_bias_length; i++)
			satincdec (!taken, access_weight (address, 0, i+100));

		// update weights for global history (same 1..length-1
		// index range as the corresponding loop in compute_output)

		for (i=1; i<global_history_length; i++) {
			// if the branch outcome agrees with the i'th branch
			// history, increment this weight, otherwise decrement
			// it
			satincdec (
			taken == global_history_bits[i-1],
			access_weight (address, global_addresses[i-1], i));
		}

		// similarly update the extra global history weights
		// (the +100/+101 aliases used by compute_output)

		for (i=1; i<=extra_history_length; i++) {
			satincdec (
			taken == global_history_bits[i-1],
			access_weight (address, global_addresses[i-1], i+100));
			satincdec (
			taken == global_history_bits[i-1],
			access_weight (address, global_addresses[i-1], i+101));
		}

		// get the local history used for this branch

		lh = local_histories[address%NUM_HISTORIES];

		// update weights for local history

		for (i=1; i<=local_history_length; i++) {

			// the lowest bit of lh is the i'th bit in
			// the local history

			satincdec (
			taken == (lh & 1),
			access_weight (address, 0, i));

			// shift out the lowest bit

			lh >>= 1;
		}

		// (no extra local weights are maintained; the placeholder
		// comment that used to mark this spot was a leftover)

	}


	// one-time initialization hook: verifies the hardware-bit budget
	// at run time, then sets every piece of predictor state to its
	// startup value.  also called from PredictorReset.

	void PredictorInit() {
#define K 1024
		// make sure we don't use too many bits: the budget is
		// 64K bits plus 256.  each term below is the bit cost of
		// one piece of state declared above; sizeof values are
		// multiplied by 8 to convert bytes to bits.
		assert (64*K + 256 >= 
  WEIGHT_WIDTH * NUM_BIASES		// for biases
+ WEIGHT_WIDTH * NUM_WEIGHTS		// for weights
+ 8 * HIGH_GLOBAL_HISTORY_LENGTH * sizeof (addr_t) // global_addresses
+ HIGH_GLOBAL_HISTORY_LENGTH          	// for global_history_bits
+ HIGH_LOCAL_HISTORY_LENGTH * NUM_HISTORIES  // for local_histories
+ 8 * sizeof (output)			// for output
+ 8 * sizeof (i)			// for i
+ 8					// for global_history_length
+ 8					// for local_history_length
+ 8					// for extra_bias_length
+ 8					// for extra_history_length
+ 8					// for inverted_bias_length
+ 9					// for theta_upper (9 bits, max 139)
+ 9					// for theta_lower (9 bits, min -136)
+ HIGH_LOCAL_HISTORY_LENGTH		// for lh
+ 8 * sizeof (ntimes));			// for ntimes

		// initialize variable history lengths and thresholds
		// to their initial values; maybe_change_history_lengths()
		// may later replace them with the HIGH_* or LOW_* variants

		global_history_length = INIT_GLOBAL_HISTORY_LENGTH;
		local_history_length = INIT_LOCAL_HISTORY_LENGTH;
		extra_bias_length = EXTRA_BIAS_LENGTH;
		extra_history_length = EXTRA_HISTORY_LENGTH;
		inverted_bias_length = INVERTED_BIAS_LENGTH;
		theta_upper = INIT_THETA_UPPER;
		theta_lower = INIT_THETA_LOWER;

		// start a count of branches

		ntimes = 0;

		// zero out the weight pools and the history arrays

		memset (biases, 0, sizeof biases);
		memset (weights, 0, sizeof weights);
		memset (global_addresses, 0, sizeof global_addresses);
		memset (global_history_bits, 0, sizeof global_history_bits);
		memset (local_histories, 0, sizeof local_histories);
	}

	// reset hook for the CBP infrastructure: announce the predictor
	// and its cost, then rebuild all state from scratch

	void PredictorReset(){
		puts("Predictor:piecewise, cost: 64KB");
		PredictorInit();
	}

	// change the history lengths based on the estimated size of the
	// working set of branches in the first 300000 branches.
	// called once per updated conditional branch; does real work
	// exactly once, when the branch count hits 300000.
	// NOTE: reuses the global 'output' and 'i' as scratch, which is
	// safe because a fresh prediction recomputes 'output' anyway.

	void maybe_change_history_lengths (void) {
		// one more branch

		ntimes++;

		// see if it is time to change the history lengths

		if (ntimes == 300000) {

			// reuse 'output' to count the number of bias weights
			// that record significant activity

			output = 0;
			for (i=0; i<NUM_BIASES; i++)

				// if this bias weight has a magnitude that
				// exceeds 2, then count it

				if (biases[i] < -2 || biases[i] > 2) output++;

			// if the estimated working set size is less than 300...
			if (output < 300) {
				// then use a high history length and high
				// magnitude values for theta since there
				// is presumably less aliasing pressure

				theta_upper = HIGH_THETA_UPPER;
				theta_lower = HIGH_THETA_LOWER;
				global_history_length = HIGH_GLOBAL_HISTORY_LENGTH;
				local_history_length = HIGH_LOCAL_HISTORY_LENGTH;
				extra_bias_length = HIGH_EXTRA_BIAS_LENGTH;
				inverted_bias_length = HIGH_INVERTED_BIAS_LENGTH;
				extra_history_length = HIGH_EXTRA_HISTORY_LENGTH;
			} else {

				// otherwise use low history lengths to minimize
				// the effects of aliasing

				theta_upper = LOW_THETA_UPPER;
				theta_lower = LOW_THETA_LOWER;
				global_history_length = LOW_GLOBAL_HISTORY_LENGTH;
				local_history_length = LOW_LOCAL_HISTORY_LENGTH;
				extra_bias_length = LOW_EXTRA_BIAS_LENGTH;
				inverted_bias_length = LOW_INVERTED_BIAS_LENGTH;
				extra_history_length = LOW_EXTRA_HISTORY_LENGTH;
			}
			// re-initialize the general pool of weights (not sure
			// if this helps, but it seems right).

			memset (weights, 0, sizeof weights);
		}
	}

	// prediction interface to the CBP infrastructure: returns the
	// predicted direction for the given uop

	bool get_prediction ( const cbp3_uop_dynamic_t *uop ) {
		// anything other than a conditional branch gets a dummy
		// not-taken answer

		if (!(uop->type & IS_BR_CONDITIONAL))
			return false;

		// mask off the high bit of the address (for consistency
		// with the author's training harness) and run the neural
		// computation; the result is left in the global 'output'

		compute_output ( uop->pc & 0x7fffffff);

		// taken exactly when the output reaches the threshold

		return output >= TAKEN_THRESHOLD;
	}

	// updating interface to the CBP infrastructure: trains the
	// perceptron for a retired conditional branch, then shifts the
	// outcome into the path, global, and per-branch histories

	void update_predictor (const cbp3_uop_dynamic_t *uop, bool taken) {
		// only update for conditional branches

		if ( uop->type & IS_BR_CONDITIONAL ) {

			// do perceptron learning for this branch address
			// (top PC bit masked, matching get_prediction)

			train ( uop->pc & 0x7fffffff, taken);

			// shift the 8 low bits of this address into
			// the global addresses array

			shift_address ((addr_t) uop->pc);

			// shift the branch outcome into the global history

			shift_history (taken);

			// shift the branch outcome into this branch's local
			// history.  a macro (rather than a local variable) is
			// used for the index to honor the file's no-local-
			// variable hardware-accounting convention; unlike the
			// old LHI(), it is fully parenthesized and #undef'd
			// right after use so it cannot misbind in an
			// expression or leak into the rest of the file

#define LHI ((uop->pc & 0x7fffffff) % NUM_HISTORIES)

			local_histories[LHI] <<= 1;
			local_histories[LHI] |= taken;

			// make sure we use only 16 bits for a local history

			local_histories[LHI] &= (1<<HIGH_LOCAL_HISTORY_LENGTH)-1;

#undef LHI

			// possibly retune the history lengths once enough
			// branches have been seen

			maybe_change_history_lengths();
		}
	}



void PredictorRunACycle() { // TODO: FIXME!
    // get info about what uops are processed at each pipeline stage
    const cbp3_cycle_activity_t *cycle_info = get_cycle_info();

    // make prediction at fetch stage
    for (int i = 0; i < cycle_info->num_fetch; i++) {
        uint32_t fe_ptr = cycle_info->fetch_q[i];
        const cbp3_uop_dynamic_t *uop = &fetch_entry(fe_ptr)->uop;

        if ( uop->type & IS_BR_CONDITIONAL) {
            // get prediction
            //uint32_t gidx = (brh_fetch ^ uop->pc) & ((1 << GSHARE_SIZE) - 1);
            //bool gpred = (gtable[gidx] >= 0);

            // report prediction:
            // you need to provide direction predictions for conditional branches,
            // targets of conditional branches are available at fetch stage.
            // for indirect branches, you need to provide target predictions.
            // you can report multiple predictions for the same branch
            // the framework will use the last reported prediction to calculate 
            // misprediction penalty
            assert(report_pred(fe_ptr, false, get_prediction ( uop )));
        }

        // update fetch branch history
        //if (uop->type & IS_BR_CONDITIONAL)
        //    brh_fetch = (brh_fetch << 1) | (uop->br_taken ? 1 : 0);
        //else if (uop_is_branch(uop->type))
        //    brh_fetch = (brh_fetch << 1) | 1;
    }

    for (int i = 0; i < cycle_info->num_retire; i++) {
        uint32_t rob_ptr = cycle_info->retire_q[i];
        const cbp3_uop_dynamic_t *uop = &rob_entry(rob_ptr)->uop;

        if ( uop->type & IS_BR_CONDITIONAL) {
        	update_predictor ( uop, uop->br_taken );
            /*
            uint32_t gidx = (brh_retire ^ uop->pc) & ((1 << GSHARE_SIZE) - 1);

            // update predictor
            bool t = uop->br_taken;
            if (t && gtable[gidx] < 1)
                gtable[gidx] ++;
            else if (!t && gtable[gidx] > -2)
                gtable[gidx] --;
                */
        }

        // update retire branch history
        //if (uop->type & IS_BR_CONDITIONAL)
        //    brh_retire = (brh_retire << 1) | (uop->br_taken ? 1 : 0);
        //else if (uop_is_branch(uop->type))
        //    brh_retire = (brh_retire << 1) | 1;
    }
}

void PredictorRunEnd() {
	// no end-of-run bookkeeping is needed for this predictor
}

void PredictorExit() {
	// no teardown is needed: all predictor state is statically allocated
}
