/* Main entry point for the memory-read microbenchmark.
 * All other code links with this file.
 * main() parses the command-line arguments (benchmark mode and
 * memory size) and dispatches to the appropriate benchmark
 * function.
 *
 * par - parallelized version
 * seq - sequential version
 */

#include <omp.h>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <stdint.h>
#include <iostream>
#include <assert.h>

#include "main.h"
#include "papi_test.h"


using namespace std;

// Benchmark entry points, defined below.  Both walk the global src
// array via stride() and return the measured time.
float Par(int64_t len, int64_t block_size);
float Seq(int64_t len, int64_t block_size);
// Defined in another translation unit.  Presumably performs
// num_accesses strided reads over src[startindex..endindex] at
// block_size granularity and returns the elapsed time -- confirm
// against the implementation.
float stride(int* src, int startindex, int endindex, long num_accesses, int64_t block_size);

bool test = 0;   // NOTE(review): never read in this file
int* src;        // benchmark array; allocated and initialized in main()

int main(int argc, char** argv)
{
    if (argc < 3)
    {
        cout << "Usage: ./memread.exe <seq/par> <memsize>" << endl;
        exit(1);
    }
    int seq = atoi(argv[1]);
    int memsize = atoi(argv[2]);
    int64_t start_len = 1024 * 1024 * memsize / sizeof(int); // 16 MB
    int start_block_size = CACHE_LINE_SIZE;
    int max_threads = omp_get_max_threads();
    //cout << "max_threads : " << max_threads << endl;
    long max_array_len = start_len+1;//1024 * 1024 * 1024 / sizeof(int); // 10240 MB
    int max_block_size = start_block_size + 1;
    int64_t len = start_len; // length of the array
    int retval;

    retval = PAPI_library_init(PAPI_VER_CURRENT);
	if ( retval != PAPI_VER_CURRENT )
		test_fail( __FILE__, __LINE__, "PAPI_library_init", retval );

	retval =
		PAPI_thread_init( ( unsigned
							long ( * )( void ) ) ( omp_get_thread_num ) );
	if ( retval != PAPI_OK ) {
		if ( retval == PAPI_ESBSTR )
			test_skip( __FILE__, __LINE__, "PAPI_thread_init", retval );
		else
			test_fail( __FILE__, __LINE__, "PAPI_thread_init", retval );
    }

    src   = (int*)malloc(sizeof(int) * len);
    for (int i = 0; i < len; i++)
    {
        src[i] = CACHE_LINE_SIZE / sizeof(int);
    }

    int block_size = CACHE_LINE_SIZE;
    if (seq)
    {
        float time_seq = Seq(len, block_size);
        cout << time_seq << endl;
    }
    else
    {
        float time_par = Par(len, block_size);
        cout << time_par << endl;
    }
    /*
    float time_seq = Seq(len, block_size);
    cout << "time:  " << time_seq << endl;
    for (int num_threads = 1; num_threads <= max_threads; num_threads *= 2)
    {
        omp_set_num_threads(num_threads);
        float time_par = Par(len, block_size);
        cout << "\nnum threads " << num_threads << " Speedup : " << time_seq / time_par << endl;
    }*/
    //cout << endl;

    return 0;
}


/* Parallel benchmark: each OpenMP thread strides over its own
 * contiguous slice of the global src array.
 *
 * len        - total number of ints in src
 * block_size - stride granularity forwarded to stride()
 *
 * Returns the largest per-thread time.  The original code let every
 * thread write the shared float unsynchronized (a data race), so the
 * reported value depended on thread scheduling; reduction(max) makes
 * it deterministic.
 */
float Par(int64_t len, int64_t block_size)
{
    float timetaken = 0.0f;
    // Trip count equals the team size, so with schedule(static)
    // iteration i lands on thread i -- one slice per thread.
    #pragma omp parallel for schedule(static) reduction(max : timetaken)
    for (int64_t acc = 0; acc < omp_get_num_threads(); acc++)
    {
        int tid = omp_get_thread_num();
        int num_threads = omp_get_num_threads();
        // Equal contiguous partition of src.  NOTE(review): the index
        // math narrows to int -- overflows if len/num_threads exceeds
        // INT_MAX; confirm stride()'s parameter types before widening.
        int startindex = tid * len / num_threads;
        int endindex = ((tid + 1) * len / num_threads) - 1;
        timetaken = stride(src, startindex, endindex, ACCESSES / num_threads, block_size);
    }

    testfinal();   // report/validate the PAPI counters
    return timetaken;
}

/* Sequential benchmark: one full pass over the global src array.
 *
 * len        - total number of ints in src
 * block_size - stride granularity forwarded to stride()
 *
 * Returns the measured time from stride().  Seq is only ever called
 * outside a parallel region, where omp_get_num_threads() is 1 and
 * omp_get_thread_num() is 0, so the original single-iteration loop
 * (with its uninitialized timetaken before the loop) collapses to a
 * single call covering [0, len-1] with all ACCESSES accesses.
 */
float Seq(int64_t len, int64_t block_size)
{
    float timetaken = stride(src, 0, (int)(len - 1), ACCESSES, block_size);

    testfinal();   // report/validate the PAPI counters
    return timetaken;
}

// PAPI
