/*
 * pxxcore.h
 * Copyright (c) 2009, "Eugen Stoian <stoian.e@gmail.com>"
 * 
 * This library is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version.
 * 
 * This library is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
 * details.
 * 
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 * http://www.fsf.org/licensing/licenses/lgpl.txt
 */

#ifndef PXXCORE_H_
#define PXXCORE_H_

#ifdef _OPENMP
#include <omp.h>
#endif

namespace pxx {
//! Singleton class used as an interface to SIMD-optimized operations
/*!
 *  The \b core class is a singleton class used to initialize the library.\n
 *  Also this class provides access to low level matrix/vector operations.
 *  Usage example: \n
 *  \code
#include <iostream>
#include <pxx.h>
#include <sys/times.h>
#define ITER 10
int main(int argc,char* argv[]){
	pxx::matrix<float> m1(1024,1024);
	pxx::matrix<float> m2(1024,1024);
	pxx::matrix<float> m3(1024,1024);
	unsigned long t1,t2;
	double dt1,dt2;
	tms t;

	for(int i = 0; i < 1024;++i)
		for(int j =0; j < 1024;++j)
		{
			m1(i,j)=1.0f;
			m2(i,j)=1.0f;
		}
	//SSE2 optimized, 2 threads
	if(!pxx::core::init("libpxxsse2.so",2))
		std::cout << "Optimization module cannot be found. Using defaults (no SIMD optimizations)" << std::endl;
	t1 =times(&t);
	for(int k = 0; k<ITER; ++k)
		m3 = m1*m2;
	t2 = times(&t);
	dt1 = t2-t1;

	//No optimizations, serial
	pxx::core::init("",1);
	t1 = times(&t);
	for(int k = 0; k<ITER; ++k)
		m3 = m1*m2;
	t2 = times(&t);
	dt2 = t2-t1;

	std::cout << "SIMD optimized, parallel code (2 threads):"<< dt1 << " clocks"<<std::endl;
	std::cout << "SIMD unoptimized, serial code:"<<  dt2 << " clocks"<<std::endl;
	std::cout << "Improvement:"<<(dt2/dt1-1.0)*100<<"%"<<std::endl;

	pxx::core::end();
	return 0;
}
 *	\endcode
 */
class core {
public:
	//! Execution units an optimization module may target.
	enum unit {
		FPU, SSE, SSE2
	};
	// Pointer-to-function types for the operations resolved from an
	// optimization module.  Naming scheme: op_<result>_<operands>_<precision>,
	// where v = vector, s = scalar, ps = packed single, pd = packed double.
	typedef void (*op_v_vv_ps)(int, float const *, float const *, float *);
	typedef float  (*op_s_vv_ps)(int, float const *, float const *);
	typedef void (*op_v_vsv_ps)(int, float const *, float, float const *, float *);
	typedef void (*op_v_vs_ps)(int, float const *, float, float *);
	typedef void (*op_v_vv_pd)(int, double const *, double const *, double *);
	typedef double  (*op_s_vv_pd)(int, double const *, double const *);
	typedef void (*op_v_vsv_pd)(int, double const *, double, double const *,double *);
	typedef void (*op_v_vs_pd)(int, double const *, double, double *);
	//typedef void (*op_m_m_ps)(int, int, float ** const, float **);
	//typedef void (*op_m_m_pd)(int, int, double ** const, double **);

	// BLAS-style aliases for the same operation signatures.
	typedef void ssum(int n, const float* x, const float* y, float* result);
	typedef void dsum(int n, const double* x, const double* y, double* result);
	typedef void (*sasum_type)(int, const float*, const float*, float*);
	typedef void (*dasum_type)(int, const double*, const double*, double* );
	typedef float (*sdot_type)(int , const float* , const float* );
	typedef double (*ddot_type)(int , const double* , const double* );
	typedef float (*snrm2_type)(int , const float* );
	typedef double (*dnrm2_type)(int , const double* );
	typedef void (*saxpy_type)(int , float , const float* , float* );
	// FIX: the scalar of the double-precision axpy must be double (it was
	// declared float, which would silently truncate the multiplier; compare
	// saxpy_type above, op_v_vsv_pd, and the BLAS DAXPY signature).
	typedef void (*daxpy_type)(int , double , const double* , double* );
	typedef void (*sscale_type)(int , float , float* );
	typedef void (*dscale_type)(int , double , double* );
	typedef int (*samax_type)(int , const float* );
	typedef int (*damax_type)(int , const double* );

	virtual ~core();

	/*!
	 * \brief Initializes the library: creates the singleton, loads the
	 * optimization module at \b path (an empty string selects the built-in,
	 * non-SIMD defaults) and sets the worker thread count.
	 * \return false when the optimization module could not be loaded.
	 */
	static bool init(const char * path = "", int threadsCount = 1);

	//! Shuts the library down and releases the singleton instance.
	static void end();

	//! Loads (or, presumably, replaces) the optimization module at \b path —
	//! confirm replacement semantics against the implementation.
	static bool load(const char * path = "");

	//! Returns the number of threads used by parallel operations.
	static int getThreadsCount();

	//! Sets the number of threads used by parallel operations.
	static void setThreadsCount(int count);

	// NOTE: every wrapper below dereferences _instance unconditionally, so
	// init() must have been called (successfully or not) before use.

	/*!
	 * \fn VAddV(int n, float const * op1, float const * op2, float * result)
	 * \brief Computes vectorial sum of \b op1 and \b op2. Result is stored in \b result which should be preallocated
	 *
	 * All vectors should be 16 byte aligned (see pxx::memalloc)
	 */
	inline static void VAddV(int n, float const * op1, float const * op2,
			float * result) {
		(*_instance->_v_addv_ps)(n, op1, op2, result);
	}
	/*!
	 * \fn VAddV(int n, double const * op1, double const * op2, double * result)
	 * \brief Computes vectorial sum of \b op1 and \b op2. Result is stored in \b result which should be preallocated
	 *
	 * All vectors should be 16 byte aligned (see pxx::memalloc)
	 */
	inline static void VAddV(int n, double const * op1, double const * op2,
			double * result) {
		(*_instance->_v_addv_pd)(n, op1, op2, result);
	}
	/*!
	 * \fn VSubV(int n, float const * op1, float const * op2, float * result)
	 * \brief Computes vectorial difference of \b op1 and \b op2. Result is stored in \b result which should be preallocated
	 *
	 * All vectors should be 16 byte aligned (see pxx::memalloc)
	 */
	inline static void VSubV(int n, float const * op1, float const * op2,
			float * result) {
		(*_instance->_v_subv_ps)(n, op1, op2, result);
	}
	/*!
	 * \fn VSubV(int n, double const * op1, double const * op2, double * result)
	 * \brief Computes vectorial difference of \b op1 and \b op2. Result is stored in \b result which should be preallocated
	 *
	 * All vectors should be 16 byte aligned (see pxx::memalloc)
	 */
	inline static void VSubV(int n, double const * op1, double const * op2,
			double * result) {
		(*_instance->_v_subv_pd)(n, op1, op2, result);
	}
	/*!
	 * \fn VMulV(int n, float const * op1, float const * op2, float * result)
	 * \brief Computes component multiplication of \b op1 and \b op2. Result is stored in \b result which should be preallocated
	 *
	 * All vectors should be 16 byte aligned (see pxx::memalloc)
	 */
	inline static void VMulV(int n, float const * op1, float const * op2,
			float * result) {
		(*_instance->_v_mulv_ps)(n, op1, op2, result);
	}
	/*!
	 * \fn VMulV(int n, double const * op1, double const * op2, double * result)
	 * \brief Computes component multiplication of \b op1 and \b op2. Result is stored in \b result which should be preallocated
	 *
	 * All vectors should be 16 byte aligned (see pxx::memalloc)
	 */
	inline static void VMulV(int n, double const * op1, double const * op2,
			double * result) {
		(*_instance->_v_mulv_pd)(n, op1, op2, result);
	}
	/*!
	 * \fn VDivV(int n, float const * op1, float const * op2, float * result)
	 * \brief Computes component division of \b op1 and \b op2. Result is stored in \b result which should be preallocated
	 *
	 * All vectors should be 16 byte aligned (see pxx::memalloc)
	 */
	inline static void VDivV(int n, float const * op1, float const * op2,
			float * result) {
		(*_instance->_v_divv_ps)(n, op1, op2, result);
	}
	/*!
	 * \fn VDivV(int n, double const * op1, double const * op2, double * result)
	 * \brief Computes component division of \b op1 and \b op2. Result is stored in \b result which should be preallocated
	 *
	 * All vectors should be 16 byte aligned (see pxx::memalloc)
	 */
	inline static void VDivV(int n, double const * op1, double const * op2,
			double * result) {
		(*_instance->_v_divv_pd)(n, op1, op2, result);
	}
	/*!
	 * \fn VDotV(int n, float const * op1, float const * op2)
	 * \brief Computes the dot product of \b op1 and \b op2 and returns it.
	 *
	 * All vectors should be 16 byte aligned (see pxx::memalloc)
	 */
	inline static float VDotV(int n, float const * op1, float const * op2) {
		return (*_instance->_v_dotv_ps)(n, op1, op2);
	}
	/*!
	 * \fn VDotV(int n, double const * op1, double const * op2)
	 * \brief Computes the dot product of \b op1 and \b op2 and returns it.
	 *
	 * All vectors should be 16 byte aligned (see pxx::memalloc)
	 */
	inline static double VDotV(int n, double const * op1, double const * op2) {
		return (*_instance->_v_dotv_pd)(n, op1, op2);
	}

	/*!
	 * \fn VMulS(int n, float const * op1, float op2, float * result)
	 * \brief Computes multiplication of vector \b op1 with scalar \b op2. Result is stored in \b result which should be preallocated.
	 *
	 * All vectors should be 16 byte aligned (see pxx::memalloc)
	 */
	inline static void VMulS(int n, float const * op1, float op2,
			float * result) {
		(*_instance->_v_muls_ps)(n, op1, op2, result);
	}
	/*!
	 * \fn VMulS(int n, double const * op1, double  op2, double * result)
	 * \brief Computes multiplication of vector \b op1 with scalar \b op2. Result is stored in \b result which should be preallocated.
	 *
	 * All vectors should be 16 byte aligned (see pxx::memalloc)
	 */
	inline static void VMulS(int n, double const * op1, double op2,
			double * result) {
		(*_instance->_v_muls_pd)(n, op1, op2, result);
	}
	/*!
	 * \fn VMulSAddV(int n, float const * op1, float  op2, float const * op3, float * result)
	 * \brief Computes \b op1*op2+op3. Result is stored in \b result which should be preallocated.
	 *
	 * All vectors should be 16 byte aligned (see pxx::memalloc)
	 */
	inline static void VMulSAddV(int n, float const * op1, float op2,
			float const * op3, float * result) {
		(*_instance->_v_muls_addv_ps)(n, op1, op2, op3, result);
	}
	/*!
	 * \fn VMulSAddV(int n, double const * op1, double  op2, double const * op3, double * result)
	 * \brief Computes \b op1*op2+op3. Result is stored in \b result which should be preallocated.
	 *
	 * All vectors should be 16 byte aligned (see pxx::memalloc)
	 */
	inline static void VMulSAddV(int n, double const * op1, double op2,
			double const * op3, double * result) {
		(*_instance->_v_muls_addv_pd)(n, op1, op2, op3, result);
	}
/*
	inline static void MTranspose(int r, int c, float ** const op, float ** result){
		(*_instance->_m_transpose_ps)(r, c, op, result);
	}

	inline static void MTranspose(int r, int c, double ** const op, double ** result){
		(*_instance->_m_transpose_pd)(r, c, op, result);
	}
*/
private:
	core();
	// Copying is disabled (declared, intentionally never defined — the
	// pre-C++11 idiom): a copy would duplicate _handle and the destructor
	// would presumably release the module handle twice.
	core(const core&);
	core& operator=(const core&);
	void reset();
private:
	// Operation slots filled in by load(); single precision first.
	op_v_vv_ps _v_addv_ps;
	op_v_vv_ps _v_subv_ps;
	op_v_vv_ps _v_mulv_ps;
	op_v_vv_ps _v_divv_ps;
	op_v_vs_ps _v_muls_ps;
	op_v_vsv_ps _v_muls_addv_ps;
	op_s_vv_ps _v_dotv_ps;
	// Double precision counterparts.
	op_v_vv_pd _v_addv_pd;
	op_v_vv_pd _v_subv_pd;
	op_v_vv_pd _v_mulv_pd;
	op_v_vv_pd _v_divv_pd;
	op_v_vs_pd _v_muls_pd;
	op_v_vsv_pd _v_muls_addv_pd;
	op_s_vv_pd _v_dotv_pd;
	//op_m_m_ps _m_transpose_ps;
	//op_m_m_pd _m_transpose_pd;
	void * _handle;       // optimization-module handle (presumably from dlopen — confirm in core.cpp)
	int _threadsCount;    // thread count reported by getThreadsCount()
	static core* _instance;  // the one-and-only instance, created by init()
};

}

#endif /* PXXCORE_H_ */
