/*
 * matrix.h
 *
 *  Created on: 2016年3月5日
 *      Author: guyadong
 */

#ifndef FACEDETECT_MEMORY_CL_H_
#define FACEDETECT_MEMORY_CL_H_
#include <type_traits>
#include <sstream>
#include <utility>
#include <vector>
#include "mycl.h"
#include "tls_var.h"
#include "assert_macros.h"
#include "cl_utilits.h"
#include "cl-files/common_types.h"
/*
 * OpenCL内存抽象模型定义
 * memory_cl为抽象接口
 * matrix_cl 继承自memory_cl，是通用矩阵类
 * gray_matrix_cl继承自matrix_cl,灰度图像类
 * */
namespace gdface {
// All-zero origin used as the offset argument of OpenCL image read/write calls.
// NOTE(review): non-inline static const in a header — each TU gets its own copy
// (internal linkage), which is harmless here but worth knowing.
const static cl::size_t<3> ZeroOffset;
const static size_t INTEGRAL_ALIGN_2POWER = 4; // power-of-two exponent for integral-image row alignment (rows padded to multiples of 16)
/*
 * Compute a reasonable number of work-groups for a task of length `len`,
 * given `local_size` work-items per work-group, capped by the number of
 * compute units of `device`.
 */
inline cl_uint compute_num_groups(cl::Device device,size_t len,size_t local_size){
	const auto max_units = device.getInfo<CL_DEVICE_MAX_COMPUTE_UNITS>();
	if (len < local_size)
		return cl_uint(1);
	// groups needed to cover len, rounded up
	const cl_uint wanted = cl_uint((len + local_size - 1) / local_size);
	return std::min<cl_uint>(wanted, max_units);
}
/*
 * Convenience overload: same computation as above, using the
 * thread-local OpenCL device.
 */
inline cl_uint compute_num_groups(size_t len,size_t local_size){
	auto device = tls::device();
	return compute_num_groups(device, len, local_size);
}
/*
 * Abstract model of an OpenCL memory object (cl::Buffer / cl::Image*).
 * Tracks whether the data currently resides on the device (on_device) and
 * whether the host copy changed since the last upload (host_update).
 * Subclasses implement the virtual upload()/download() in terms of
 * upload_force()/download_force().
 */
template<typename CL_TYPE,
		typename ENABLE=typename std::enable_if<std::is_base_of<cl::Memory,CL_TYPE>::value>::type>
class memory_cl{
public:
	using cl_cpp_type=CL_TYPE;
private:
	mutable bool	on_device=false;	// true once the data has been uploaded to the device
public:
	cl_cpp_type cl_mem_obj; // the wrapped OpenCL memory object
	mutable bool host_update=false;// true when the host-side data was modified after the last upload
	/* Query a CL_MEM_* property of the wrapped memory object.
	 * Throws face_cl_exception when cl_mem_obj is uninitialized. */
	template <cl_int name>
	inline
	typename cl::detail::param_traits<cl::detail::cl_mem_info, name>::param_type
	getInfo()const {
		check_cl_mem_obj(SOURCE_AT);
		return dynamic_cast<const cl::Memory&>(cl_mem_obj).getInfo<name>();
	}
	inline cl::Context get_mem_context()const{
		return getInfo<CL_MEM_CONTEXT>();
	}
	inline size_t get_mem_size()const{
		return getInfo<CL_MEM_SIZE>();
	}
	/* Throw face_cl_exception (prefixed with the caller location `at`) when the handle is null. */
	inline void check_cl_mem_obj(const std::string &at)const{
		if(!cl_mem_obj())
			throw face_cl_exception(std::string(at).append(":uninitialized cl::Memory object"));
	}
	/* Verify command_queue is valid and shares its cl::Context with this memory object. */
	inline void check_context(const std::string &at,const cl::CommandQueue& command_queue)const{
		check_cl_mem_obj(at);
		if(command_queue()){
			auto mem_context=get_mem_context();
			auto queue_context=command_queue.getInfo<CL_QUEUE_CONTEXT>();
			if(mem_context()!=queue_context())
				throw std::invalid_argument(std::string(at).append(":mem_context()!=queue_context()"));
		}else
			throw std::invalid_argument(std::string(at).append(":invalid cl::Command_Queue"));
	}
	/* Verify the device buffer is exactly `size` bytes. */
	inline void check_size(const std::string &at,size_t size)const{
		auto buffer_size= get_mem_size();
		if(buffer_size!=size)
			throw std::invalid_argument(std::string(at).append(":buffer_size!=size"));
	}
	/* Verify the wrapped object is of the given CL_MEM_OBJECT_* type. */
	inline void check_mem_type(const std::string &at,cl_mem_object_type type)const{
		if(type!= getInfo<CL_MEM_TYPE>())
			// BUGFIX: the message previously started from std::string(), silently
			// discarding the caller-supplied location `at`.
			throw face_cl_exception(std::string(at).append(":type!=mem_type"));
	}
	/* Full (origin-based) region of the wrapped image, suitable for
	 * enqueueReadImage/enqueueWriteImage. Throws for non-image memory types. */
	inline cl::size_t<3> full_region()const{
		auto mem_type=getInfo<CL_MEM_TYPE>();
		cl::size_t<3> region;
		const cl::Image &img=cl_mem_obj;
		switch(mem_type){
		case CL_MEM_OBJECT_IMAGE1D:{
			region[1] = 1;
			region[2] = 1;
			break;
		}
		case CL_MEM_OBJECT_IMAGE2D:{
			region[1] = img.getImageInfo<CL_IMAGE_HEIGHT>();
			region[2] = 1;
			break;
		}
		case CL_MEM_OBJECT_IMAGE3D:{
			region[1] = img.getImageInfo<CL_IMAGE_HEIGHT>();
			region[2] = img.getImageInfo<CL_IMAGE_DEPTH>();
			break;
		}
		default:
			throw face_cl_exception(SOURCE_AT,"unsupported CL_MEM_TYPE");
		}
		region[0] = img.getImageInfo<CL_IMAGE_WIDTH>();
		// FIX: plain return enables copy elision; `return std::move(region)` was a pessimization
		return region;
	}

	/* Upload to the device only when the data is not there yet (on_device==false)
	 * or the host copy was modified (host_update==true). */
	void upload_if_need(const cl::CommandQueue& command_queue=tls::command_queue())const{
		if(!on_device||host_update){
			upload(command_queue);
		}
	}
	/*
	 * Download from the device unless CL_MEM_FLAGS contains
	 * CL_MEM_USE_HOST_PTR or CL_MEM_READ_ONLY (nothing new to fetch then).
	 */
	void download_if_need(const cl::CommandQueue& command_queue=tls::command_queue()){
		if(!(this->getInfo<CL_MEM_FLAGS>()&(CL_MEM_USE_HOST_PTR|CL_MEM_READ_ONLY))){
			download(command_queue);
		}
	}
	/* Virtual: download result data from the OpenCL device into the host copy. */
	virtual void download(const cl::CommandQueue& command_queue=tls::command_queue()){
		throw face_exception(SOURCE_AT,"sub class must implement the funtion "
				"by calling download_force(const cl::CommandQueue& command_queue,std::vector<E> &out)");
	}
	/* Virtual: upload the host copy to the OpenCL device. */
	virtual void upload(const cl::CommandQueue& command_queue=tls::command_queue())const{
		throw face_exception(SOURCE_AT,
				"sub class must implement the funtion "
				"by calling upload_force(const cl::CommandQueue& command_queue,std::vector<E> &in) ");
	}
	/* Blocking write of exactly `size` bytes from ptr into the buffer.
	 * Throws when size/ptr are invalid, the contexts differ, the buffer
	 * size mismatches, or the object is not a CL_MEM_OBJECT_BUFFER. */
	void writeBuffer(size_t size, const void* ptr, const cl::CommandQueue& command_queue=tls::command_queue())const{
		throw_if(0==size||nullptr==ptr)
		check_context(SOURCE_AT,command_queue);
		check_size(SOURCE_AT,size);
		check_mem_type(SOURCE_AT,CL_MEM_OBJECT_BUFFER);
		command_queue.enqueueWriteBuffer(
				this->cl_mem_obj,
				CL_TRUE,
				0,
				size,
				ptr);
		on_device=true;
	}
	/* Blocking write of `size` bytes from ptr covering the full image region. */
	void writeImage(size_t size,const void* ptr, const cl::CommandQueue& command_queue=tls::command_queue())const{
		throw_if(0==size||nullptr==ptr)
		check_context(SOURCE_AT,command_queue);
		check_size(SOURCE_AT,size);
		cl::size_t<3> region=full_region();
		static const cl::size_t<3> ZeroOffset;
		auto &img=dynamic_cast<const cl::Image &>(cl_mem_obj);
		command_queue.enqueueWriteImage(cl_mem_obj,
				CL_TRUE,
				ZeroOffset,
				region,
				img.getImageInfo<CL_IMAGE_ROW_PITCH>(),
				img.getImageInfo<CL_IMAGE_SLICE_PITCH>(),
				(void*)ptr);
		on_device=true;
	}
	/* Buffer specialization: upload `in` unless the device already shares/copied
	 * host memory (CL_MEM_COPY_HOST_PTR|CL_MEM_USE_HOST_PTR) or there is nothing
	 * to send; clears host_update after an actual write. */
	template<typename E, typename _CL_TYPE = CL_TYPE>
	typename std::enable_if<std::is_base_of<cl::Buffer,_CL_TYPE>::value>::type
	upload_force(const std::vector<E> &in,const cl::CommandQueue& command_queue=tls::command_queue()) const{
		check_cl_mem_obj(SOURCE_AT);
		auto flags=getInfo<CL_MEM_FLAGS>();
		if( !this->host_update && ((flags&(CL_MEM_COPY_HOST_PTR|CL_MEM_USE_HOST_PTR)) || (0 == in.size())) )
			this->on_device=true;
		else{
			writeBuffer(in.size()*sizeof(E),in.data(), command_queue);
			this->host_update=false;
		}
	}
	/* Image2D specialization of upload_force.
	 * NOTE(review): unlike the Buffer overload, this one neither consults nor
	 * clears host_update — confirm whether that asymmetry is intentional. */
	template<typename E,typename _CL_TYPE=CL_TYPE>
	typename std::enable_if<std::is_base_of<cl::Image2D,_CL_TYPE>::value>::type
	upload_force(const std::vector<E> &in,const cl::CommandQueue& command_queue=tls::command_queue()) const{
		check_cl_mem_obj(SOURCE_AT);
		auto flags=getInfo<CL_MEM_FLAGS>();
		if((flags&(CL_MEM_COPY_HOST_PTR|CL_MEM_USE_HOST_PTR))
			|| (0==in.size()))
			on_device=true;
		else
			writeImage(in.size()*sizeof(E),in.data(), command_queue);
	}
	/*
	 * Buffer specialization: blocking read of the whole cl_mem_obj into `out`,
	 * resizing `out` to fit. The buffer size must be a multiple of sizeof(E).
	 */
	template<typename E, typename _CL_TYPE = CL_TYPE>
	typename std::enable_if<std::is_base_of<cl::Buffer,_CL_TYPE>::value>::type
	download_force(std::vector<E> &out, const cl::CommandQueue& command_queue=tls::command_queue()) const{
		check_context(SOURCE_AT,command_queue);
		auto cl_size = get_mem_size();
		throw_except_if_msg(face_cl_exception,cl_size%sizeof(E),"invalid cl:Buffer size")	 // cl_size must be a multiple of sizeof(E)
		if (out.size()*sizeof(E) != cl_size)
			out = std::vector<E>(cl_size/sizeof(E));
		command_queue.enqueueReadBuffer(this->cl_mem_obj,
				CL_TRUE,
				0,
				cl_size,
				(void*)out.data());
		on_device=true;
	}
	/* Image2D specialization: blocking read of the full image region into `out`.
	 * row_pitch==0 means tightly packed rows (width elements per row). */
	template<typename E, typename _CL_TYPE = CL_TYPE>
	typename std::enable_if<std::is_base_of<cl::Image2D,_CL_TYPE>::value>::type
	download_force(std::vector<E> &out,size_t row_pitch=0,const cl::CommandQueue& command_queue=tls::command_queue()) const{
		check_context(SOURCE_AT,command_queue);
		cl::size_t<3> region=full_region();
		static const cl::size_t<3> ZeroOffset;
		// allocate the destination image storage
		auto need_size=(row_pitch?row_pitch:region[0]) * region[1];
		if (out.size() != need_size)
			out = std::vector<E>(need_size);
		command_queue.enqueueReadImage(this->cl_mem_obj,
				CL_TRUE,
				ZeroOffset,
				region,
				row_pitch,
				0,
				(void*) out.data());
		on_device=true;
	}

	/* Wrap an existing OpenCL memory object; `on_device` states whether its data is current. */
	// FIX: init-list reordered to declaration order (on_device before cl_mem_obj) to silence -Wreorder
	memory_cl(const CL_TYPE& cl_mem_obj,bool on_device):on_device(on_device),cl_mem_obj(cl_mem_obj){}
	memory_cl(const memory_cl&)=default;
	memory_cl(memory_cl&&)=default;
	memory_cl()=default;
	memory_cl& operator=(const memory_cl&)=default;
	memory_cl& operator=(memory_cl&&rv)=default;
	/* Conversion operators exposing the underlying OpenCL memory object. */
	operator const cl_cpp_type& ()const{	return this->cl_mem_obj;	}
	operator cl_cpp_type&(){return this->cl_mem_obj;}
	virtual ~memory_cl()=default;
};
/* Round v up to the next multiple of 2^a (a is the power-of-two exponent). */
template<typename T> static inline T align_up(T v, size_t a) {
	const T mask = (T)((1 << a) - 1);
	return (T)((v + mask) >> a << a);
}
/* Aligned width plus a padding of s elements, re-aligned: used to give OpenCL
 * kernels a special row stride convenient for parallel computation. */
template<typename T> static inline T align_up_plus(T v, size_t s, size_t a) {
	const T aligned = align_up(v, a);
	return align_up((T)(aligned + s), a);
}
/* Integer division v/a, rounded up. */
template<typename T> static inline T ceil_div(T v, size_t a) {
	return (T)((v + a - 1) / a);
}
/*
 * Wraps a std::vector as a kernel memory object (cl::Buffer).
 */
template<typename E	>
struct struct_vector_cl:public memory_cl<cl::Buffer>,public std::vector<E>{
	using base_type_vector=std::vector<E>;
	using self_type=struct_vector_cl<E>;
	using base_type_vector::base_type_vector;
	size_t cl_size=0; // element count of a device-only buffer (0 when the buffer mirrors the vector)
	/* Copy an existing vector and immediately create a cl::Buffer over its data. */
	struct_vector_cl(const base_type_vector&v,cl_mem_flags flags,const cl::Context &context=global_facecl_context.getContext()):base_type_vector(v){
		throw_if_msg(0 == base_type_vector::size(),"size of vector must not be zero")
		this->cl_mem_obj=cl_utilits::createBuffer(flags,context,base_type_vector::size()*sizeof(E),(const void*)base_type_vector::data());
	}
	struct_vector_cl() = default;
	struct_vector_cl(const base_type_vector&lv):base_type_vector(lv){}
	struct_vector_cl(base_type_vector&&rv):base_type_vector(std::move(rv)){}
	/* Create a cl::Buffer whose data comes from the host-side vector. */
	self_type& createBuffer(cl_mem_flags flags,const cl::Context &context){
		this->cl_mem_obj=cl_utilits::createBuffer(flags,context,base_type_vector::size()*sizeof(E),(const void*)base_type_vector::data());
		return *this;
	}
	/* Create a device-only cl::Buffer of cl_size elements; no host memory is
	 * allocated (the vector stays empty). cl_size must not be zero (throws
	 * otherwise). The unnamed bool parameter only selects this overload. */
	struct_vector_cl(size_t cl_size,bool ,const cl::Context &context=global_facecl_context.getContext()){
		// BUGFIX(message): "arugment" -> "argument"
		throw_if_msg(0 == cl_size,"size of vector must be zero,the argument cl_size must not be zero ")
		this->cl_size = cl_size;
		this->cl_mem_obj=cl_utilits::createBuffer(CL_MEM_READ_WRITE,context, cl_size*sizeof(E),nullptr);
	}
	/* Buffer length in elements: cl_size for device-only buffers, vector size otherwise. */
	size_t cl_buffer_size(){return cl_size?cl_size:this->size();}
	/*
	 * Fill a region of the cl::Buffer with pattern.
	 * offset: region offset (in elements)
	 * size  : region length (in elements); 0 means "to the end of the buffer"
	 */
	self_type&fillBuffer(E pattern, size_t offset=0, size_t size=0,cl::CommandQueue command_queue=tls::command_queue()){
		// BUGFIX: the old code bounds-checked before defaulting size and defaulted
		// it to the full buffer length regardless of offset, so offset>0 with
		// size==0 enqueued a fill running past the end of the buffer.
		throw_if(offset>cl_buffer_size())
		size=size?size:cl_buffer_size()-offset;
		throw_if(size>cl_buffer_size()-offset)	// overflow-safe form of offset+size>cl_buffer_size()
		command_queue.enqueueFillBuffer(*this,pattern,offset*sizeof(E), size*sizeof(E));
		return *this;
	}
	/*
	 * Fill a region of the cl::Buffer with pattern.
	 * offset: region offset (in bytes)
	 * size  : region length (in bytes); 0 means "to the end of the buffer"
	 */
	template<typename PatternType>
	self_type&fillBuffer(PatternType pattern, size_t offset=0, size_t size=0,cl::CommandQueue command_queue=tls::command_queue()){
		const size_t total_bytes=cl_buffer_size()*sizeof(E);
		// BUGFIX: same default/bounds-check ordering problem as the element overload.
		throw_if(offset>total_bytes)
		size=size?size:total_bytes-offset;
		throw_if(size>total_bytes-offset)
		throw_if_msg(size%sizeof(PatternType),"invalid pattern type, this argument size must be a multiple of sizof (pattern) ")
		command_queue.enqueueFillBuffer(*this,pattern,offset, size);
		return *this;
	}

	/* Read the whole device buffer back into this vector. */
	virtual void download(const cl::CommandQueue& command_queue=tls::command_queue()){
		this->download_force(*this,command_queue);
	}
	/* Send this vector's data to the device (when needed — see upload_force). */
	virtual void upload(const cl::CommandQueue& command_queue=tls::command_queue())const{
		this->upload_force(*this,command_queue);
	}
};
/*
 * Wraps a single struct (a non-scalar object type E) as a kernel memory object.
 * The struct itself is the host-side storage; the cl::Buffer covers sizeof(E).
 */
template<typename E
,typename ENABLE=typename std::enable_if<std::is_object<E>::value&&!std::is_scalar<E>::value>::type
>
struct struct_cl:public memory_cl<cl::Buffer>,public E{
	using base_type_struct=E;
	using self_type=struct_cl<E,void>;
	using E::E;
	/* Copy v into the E sub-object and create a cl::Buffer over it. */
	struct_cl(const base_type_struct& v,cl_mem_flags flags,const cl::Context &context):base_type_struct(v){
		this->cl_mem_obj=cl_utilits::createBuffer(flags,context,sizeof(E),static_cast<E*>(this));
	}
	struct_cl() = default;
	/* Default-construct, then assign the E sub-object from lv. */
	struct_cl(const base_type_struct&lv){
		static_cast<E&>(*this)=lv;
	}
	/* Default-construct, then move-assign the E sub-object from rv. */
	struct_cl(base_type_struct&&rv){
		static_cast<E&>(*this)=std::move(rv);
	}
	/* (Re)create the cl::Buffer backed by this object's E sub-object. */
	self_type& createBuffer(cl_mem_flags flags,const cl::Context &context){
		this->cl_mem_obj=cl_utilits::createBuffer(flags,context,sizeof(E),static_cast<E*>(this));
		return *this;
	}
	struct_cl(const struct_cl &)=default;
	struct_cl(struct_cl &&)=default;
	struct_cl& operator=(const struct_cl&)=default;
	struct_cl& operator=(struct_cl&&)=default;
	/* Read the struct back from the device into the E sub-object. */
	virtual void download(const cl::CommandQueue& command_queue=tls::command_queue()){
		std::vector<E> tmp;
		this->download_force(tmp,command_queue);
		static_cast<E&>(*this)=std::move(tmp[0]);
	}
	/* Send the current E sub-object to the device. */
	virtual void upload(const cl::CommandQueue& command_queue=tls::command_queue())const{
		std::vector<E> tmp={*this};
		this->upload_force(tmp,command_queue);
	}
};
/*
 * Wraps a single scalar value as a kernel memory object
 * (a cl::Buffer holding exactly one E).
 */
template<typename E
,typename ENABLE=typename std::enable_if<std::is_scalar<E>::value>::type
>
struct scala_cl:public memory_cl<cl::Buffer>{
	using self_type=scala_cl<E,void>;
	E value; // host-side copy of the scalar
	/* Store the value and create the buffer over the member; with the default
	 * flags the host value is copied to the device at creation time. */
	scala_cl(const E &value,cl_mem_flags flags=CL_MEM_READ_WRITE|CL_MEM_COPY_HOST_PTR
				,const cl::Context &context=global_facecl_context.getContext()):value(value){
		this->cl_mem_obj = cl_utilits::createBuffer(flags, context, sizeof(E), std::addressof(this->value));
	}
	scala_cl():scala_cl(E(0)){}
	scala_cl(const scala_cl&)=default;
	scala_cl(scala_cl&&)=default;
	scala_cl& operator=(const scala_cl&)=default;
	scala_cl& operator=(scala_cl&&)=default;
	/* Read the scalar back from the device into value. */
	virtual void download(const cl::CommandQueue& command_queue=tls::command_queue()){
		std::vector<E> tmp;
		this->download_force(tmp,command_queue);
		value=tmp[0];
	}
	/* Send the current host value to the device. */
	virtual void upload(const cl::CommandQueue& command_queue=tls::command_queue())const{
		std::vector<E> tmp={value};
		this->upload_force(tmp,command_queue);
	}
	/*
	 * Assignment: update value and set host_update so that the device
	 * copy is refreshed before the next kernel run.
	 */
	scala_cl& operator=(const E&v){
		this->value=v;
		this->host_update=true;
		return *this;
	}
};
/* Generic matrix class: an OpenCL memory object (cl::Buffer or cl::Image2D)
 * paired with optional host-side storage (v) and geometry (matrix_info_cl). */
template<typename E
	,typename CL_TYPE
	,typename ENABLE=typename std::enable_if< std::is_base_of<cl::Buffer		,CL_TYPE>::value
											||std::is_base_of<cl::Image2D	,CL_TYPE>::value>::type>
struct matrix_cl:public memory_cl<CL_TYPE>,public matrix_info_cl{
	using base_type=memory_cl<CL_TYPE>;
	using self_type=matrix_cl<E,CL_TYPE>;
	using element_type=E;
	cl_uint		col_stride = 0;		// column stride; only used by transpose() to record the pre-transpose row stride
	std::vector<E>	v;					// host-side matrix data
	/*
	 * Create a matrix of the given size.
	 * align>=0: row stride is width rounded up to 2^(align&0x0f)
	 * align<0 : -align is the row stride itself
	 * When ptr is not nullptr the matrix is filled from ptr;
	 * ptr must hold at least row_stride*height elements, otherwise the
	 * std::vector construction below reads out of bounds (undefined behavior).
	 */
	matrix_cl(size_t width, size_t height, int align=0,const E*ptr=nullptr):
		matrix_info_cl({ (cl_uint)width, (cl_uint)height,align < 0 ? cl_uint(-align) : align_up((cl_uint)width,align & 0x0f) })
	{
		if(nullptr!=ptr)
			this->v=std::vector<E>(ptr,ptr+(get_row_stride()*height));
	};
	/* Same as above, but the geometry comes from an existing matrix_info_cl. */
	matrix_cl(const matrix_info_cl& mi, const E*ptr = nullptr) :matrix_info_cl(mi){
		if (nullptr != ptr)
			this->v = std::vector<E>(ptr, ptr + (get_row_stride()*height));
	};
	matrix_cl()=default;
	/*
	 * Create a matrix of the given size over an existing memory_cl base.
	 * align>=0: row stride is width rounded up to 2^(align&0x0f)
	 * align<0 : -align is the row stride itself
	 */
	template<typename _BASE
		,typename _ENABLE = typename std::enable_if<std::is_base_of<base_type, typename std::decay<_BASE>::type>::value>::type
		>
	matrix_cl(size_t width, size_t height, int align,_BASE &&base):base_type(std::forward<_BASE>(base))
		, matrix_info_cl({ (cl_uint)width, (cl_uint)height,align < 0 ? cl_uint(-align) : align_up((cl_uint)width,align & 0x0f) }){};
	/*
	 * Create a matrix of the given size, initialized from the std::vector v.
	 * Throws when v is non-empty and its length does not match the geometry
	 * implied by width/height/align. The member vector is moved from v when
	 * v is an rvalue, copied otherwise.
	 */
	template<typename _V,bool _RV=std::is_rvalue_reference<_V&&>::value
			,typename _ENABLE=typename std::enable_if<std::is_base_of<std::vector<E>,typename std::decay<_V>::type>::value>::type
			>
	matrix_cl(size_t width, size_t height, int align,_V &&v):matrix_cl(width,height,align){
		throw_if(v.size()&&get_row_stride()*height!=v.size())
		this->v=_RV?std::move(v):v;
	};
	/* Create a matrix and a CL_MEM_READ_WRITE device buffer in `context`. */
	matrix_cl(size_t width,size_t height, int align,const cl::Context &context):matrix_cl(width,height,align){
		throw_if(0==width||0==height)
		this->cl_mem_obj=this->createBuffer(CL_MEM_READ_WRITE,context);
	}
	/* Same as above with the geometry from an existing matrix_info_cl. */
	matrix_cl(const matrix_info_cl& mi, const cl::Context &context):matrix_cl(mi){
		throw_if(0== mi.width||0== mi.height)
		this->cl_mem_obj=this->createBuffer(CL_MEM_READ_WRITE,context);
	}
	virtual ~matrix_cl()=default;

	/* Row stride of the matrix (elements per row including alignment padding). */
	inline cl_uint get_row_stride()const{
		return row_stride;
	}

	/* Create a cl::Buffer of row_stride*height elements from the host data. */
	cl::Buffer createBuffer(cl_mem_flags flags,const cl::Context &context)const{
		return cl_utilits::createBuffer(flags,context,get_row_stride()*height*sizeof(E),(const void*)v.data());
	}
	/* Create a grayscale cl::Image2D from the host data (nullptr data when v is empty). */
	cl::Image2D createImage2DGray(cl_mem_flags flags,const cl::Context &context){
		const auto row_pitch=get_row_stride();
		// throw when the image size implied by the parameters differs from the actual vector size
		throw_if(v.size()&&v.size()!=row_pitch * height)
		auto format=cl_utilits::getFittedImageFormatForGray(context, flags);
		return cl_utilits::createImage2D(flags,context,format,width,height,row_pitch,v.empty()?nullptr:v.data());
	}
	/*
	 * Fill a rectangular region of the cl::Buffer with pattern.
	 * offset: region origin (in elements); size: region extent (in elements);
	 * {0,0} size means the whole matrix.
	 */
	template<typename _CL_TYPE=CL_TYPE>
	typename std::enable_if<std::is_base_of<cl::Buffer, _CL_TYPE>::value,self_type>::type
	fillBuffer(E pattern, cl_uint2 offset={0,0}, cl_uint2 size={0,0},cl::CommandQueue command_queue=tls::command_queue()){
		// gcc could not resolve the intended operator+ here; use plain arithmetic instead
		//cl_uint2 bottom=offset + size;
		cl_uint2 bottom{offset.x+size.x,offset.y+size.y};
		throw_if(bottom.x*bottom.y>this->get_row_stride()*this->height)
		if(size==cl_uint2{0,0})
			size = { this->width,this->height };
		if (size == cl_uint2{ this->width,this->height }) {
			// fill the whole cl::Buffer in one call
			command_queue.enqueueFillBuffer(*this,pattern,0, this->get_row_stride()*this->height*sizeof(E));
		}else{
			// fill row by row: one enqueueFillBuffer per row of the region (byte offsets)
			size.x*=sizeof(E);
			offset.x*=sizeof(E);
			for(cl_uint end_y=offset.y+size.y,row_stride=cl_uint(this->get_row_stride()*sizeof(E)),index=offset.y*row_stride+offset.x;
						offset.y<end_y;
						++offset.y,index+=row_stride){
				command_queue.enqueueFillBuffer(*this,pattern,index, size.x);
			}
		}

		return *this;
	}
	/* Read the device data back into v (Buffer or Image2D, via _download). */
	virtual void download(const cl::CommandQueue& command_queue=tls::command_queue()){
		this->_download(command_queue);
	}
	/* Send the host data v to the device (when needed — see upload_force). */
	virtual void upload(const cl::CommandQueue& command_queue=tls::command_queue())const{
		this->upload_force(v,command_queue);
	}


	/*
	 * Column-wise prefix sum of the matrix combined with a transpose, run on the
	 * device with the named kernel; returns the (transposed) result matrix.
	 * NOTE(review): INTEGRAL_COLUMN_STEP and run_kernel are defined elsewhere —
	 * launch geometry semantics not verifiable from this file.
	 */
	template< typename DST_E
			,typename _CL_TYPE=CL_TYPE,typename _E=E
			,typename RET_TYPE=matrix_cl<DST_E,cl::Buffer>>
	typename 	std::enable_if<std::is_scalar<_E>::value
														&&std::is_scalar<DST_E>::value
														&&sizeof(DST_E)>=sizeof(_E)
														&&std::is_base_of<cl::Buffer,_CL_TYPE>::value
														,RET_TYPE
														>::type
	prefix_sum_col_and_transpose(const std::string &kernel_name,int align)const{
		this->check_cl_mem_obj(SOURCE_AT);
		RET_TYPE integral_mat(height, width,align,this->get_mem_context());
		run_kernel(global_facecl_context.getKernel(kernel_name)
			, cl::EnqueueArgs(tls::command_queue(), cl::NDRange{ compute_num_groups(this->height,INTEGRAL_COLUMN_STEP)*INTEGRAL_COLUMN_STEP }, cl::NDRange{ INTEGRAL_COLUMN_STEP })
			, false
			, *this
			, integral_mat
			, this->width
			, this->height
			, this->get_row_stride()
			, integral_mat.get_row_stride()
			);
		return integral_mat;
	}
	/*
	 * Matrix transpose on the device with the named kernel.
	 */
	template<typename _CL_TYPE=CL_TYPE>
	typename std::enable_if<std::is_base_of<cl::Buffer, _CL_TYPE>::value,self_type>::type
	transpose(const std::string &kernel_name)const{
		this->check_cl_mem_obj(SOURCE_AT);
		self_type dst_mat(height, width, -(int)this->col_stride, this->get_mem_context());
		dst_mat.col_stride =  this->get_row_stride(); // record the pre-transpose row stride
		run_kernel(global_facecl_context.getKernel(kernel_name)
				, cl::EnqueueArgs(tls::command_queue(),{ 1, height })// one work-item per row
				, false
				, *this
				, dst_mat
				, width
				, this->get_row_stride()
				, dst_mat.get_row_stride()
			);
		return dst_mat;
	}
	/*
	 * Integral image via two prefix-sum+transpose passes with the named kernels.
	 * NOTE(review): the align parameter is ignored; INTEGRAL_ALIGN_2POWER is
	 * passed to both passes instead — confirm this is intentional.
	 */
	template<typename DST_E
		,typename RET_TYPE=matrix_cl<DST_E,cl::Buffer>>
	typename std::enable_if<std::is_scalar<DST_E>::value,RET_TYPE>::type
	integral(
			 const std::string &prefix_sum_kernel1
			,const std::string &prefix_sum_kernel2
			,int align=0) const {
		auto integral_mat1=prefix_sum_col_and_transpose<DST_E>(prefix_sum_kernel1,INTEGRAL_ALIGN_2POWER);
		auto integral_mat2=integral_mat1.prefix_sum_col_and_transpose<DST_E>(prefix_sum_kernel2,INTEGRAL_ALIGN_2POWER);
		return integral_mat2;
	}
	/*
	 * Integral image via a five-kernel block/scan/combine pipeline:
	 * block-local integral, then vertical scan+combine, then horizontal
	 * scan+combine. Intermediate matrices live only on the device.
	 */
	template<typename DST_E
		,typename RET_TYPE=matrix_cl<DST_E,cl::Buffer>>
	typename std::enable_if<std::is_scalar<DST_E>::value,RET_TYPE>::type
	integral(
			 const std::string &integral_block_kernel
			,const std::string &integral_scan_v_kernel
			,const std::string &integral_combine_v_kernel
			,const std::string &integral_scan_h_kernel
			,const std::string &integral_combine_h_kernel
			,int align=0) const {
		RET_TYPE integral_mat(width,height,align,this->get_mem_context());
		const auto &wg_hint= tls::pe_num_per_cu();
		struct_cl<integ_param> param={
				 {(cl_int)this->width,(cl_int)this->height, (cl_int)this->get_row_stride(), (cl_int)integral_mat.get_row_stride()}
				, CL_MEM_USE_HOST_PTR| CL_MEM_READ_WRITE,tls::context()};
		run_kernel(global_facecl_context.getKernel(integral_block_kernel)
			, cl::EnqueueArgs(tls::command_queue(), cl::NDRange{ align_up(ceil_div(this->width,4),wg_hint.second),ceil_div(this->height,4) }, cl::NDRange{wg_hint.first,1})
			, false
			, *this
			, integral_mat
			, param
			);
		RET_TYPE integral_mat2(width,height,align,this->get_mem_context());
		{
			// vertical pass: scan per-block column sums, then combine into integral_mat2
			RET_TYPE v_mat(integral_mat.width>>2,integral_mat.height,2,this->get_mem_context());
			run_kernel(global_facecl_context.getKernel(integral_scan_v_kernel)
						, cl::EnqueueArgs(tls::command_queue(), cl::NDRange{ align_up(this->height,wg_hint.second) },cl::NDRange{wg_hint.first})
						, false
						, integral_mat
						, param
						, v_mat
						, cl_int(v_mat.get_row_stride())
						);
			run_kernel(global_facecl_context.getKernel(integral_combine_v_kernel)
						, cl::EnqueueArgs(tls::command_queue(), cl::NDRange{ align_up(ceil_div(this->width,4),wg_hint.second),this->height }, cl::NDRange{wg_hint.first,1})
						, false
						, integral_mat
						, param
						, v_mat
						, cl_int(v_mat.get_row_stride())
						, integral_mat2
						);
		}
		{
			// horizontal pass: scan per-block row sums, then combine back into integral_mat
			RET_TYPE h_mat(integral_mat.width,integral_mat.height>>2,2,this->get_mem_context());
			run_kernel(global_facecl_context.getKernel(integral_scan_h_kernel)
						, cl::EnqueueArgs(tls::command_queue(), cl::NDRange{ align_up(ceil_div(this->width,4),wg_hint.second) }, cl::NDRange{wg_hint.first})
						, false
						, integral_mat2
						, param
						, h_mat
						, cl_int(h_mat.get_row_stride())
						);
			run_kernel(global_facecl_context.getKernel(integral_combine_h_kernel)
						, cl::EnqueueArgs(tls::command_queue(), cl::NDRange{ align_up(ceil_div(this->width,4),wg_hint.second),this->height }, cl::NDRange{wg_hint.first,1})
						, false
						, integral_mat2
						, param
						, h_mat
						, cl_int(h_mat.get_row_stride())
						, integral_mat
						);
		}
		// must wait for the commands to finish here, otherwise integral_mat2
		// would be destroyed while the kernels may still reference it
		tls::command_queue().finish();
		return integral_mat;
	}
	/*
	 * Integral image counting the non-zero elements of the matrix.
	 * Chooses the two-kernel path on CPU devices, the five-kernel path otherwise.
	 */
	template<typename _E=E,typename _CL_TYPE=CL_TYPE>
	typename std::enable_if<1==sizeof(_E)&&std::is_same<_CL_TYPE,cl::Buffer>::value,matrix_cl<cl_ushort,cl::Buffer>>::type
	integral_count(int align) const {
		if(tls::device_type_is(CL_DEVICE_TYPE_CPU)){
			return integral<cl_ushort>(
					 KERNEL_NAME_VAR(prefix_sum_col_and_transpose,_PREFIX_SUM_SUFFIX(_uchar_ushort,integ_count))
					,KERNEL_NAME_VAR(prefix_sum_col_and_transpose,_PREFIX_SUM_SUFFIX(_ushort_ushort,integ_default))
					,align);
		}
		return integral<cl_ushort>(
				 KERNEL_NAME_VAR(integral_block,_PREFIX_SUM_SUFFIX(_uchar_ushort,integ_count))
				,KERNEL_NAME_VAR(integral_scan_v,_ushort)
				,KERNEL_NAME_VAR(integral_combine_v,_ushort)
				,KERNEL_NAME_VAR(integral_scan_h,_ushort)
				,KERNEL_NAME_VAR(integral_combine_h,_ushort)
				,align);
	}
	/* Prefix-sum helper (host side): TYPE selects plain sum, sum of squares
	 * (integ_square), or count of non-zero elements (integ_count).
	 * src/dst must be non-null; dst must hold at least `size` elements. */
	template<integral_type TYPE,typename _T1,typename _T2>
	inline static
	typename std::enable_if<sizeof(_T1)<=sizeof(_T2)>::type
	prefix_sum(const _T1 *const src,_T2 *const dst,size_t size){
		assert(nullptr!=src&&nullptr!=dst);
		switch(TYPE){
		case integ_square:
			dst[0] = _T2(src[0])*_T2(src[0]);
			for( size_t i=1; i<size; ++i)	dst[i]=_T2(src[i])*_T2(src[i])+dst[i-1];
			break;
		case integ_count:
			dst[0] = _T2(src[0]?1:0);
			for( size_t i=1; i<size; ++i)	dst[i]=_T2(src[i]?1:0)+dst[i-1];
			break;
		default:
			dst[0] = _T2(src[0]);
			for( size_t i=1; i<size; ++i)	dst[i]=_T2(src[i])+dst[i-1];
		}
	}
	
	/* Print the host-side matrix (width x height, honoring row_stride) as hex values. */
	void show_matrix(std::ostream &stream=std::cout)const{

		auto ptr=v.data();
		auto row_stride=this->get_row_stride();
		stream << std::endl;
		for(size_t y=0;y<height;++y
				,ptr+=row_stride){
			for(size_t x=0;x<width;++x){
				stream.width(sizeof(ptr[0])*2+2);
				stream.flags(std::ios::right | std::ios::hex | std::ios::showbase);
				stream<< (int)(ptr[x]);
				stream<<",";
			}
			stream<<std::endl;
		}
		stream<<std::endl;
	}
	/*
	 * Reference integral-image algorithm on the CPU; returns the integral matrix.
	 * DST_E : element type of the integral matrix (cl_ulong, cl_float, cl_double)
	 * ALIGN : power-of-two row-alignment exponent of the result
	 * TYPE  : integ_square computes a squared-integral image, integ_count a
	 *         non-zero-count image, anything else the plain integral image.
	 */
	template<typename DST_E,int ALIGN=1,integral_type TYPE,typename _E=E
		,typename RET_TYPE=matrix_cl<DST_E,cl::Buffer>>
	typename std::enable_if<sizeof(_E)<=sizeof(DST_E),RET_TYPE>::type
	integral_cpu()const {
		throw_if(this->get_row_stride()*this->height!=this->v.size())
		auto integ_mat = RET_TYPE(this->width,this->height,uint8_t(ALIGN));
		// row strides of source and destination
		const auto src_row_stride = this->get_row_stride();
		const auto dst_row_stride = integ_mat.get_row_stride();
		if (!integ_mat.v.size())
			integ_mat.v = std::vector<DST_E>(dst_row_stride * this->height);
		auto last_line	= integ_mat.v.data(); // pointer to the previous integral row
		auto cur_line 	= last_line;			// pointer to the current integral row
		auto src_line 	= this->v.data();		// pointer to the current source row
		// prefix sum of the first row
		prefix_sum<TYPE>(src_line, cur_line, this->width);
		src_line += src_row_stride;	// advance to the next source row
		cur_line += dst_row_stride;
		DST_E line_sum; // running sum of the current row
		typename std::decay<decltype(this->height)>::type y;
		typename std::decay<decltype(this->width )>::type x;
		// compute the integral image from the second row on
		switch(TYPE){
		case integ_square:
			for (y = 1; y < this->height;	++y,
													src_line	+= src_row_stride,
													cur_line	+= dst_row_stride,
													last_line+= dst_row_stride) {
				line_sum = 0;
				for ( x = 0; x < this->width; ++x) {
					line_sum += DST_E(src_line[x])*DST_E(src_line[x]);
					cur_line[x] = line_sum + last_line[x];
				}
			}
			break;
		case integ_count:
			for (y = 1; y < this->height;	++y,
													src_line	+= src_row_stride,
													cur_line	+= dst_row_stride,
													last_line+= dst_row_stride) {
				line_sum = 0;
				for ( x = 0; x < this->width; ++x) {
					line_sum += DST_E(src_line[x]?1:0);
					cur_line[x] = line_sum + last_line[x];
				}
			}
			break;
		default:
			for (y = 1; y < this->height;	++y,
													src_line	+= src_row_stride,
													cur_line	+= dst_row_stride,
													last_line+= dst_row_stride) {
				line_sum = 0;
				for ( x = 0; x < this->width; ++x) {
					line_sum += DST_E(src_line[x]);
					cur_line[x] = line_sum + last_line[x];
				}
			}
		}
		return integ_mat;
	}
	matrix_cl(const matrix_cl&)=default;
	matrix_cl(matrix_cl&&)=default;
	matrix_cl& operator=(const matrix_cl&)=default;
	matrix_cl& operator=(matrix_cl&&)=default;
private:
	/* Buffer variant: read the whole device buffer back into v. */
	template<typename _CL_TYPE = CL_TYPE>
	typename std::enable_if<std::is_base_of<cl::Buffer,_CL_TYPE>::value>::type
	_download(const cl::CommandQueue& command_queue=tls::command_queue()){
		this->download_force(v,command_queue);
	}
	/* Image2D variant: read the full image region (with this row stride) back into v. */
	template<typename _CL_TYPE = CL_TYPE>
	typename std::enable_if<std::is_base_of<cl::Image2D,_CL_TYPE>::value>::type
	_download(const cl::CommandQueue& command_queue=tls::command_queue()){
		this->download_force(v,get_row_stride(),command_queue);
	}
};

/* Trait: detects whether T derives from some memory_cl<CL_TYPE>.
 * cl_type is the detected CL_TYPE (void when T is not a memory_cl),
 * and value is true exactly when such a base exists. */
template<typename T>
struct is_kind_of_memory_cl{
	template<typename MEM_TYPE>
	static MEM_TYPE check(const memory_cl<MEM_TYPE>&);
	static void check(...);
	using cl_type = decltype(check(std::declval<T>()));
	enum{ value = !std::is_same<cl_type, void>::value };
};
/*
 * upload_arg(x)_if_need和download_arg(x)系列模板函数循环对run_kernel中的所有变长参数类型进行识别,
 * 对于memory_cl类型的参数，根据需要在kernel执行前上传数据到设备，
 * 并在kernel执行后根据需要下载输出数据到主机
 * 模板中的N参数，用于调试时知道哪个参数出错
 * */
// ARG is not a memory_cl type: nothing to upload, deliberately a no-op
template<int N,typename ARG>
inline typename std::enable_if<!is_kind_of_memory_cl<ARG>::value>::type
upload_arg_if_need(const cl::CommandQueue &,const ARG &){}
// ARG is a memory_cl type: validate its context, then upload its data to the
// device if needed. N is the argument's position, reported in any exception.
template<int N,typename ARG>
typename std::enable_if<is_kind_of_memory_cl<ARG>::value>::type
inline upload_arg_if_need(const cl::CommandQueue &command_queue,const ARG & arg){
	auto mem_context= arg.get_mem_context();
	auto queue_context=command_queue.getInfo<CL_QUEUE_CONTEXT>();
	// the memory object's context must match the command queue's; throw otherwise
	if(mem_context()!=queue_context()){
		std::stringstream stream;
		stream<<":the arg No:"<<N;// positional index of the offending argument
		throw std::invalid_argument(std::string(SOURCE_AT).append(stream.str()).append(":mem_context()!=queue_context()"));
	}
	try{
		arg.upload_if_need(command_queue);// upload the data to the device
	}catch(cl::Error&e){
		std::stringstream stream;
		stream<<"the arg No:"<<N;// positional index of the offending argument
		throw face_cl_exception(SOURCE_AT,e,stream.str());
	}catch(face_exception&e){
		std::stringstream stream;
		stream<<"the arg No:"<<N<<e.what();// positional index of the offending argument
		throw face_cl_exception(SOURCE_AT,stream.str());
	}catch(std::exception&e){
		std::stringstream stream;
		stream<<"the arg No:"<<N;// positional index of the offending argument
		throw face_cl_exception(SOURCE_AT,e,stream.str());
	}catch(...){
		std::stringstream stream;
		stream<<"the arg No:"<<N<<":unknow exception";// positional index of the offending argument
		throw face_cl_exception(SOURCE_AT,stream.str());
	}
}
// 特例:参数表为空
template<int N>
inline void upload_args_if_need(const cl::CommandQueue &command_queue){
}
/* Recursively walk the argument pack; every memory_cl argument has its
 * data uploaded to the device before the kernel runs. */
template<int N,typename HEAD,typename... TAIL>
inline void upload_args_if_need(const cl::CommandQueue &command_queue,HEAD && head,TAIL&&... tail){
	// handle the first argument, then recurse on the rest with the next index
	upload_arg_if_need<N>(command_queue,std::forward<HEAD>(head));
	upload_args_if_need<N+1>(command_queue,std::forward<TAIL>(tail)...);
}
// ARG is not a memory_cl type: nothing to download, deliberately a no-op
template<int N,typename ARG>
inline typename std::enable_if<!is_kind_of_memory_cl<ARG>::value>::type
download_arg_if_need(const cl::CommandQueue &,bool, const ARG &){}
// ARG is a memory_cl type: when `download` is set, fetch its data back to the
// host if its CL_MEM_FLAGS mark it writable (i.e. a potential kernel output).
// N is the argument's position, reported in any exception.
template<int N,typename ARG>
typename std::enable_if<is_kind_of_memory_cl<ARG>::value>::type
inline download_arg_if_need(const cl::CommandQueue &command_queue,bool download, const ARG & arg){
	if(download){
		try{
			auto m=dynamic_cast<const cl::Memory &>(arg.cl_mem_obj);
			auto flags=m.getInfo<CL_MEM_FLAGS>();
			// use CL_MEM_FLAGS to decide whether this is an output object that needs downloading
			if(flags&(CL_MEM_WRITE_ONLY|CL_MEM_READ_WRITE)){
				const_cast<ARG&>(arg).download_if_need(command_queue);// download the data to the host
			}
		}catch(cl::Error&e){
			std::stringstream stream;
			stream<<"the arg No:"<<N;// positional index of the offending argument
			throw face_cl_exception(SOURCE_AT,e,stream.str());
		}catch(face_exception&e){
			std::stringstream stream;
			stream<<"the arg No:"<<N<<e.what();// positional index of the offending argument
			throw face_cl_exception(SOURCE_AT,stream.str());
		}catch(std::exception&e){
			std::stringstream stream;
			stream<<"the arg No:"<<N;// positional index of the offending argument
			throw face_cl_exception(SOURCE_AT,e,stream.str());
		}catch(...){
			std::stringstream stream;
			stream<<"the arg No:"<<N<<":unknow exception";// positional index of the offending argument
			throw face_cl_exception(SOURCE_AT,stream.str());
		}
	}
}
// Base case of the recursion: empty argument list, nothing left to download
template<int N>
inline void download_args_if_need(const cl::CommandQueue &command_queue,bool download){}
/* Walk every element of Args recursively; each argument that is a memory_cl
 * object is downloaded back to the host when the 'download' flag requests it.
 * */
template<int N,typename ARG1,typename... Args>
inline void download_args_if_need(const cl::CommandQueue &command_queue,bool download, ARG1 && first,Args&&... rest){
	// handle the head of the pack, then recurse over the tail with index N+1
	download_arg_if_need<N>(command_queue,download,std::forward<ARG1>(first));
	download_args_if_need<N+1>(command_queue,download,std::forward<Args>(rest)...);
}

/* Template metafunction mapping an argument type to the type cl::make_kernel
 * actually needs for its execution:
 * - ordinary types map to themselves
 * - memory_cl subclasses map to their underlying OpenCL wrapper type
 *   (exposed by is_kind_of_memory_cl<>::cl_type)
 *  */
template<typename ARG
		,typename ARG_TYPE=typename std::decay<ARG>::type	// NOTE(review): ARG_TYPE is computed but never used below; MEM_CL/K_TYPE take raw ARG — confirm whether decay was intended
		,typename MEM_CL= is_kind_of_memory_cl<ARG>
		,typename K_TYPE=typename std::conditional<MEM_CL::value,typename MEM_CL::cl_type,ARG>::type
		>
struct kernel_type {
	using type= K_TYPE;
};
/*
 * Template metafunction
 * Instantiates the cl::make_kernel class from the template parameters.
 * Every template parameter is first passed through the kernel_type
 * metafunction, so memory_cl subclasses are replaced by the underlying
 * OpenCL wrapper type that cl::make_kernel really needs.
 * (32 parameters mirror cl::make_kernel's own fixed-arity parameter list;
 * unused slots default to cl::detail::NullType.)
*/
template <
   typename T0,   typename T1 = cl::detail::NullType,   typename T2 = cl::detail::NullType,
   typename T3 = cl::detail::NullType,   typename T4 = cl::detail::NullType,
   typename T5 = cl::detail::NullType,   typename T6 = cl::detail::NullType,
   typename T7 = cl::detail::NullType,   typename T8 = cl::detail::NullType,
   typename T9 = cl::detail::NullType,   typename T10 = cl::detail::NullType,
   typename T11 = cl::detail::NullType,   typename T12 = cl::detail::NullType,
   typename T13 = cl::detail::NullType,   typename T14 = cl::detail::NullType,
   typename T15 = cl::detail::NullType,   typename T16 = cl::detail::NullType,
   typename T17 = cl::detail::NullType,   typename T18 = cl::detail::NullType,
   typename T19 = cl::detail::NullType,   typename T20 = cl::detail::NullType,
   typename T21 = cl::detail::NullType,   typename T22 = cl::detail::NullType,
   typename T23 = cl::detail::NullType,   typename T24 = cl::detail::NullType,
   typename T25 = cl::detail::NullType,   typename T26 = cl::detail::NullType,
   typename T27 = cl::detail::NullType,   typename T28 = cl::detail::NullType,
   typename T29 = cl::detail::NullType,   typename T30 = cl::detail::NullType,
   typename T31 = cl::detail::NullType
>
struct make_make_kernel{
	using type=cl::make_kernel<
			typename kernel_type<T0>::type,		typename kernel_type<T1>::type,
			typename kernel_type<T2>::type,		typename kernel_type<T3>::type,
			typename kernel_type<T4>::type,		typename kernel_type<T5>::type,
			typename kernel_type<T6>::type,		typename kernel_type<T7>::type,
			typename kernel_type<T8>::type,		typename kernel_type<T9>::type,
			typename kernel_type<T10>::type,	typename kernel_type<T11>::type,
			typename kernel_type<T12>::type,	typename kernel_type<T13>::type,
			typename kernel_type<T14>::type,	typename kernel_type<T15>::type,
			typename kernel_type<T16>::type,	typename kernel_type<T17>::type,
			typename kernel_type<T18>::type,	typename kernel_type<T19>::type,
			typename kernel_type<T20>::type,	typename kernel_type<T21>::type,
			typename kernel_type<T22>::type,	typename kernel_type<T23>::type,
			typename kernel_type<T24>::type,	typename kernel_type<T25>::type,
			typename kernel_type<T26>::type,	typename kernel_type<T27>::type,
			typename kernel_type<T28>::type,	typename kernel_type<T29>::type,
			typename kernel_type<T30>::type,	typename kernel_type<T31>::type
			>;
};
/*
 * Run the given kernel:
 * before the launch, every memory_cl argument has its cl::Memory data uploaded
 * to the device as needed; after the launch, output memory objects are copied
 * back to the host when 'download_if_need' is set.
 * Errors are reported by throwing exceptions.
 * */
template<typename... Args>
inline void run_kernel(const cl::Kernel &kernel // kernel object to execute
		, const cl::EnqueueArgs &queue_args// enqueue parameters (queue, ranges)
		, bool download_if_need // download result data once the kernel finishes?
		, Args&&... args //  kernel argument list
		){
	// push stale cl::Memory data to the device first (args are numbered from 1)
	upload_args_if_need<1>(queue_args.queue_,std::forward<Args>(args)...);
	// build the cl::make_kernel functor matching the (translated) argument types
	using functor_type=typename make_make_kernel<Args...>::type;
	functor_type invoke(kernel);
	invoke(queue_args,std::forward<Args>(args)...); // launch the kernel
	// wait for completion only when results must be read back
	if(download_if_need)
		queue_args.queue_.finish();
	// copy output memory objects back to the host when requested
	download_args_if_need<1>(queue_args.queue_,download_if_need,std::forward<Args>(args)...);
}

} /* namespace gdface */




#endif /* FACEDETECT_MEMORY_CL_H_ */
