#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#include <cuda.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"

#define M 2048
#define N 2048
#define K 2048
#define TILE_SIZE 16
#define COMP 6

/*
 * result = a - b on struct timeval, borrowing from tv_sec when the
 * microsecond difference goes negative.  Guarded with #ifndef because
 * glibc/BSD already provide an identical timersub() in <sys/time.h>;
 * redefining it unconditionally triggers a macro-redefinition clash.
 */
#ifndef timersub
# define timersub(a, b, result)                                         \
	do {                                                            \
		(result)->tv_sec = (a)->tv_sec - (b)->tv_sec;           \
		(result)->tv_usec = (a)->tv_usec - (b)->tv_usec;        \
		if ((result)->tv_usec < 0) {                            \
			--(result)->tv_sec;                             \
			(result)->tv_usec += 1000000;                   \
		}                                                       \
	} while (0)
#endif

// Legacy texture references for reading A and B.  The 1-D variants are
// bound to the linear device buffers of A/B before the kernel6 launch in
// main(); the 2-D variants are bound to CUDA arrays before kernel7.
// NOTE(review): the texture *reference* API is deprecated and removed in
// CUDA 12 -- porting to texture objects is required for newer toolkits.
static texture<float,1>  tex_x_float_A;
static texture<float,1>  tex_x_float_B;

static texture<float,2,cudaReadModeElementType> tex_xy_float_A;
static texture<float,2,cudaReadModeElementType> tex_xy_float_B;

// Read element i of A through the 1-D texture cache (tex_x_float_A must
// be bound to the A buffer before any kernel using this helper runs).
static __inline__ __device__ float fetch_x_A(const int& i)
{
  const float value = tex1Dfetch(tex_x_float_A, i);
  return value;
}

// Read element i of B through the 1-D texture cache (tex_x_float_B must
// be bound to the B buffer before any kernel using this helper runs).
static __inline__ __device__ float fetch_x_B(const int& i)
{
  const float value = tex1Dfetch(tex_x_float_B, i);
  return value;
}

// Sample the 2-D texture bound to the A array at coordinate (i, j).
// Point filtering is configured in main(), so this is an element fetch.
static __inline__ __device__ float fetch_xy_A(const float& i, const float &j)
{
  const float value = tex2D(tex_xy_float_A, i, j);
  return value;
}

// Sample the 2-D texture bound to the B array at coordinate (i, j).
// Point filtering is configured in main(), so this is an element fetch.
static __inline__ __device__ float fetch_xy_B(const float& i, const float &j)
{
  const float value = tex2D(tex_xy_float_B, i, j);
  return value;
}

// Naive GEMM baseline: one thread per element of C (row-major, C = A*B).
// Rows are mapped to the x thread dimension here, so adjacent threads in
// a warp walk different rows of A -- the deliberately uncoalesced variant
// this benchmark contrasts with kernel2.
__global__ void kernel1(float *C, const float *A, const float *B, int m, int n,  int k)
{
	const int row = blockIdx.x * blockDim.x + threadIdx.x;
	const int col = blockIdx.y * blockDim.y + threadIdx.y;

	// Guard the grid tail.
	if (row >= m || col >= n)
		return;

	float acc = 0;
	for (int idx = 0; idx < k; ++idx)
		acc += A[row * k + idx] * B[idx * n + col];

	C[row * n + col] = acc;
}

// Naive GEMM with the coalescing-friendly mapping: columns on the x
// thread dimension, so adjacent threads in a warp read consecutive
// elements of each B row and write consecutive elements of C.
__global__ void kernel2(float *C, const float *A, const float *B, int m, int n,  int k)
{
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	const int row = blockIdx.y * blockDim.y + threadIdx.y;

	// Guard the grid tail.
	if (row >= m || col >= n)
		return;

	float acc = 0;
	for (int idx = 0; idx < k; ++idx)
		acc += A[row * k + idx] * B[idx * n + col];

	C[row * n + col] = acc;
}

// Shared-memory tiled GEMM.  Expects blockDim == (TILE_SIZE, TILE_SIZE).
// Loads are zero-filled and the store is guarded, so m/n/k no longer have
// to be multiples of TILE_SIZE (the original indexed out of bounds for
// partial tiles); results for exact multiples are unchanged.
__global__ void kernel3(float *C, const float *A, const float *B, int m, int n,  int k)
{
	const int localX = threadIdx.x;
	const int localY = threadIdx.y;

	const int col = blockIdx.x * TILE_SIZE + localX;
	const int row = blockIdx.y * TILE_SIZE + localY;

	__shared__ float Ab[TILE_SIZE][TILE_SIZE];
	__shared__ float Bb[TILE_SIZE][TILE_SIZE];

	float sum = 0.0f;
	for (int i = 0; i < k; i += TILE_SIZE) {
		// Stage one tile of A and one of B; zero-fill outside the
		// matrices so partial tiles contribute nothing.
		Ab[localY][localX] = (row < m && (localX + i) < k)
			? A[row * k + (localX + i)] : 0.0f;
		Bb[localY][localX] = ((localY + i) < k && col < n)
			? B[(localY + i) * n + col] : 0.0f;
		__syncthreads();

#pragma unroll
		for (int x = 0; x < TILE_SIZE; ++x)
			sum += Ab[localY][x] * Bb[x][localX];
		__syncthreads();   // tile fully consumed before the next load
	}

	if (row < m && col < n)
		C[row * n + col] = sum;
}

// Register-blocked tiled GEMM: blockDim must be (16, 16); each block
// computes a 96x96 tile of C, each thread a COMPxCOMP (6x6) sub-tile.
// Out-of-range loads are zero-filled and stores are guarded.
__global__ void kernel4(float *C, const float *A, const float *B, int m, int n,  int k)
{
	const int col = blockIdx.x * 96 + threadIdx.x;
	const int row = blockIdx.y * 96 + threadIdx.y;

	__shared__ float Ab[16 * COMP][16];
	__shared__ float Bb[16][16 * COMP];

	// Per-thread accumulators.  BUG FIX: local arrays are uninitialized
	// in CUDA; the original accumulated into garbage, producing wrong C.
	float sum[COMP * COMP];
#pragma unroll
	for (int t = 0; t < COMP * COMP; ++t)
		sum[t] = 0.0f;

	for (int i = 0; i < k; i += 16) {
		// Stage a 96x16 slab of A (zero-filled past the edges).
#pragma unroll
		for (int j = 0; j < COMP; ++j) {
			if ((i + threadIdx.x) >= k || (row + j * 16) >= m)
				Ab[threadIdx.y + j * 16][threadIdx.x] = 0;
			else
				Ab[threadIdx.y + j * 16][threadIdx.x] = A[(row + j * 16) * k + i + threadIdx.x];
		}

		// Stage a 16x96 slab of B (zero-filled past the edges).
#pragma unroll
		for (int j = 0; j < COMP; ++j) {
			if ((i + threadIdx.y) >= k || (col + j * 16) >= n)
				Bb[threadIdx.y][threadIdx.x + j * 16] = 0;
			else
				Bb[threadIdx.y][threadIdx.x + j * 16] = B[(i + threadIdx.y) * n + col + j * 16];
		}

		__syncthreads();

		// Rank-16 update of the 6x6 register block.
#pragma unroll
		for (int x = 0; x < 16; ++x) {
#pragma unroll
			for (int r = 0; r < COMP; ++r) {
#pragma unroll
				for (int c = 0; c < COMP; ++c)
					sum[r * COMP + c] += Ab[threadIdx.y + r * 16][x] * Bb[x][threadIdx.x + c * 16];
			}
		}
		__syncthreads();
	}

	// Guarded writeback of the 6x6 sub-tile.
#pragma unroll
	for (int r = 0; r < COMP; ++r) {
#pragma unroll
		for (int c = 0; c < COMP; ++c) {
			if ((row + r * 16) < m && (col + c * 16) < n)
				C[(row + r * 16) * n + col + c * 16] = sum[r * COMP + c];
		}
	}
}

// Same register-blocked GEMM as kernel4, but the inner product walks the
// shared tiles at a rotated index ((x + threadIdx.x) % 16) -- presumably
// an attempt to vary the shared-memory access pattern across lanes (TODO
// confirm intent).  Since the same rotation is applied to both Ab's column
// and Bb's row, the accumulated sum is mathematically identical.
__global__ void kernel5(float *C, const float *A, const float *B, int m, int n,  int k)
{
	const int col = blockIdx.x * 96 + threadIdx.x;
	const int row = blockIdx.y * 96 + threadIdx.y;

	__shared__ float Ab[16 * COMP][16];
	__shared__ float Bb[16][16 * COMP];

	// Per-thread accumulators.  BUG FIX: local arrays are uninitialized
	// in CUDA; the original accumulated into garbage, producing wrong C.
	float sum[COMP * COMP];
#pragma unroll
	for (int t = 0; t < COMP * COMP; ++t)
		sum[t] = 0.0f;

	for (int i = 0; i < k; i += 16) {
		// Stage a 96x16 slab of A (zero-filled past the edges).
#pragma unroll
		for (int j = 0; j < COMP; ++j) {
			if ((i + threadIdx.x) >= k || (row + j * 16) >= m)
				Ab[threadIdx.y + j * 16][threadIdx.x] = 0;
			else
				Ab[threadIdx.y + j * 16][threadIdx.x] = A[(row + j * 16) * k + i + threadIdx.x];
		}

		// Stage a 16x96 slab of B (zero-filled past the edges).
#pragma unroll
		for (int j = 0; j < COMP; ++j) {
			if ((i + threadIdx.y) >= k || (col + j * 16) >= n)
				Bb[threadIdx.y][threadIdx.x + j * 16] = 0;
			else
				Bb[threadIdx.y][threadIdx.x + j * 16] = B[(i + threadIdx.y) * n + col + j * 16];
		}

		__syncthreads();

		// Rank-16 update with the rotated tile index.
#pragma unroll
		for (int x = 0; x < 16; ++x) {
#pragma unroll
			for (int r = 0; r < COMP; ++r) {
#pragma unroll
				for (int c = 0; c < COMP; ++c)
					sum[r * COMP + c] += Ab[threadIdx.y + r * 16][(x + threadIdx.x) % 16] * Bb[(x + threadIdx.x) % 16][threadIdx.x + c * 16];
			}
		}
		__syncthreads();
	}

	// Guarded writeback of the 6x6 sub-tile.
#pragma unroll
	for (int r = 0; r < COMP; ++r) {
#pragma unroll
		for (int c = 0; c < COMP; ++c) {
			if ((row + r * 16) < m && (col + c * 16) < n)
				C[(row + r * 16) * n + col + c * 16] = sum[r * COMP + c];
		}
	}
}

// MAGMA-style SGEMM with software double buffering: each 64x4 thread
// block computes a 96x96 tile of C, each thread a 6x6 register sub-tile.
// A and B are read through the 1-D textures tex_x_float_A/tex_x_float_B
// bound in main().  Expects blockDim == (64, 4); the panel loop assumes
// k is a multiple of 16.
__global__ void 
kernel6(float *C, const float *A, const float *B, int m, int n,  int k,  int lda,  int ldb, int ldc, int offsetA, int offsetB) 
{
	const  int tx = threadIdx.x;
	const  int ty = threadIdx.y;

	// Block origin within C (96x96 tile per block).
	const int iby = blockIdx.y * 96;
	const int ibx = blockIdx.x * 96;
	// Flat thread id within the 64x4 block (0..255).
	const int idt = ty * 64 + tx;

	// Remap the 256 threads to a 16x16 layout for the staging loads.
	const int tx2 = idt%16;
	const int ty2 = idt/16;

	// Shared staging buffers; inner dimensions padded to 97/17 to avoid
	// shared-memory bank conflicts on the column accesses below.
	__shared__ float Bb[16][97];
	__shared__ float Abs[96][17];

	// Prefetch registers for the next 16-wide panel of A and B.
	float xxA[6];
	float xxB[6];

	// Linear texture indices of this thread's first A and B elements.
	int trackA = offsetA + __mul24( ibx + ty2, lda) + tx2;
	int trackB = offsetB + iby+ tx2 + __mul24(ty2, ldb);

	A += trackA; 
	B += trackB; 

	// Running k-position (only used by the commented-out bounds guards).
	int tll = tx2; 

	// Load the first 96x16 panel of A into shared memory.
#pragma unroll
	for(int y=0; y<6; y++)
		Abs[ty2+16*y][tx2] = /* (tll<k)* */ fetch_x_A(trackA +  lda*16*y);

	// Load the first 16x96 panel of B into shared memory.
#pragma unroll
	for(int y=0; y<6; y++)
		Bb[ty2][tx2+16*y] = fetch_x_B(trackB+16*y);
	__syncthreads();

	// Stop one panel early; the final staged panel is drained after the loop.
	const float *Bend = B + k*ldb - 16*ldb;

	// Per-iteration register copies of one A column / B row fragment.
	float Axs[6];
	float Bxp[6];

	// 6x6 register accumulator, zero-initialized.
	float Cb[36] = {0,0,0,0,0,0, 0,0,0,0,0,0,  0,0,0,0,0,0, 0,0,0,0,0,0,
		0,0,0,0,0,0, 0,0,0,0,0,0};
	do 
	{
		// Advance all cursors to the next 16-wide panel.
		tll+=16;
		A += 16;
		B += 16*ldb;
		trackA+=16; 
		trackB+=16*ldb;

		// Prefetch the next panels into registers while the current
		// shared-memory panels are consumed below (double buffering).
#pragma unroll
		for( int y=0; y<6; y++)
			xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + lda*y*16);

#pragma unroll
		for( int y=0; y<6; y++)
			xxB[y] = fetch_x_B(trackB + 16*y);

		// Rank-16 update of the 6x6 accumulator from shared memory.
#pragma unroll 
		for( int j1=0;j1<16;j1++)
		{
#pragma unroll
			for( int y=0; y<6; y++)
				Axs[y] =  Abs[tx2 + y*16][j1];

#pragma unroll
			for( int y=0; y<6; y++)
				Bxp[y]= Bb[j1][ty2 + y*16];

#pragma unroll 
			for( int x=0; x<6; x++)
#pragma unroll 
				for( int y=0;y<6;y++)
					Cb[x*6+y] += Axs[x]*Bxp[y];
		}
		__syncthreads();

		// Commit the prefetched panels to shared memory for the next pass.
#pragma unroll
		for( int y=0; y<6; y++)
			Abs[ty2 + 16*y][tx2] = xxA[y];

#pragma unroll
		for( int y=0; y<6; y++)
			Bb[ty2][tx2+y*16] = xxB[y];

		__syncthreads();
	} 
	while (B < Bend);

	// Point C at this thread's first output element.
	C += tx2 + ibx  + __mul24 (ty2 +  iby ,ldc);

	// Drain: consume the last staged panel (the loop stopped one early).
#pragma unroll 
	for( int j1=0; j1<16; j1++)
	{
#pragma unroll
		for( int y=0; y<6; y++)
			Axs[y] =  Abs[tx2 + y*16][j1];

#pragma unroll
		for( int y=0; y<6; y++)
			Bxp[y]= Bb[j1][ty2 + y*16];

#pragma unroll 
		for( int x=0; x<6; x++)
#pragma unroll 
			for( int y=0; y<6; y++)
				Cb[x*6+y]  += Axs[x]*Bxp[y];
	}

	// Guarded writeback.  "+ 0 * C[x*16]" is the hard-coded beta == 0
	// case of C = alpha*A*B + beta*C with alpha == 1.
	int gy = iby + ty2;
#pragma unroll
	for( int y=0; y<6; y++, gy+=16)
	{
		int gx = ibx + tx2; 
#pragma unroll
		for(int x=0; x<6; x++, gx+=16)
		{
			if (gx < m && gy < n)
				C[x*16] = Cb[y+x*6] + 0 * C[x*16];
		}

		C+=ldc*16;
	}

}


// Variant of kernel6 that reads A and B through the 2-D textures
// tex_xy_float_A/tex_xy_float_B (bound to CUDA arrays in main()) instead
// of 1-D linear textures; the float "counter" tracks the texture
// coordinate of the current 16-wide panel.  Expects blockDim == (64, 4);
// the panel loop assumes k is a multiple of 16.
__global__ void 
kernel7(float *C, const float *A, const float *B, int m, int n,  int k,  int lda,  int ldb, int ldc, int offsetA, int offsetB) 
{
	const  int tx = threadIdx.x;
	const  int ty = threadIdx.y;
	
	// Block origin within C (96x96 tile per block).
	const int iby = blockIdx.y * 96;
	const int ibx = blockIdx.x * 96;
	// Flat thread id within the 64x4 block (0..255).
	const int idt = ty * 64 + tx;

	// Remap the 256 threads to a 16x16 layout for the staging loads.
	const int tx2 = idt%16;
	const int ty2 = idt/16;
	
	// Shared staging buffers; inner dimensions padded to 97/17 to avoid
	// shared-memory bank conflicts on the column accesses below.
	__shared__ float Bb[16][97];
	__shared__ float Abs[96][17];
	
	// Prefetch registers for the next 16-wide panel of A and B.
	float xxA[6];
	float xxB[6];
	
	// Linear indices kept from the 1-D version; here they only drive the
	// A/B pointer arithmetic that bounds the do/while loop.
	int trackA = offsetA + __mul24( ibx + ty2, lda) + tx2;
	int trackB = offsetB + iby+ tx2 + __mul24(ty2, ldb);

	// Texture-coordinate offset of the current panel (advances by 16).
	float counter = 0.0;
	
	A += trackA; 
	B += trackB; 
	
	// Running k-position (only used by the commented-out bounds guards).
	int tll = tx2; 

	// Load the first 96x16 panel of A via the 2-D texture.
#pragma unroll
	for(int y=0; y<6; y++)
		//Abs[ty2+16*y][tx2] = /* (tll<k)* */ fetch_xy_A(trackA +  lda*16*y);
		Abs[ty2+16*y][tx2] = /* (tll<k)* */ fetch_xy_A(tx2 , ty2+ibx+ (float)16*y  );
	
	// Load the first 16x96 panel of B via the 2-D texture.
#pragma unroll
	for(int y=0; y<6; y++)
		//Bb[ty2][tx2+16*y] = fetch_xy_B(trackB+16*y);
		Bb[ty2][tx2+16*y] = fetch_xy_B(iby + tx2 + (float)16*y, ty2  );
	__syncthreads();
	
	// Stop one panel early; the final staged panel is drained after the loop.
	const float *Bend = B + k*ldb - 16*ldb;

	// Per-iteration register copies of one A column / B row fragment.
	float Axs[6];
	float Bxp[6];
	
	// 6x6 register accumulator, zero-initialized.
	float Cb[36] = {0,0,0,0,0,0, 0,0,0,0,0,0,  0,0,0,0,0,0, 0,0,0,0,0,0,
		0,0,0,0,0,0, 0,0,0,0,0,0};
	do 
	{
		// Advance cursors and the texture coordinate to the next panel.
		tll+=16;
		A += 16;
		B += 16*ldb;
//		trackA+=16; 
//		trackB+=16*ldb;
		counter += 16;
		
		// Prefetch the next panels into registers while the current
		// shared-memory panels are consumed below (double buffering).
#pragma unroll
		for( int y=0; y<6; y++)
			//xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + lda*y*16);
			xxA[y] = /* (tll<k)* */ fetch_xy_A(tx2+counter, ty2+ibx+(float)16*y );
		
#pragma unroll
		for( int y=0; y<6; y++)
//			xxB[y] = fetch_x_B(trackB + 16*y);
			xxB[y] = fetch_xy_B(iby + tx2+16*y, ty2+counter);
		
		// Rank-16 update of the 6x6 accumulator from shared memory.
#pragma unroll 
		for( int j1=0;j1<16;j1++)
		{
#pragma unroll
			for( int y=0; y<6; y++)
				Axs[y] =  Abs[tx2 + y*16][j1];
			
#pragma unroll
			for( int y=0; y<6; y++)
				Bxp[y]= Bb[j1][ty2 + y*16];
			
#pragma unroll 
			for( int x=0; x<6; x++)
#pragma unroll 
				for( int y=0;y<6;y++)
					Cb[x*6+y] += Axs[x]*Bxp[y];
		}
		__syncthreads();
		
		// Commit the prefetched panels to shared memory for the next pass.
#pragma unroll
		for( int y=0; y<6; y++)
			Abs[ty2 + 16*y][tx2] = xxA[y];
		
#pragma unroll
		for( int y=0; y<6; y++)
			Bb[ty2][tx2+y*16] = xxB[y];
		
		__syncthreads();
	} 
	while (B < Bend);
	
	// Point C at this thread's first output element.
	C += tx2 + ibx  + __mul24 (ty2 +  iby ,ldc);
	
	// Drain: consume the last staged panel (the loop stopped one early).
#pragma unroll 
	for( int j1=0; j1<16; j1++)
	{
#pragma unroll
		for( int y=0; y<6; y++)
			Axs[y] =  Abs[tx2 + y*16][j1];
		
#pragma unroll
		for( int y=0; y<6; y++)
			Bxp[y]= Bb[j1][ty2 + y*16];
		
#pragma unroll 
		for( int x=0; x<6; x++)
#pragma unroll 
			for( int y=0; y<6; y++)
				Cb[x*6+y]  += Axs[x]*Bxp[y];
	}
	
	// Guarded writeback.  "+ 0 * C[x*16]" is the hard-coded beta == 0
	// case of C = alpha*A*B + beta*C with alpha == 1.
	int gy = iby + ty2;
#pragma unroll
	for( int y=0; y<6; y++, gy+=16)
	{
		int gx = ibx + tx2; 
#pragma unroll
		for(int x=0; x<6; x++, gx+=16)
		{
			if (gx < m && gy < n)
				C[x*16] = Cb[y+x*6] + 0 * C[x*16];
		}
		
		C+=ldc*16;
	}
	
}

/*
 * Count the entries of got (row-major MxN) differing from ref by more
 * than 1e-3.  When transposed is non-zero, ref is read transposed --
 * used to compare a row-major kernel result against the column-major
 * cuBLAS reference (relies on M == N, which holds for the fixed sizes).
 */
static int count_diffs(const float *got, const float *ref, int transposed)
{
	int diffs = 0;
	for (int i = 0; i < M; i++) {
		for (int j = 0; j < N; j++) {
			float r = transposed ? ref[j * N + i] : ref[i * N + j];
			float d = got[i * N + j] - r;
			if (d > 0.001f || d < -0.001f)
				diffs++;
		}
	}
	return diffs;
}

/* Print one kernel's wall-clock time in the "name : sec,msec ms" format. */
static void report_time(const char *name,
			const struct timeval *start, const struct timeval *end)
{
	struct timeval res;
	timersub(end, start, &res);
	printf("%s : %ld,%03ld ms\n", name, res.tv_sec, res.tv_usec / 1000);
}

/*
 * Benchmark driver: computes a cuBLAS reference product, then times
 * kernels 1-7 on the same fixed MxKxN problem and reports any element
 * that disagrees with the reference (or, for kernel7, with kernel6).
 *
 * Fixes over the original: `count` is reset before each comparison
 * (it used to accumulate across kernels, so later reports were
 * cumulative); the uninitialized o_h buffer is no longer copied to the
 * device (replaced by cudaMemset); o_h2, both CUDA arrays and the
 * cuBLAS handle are now released.
 */
int main(int argc, const char *argv[])
{
	struct timeval time_start, time_end;
	int count;
	float alpha = 1.0f;
	float beta = 0.0f;

	float *m1_h, *m2_h, *o_h, *o_h2;
	float *m1_d, *m2_d, *o_d;

	m1_h = (float *) malloc(M * K * sizeof(float));
	m2_h = (float *) malloc(K * N * sizeof(float));
	o_h = (float *) malloc(M * N * sizeof(float));
	o_h2 = (float *) malloc(M * N * sizeof(float));
	if (!m1_h || !m2_h || !o_h || !o_h2) {
		printf("host allocation failed\n");
		exit(-1);
	}

	if (cudaMalloc((void **)&m1_d, M * K * sizeof(float)) != cudaSuccess ||
	    cudaMalloc((void **)&m2_d, K * N * sizeof(float)) != cudaSuccess ||
	    cudaMalloc((void **)&o_d, M * N * sizeof(float)) != cudaSuccess) {
		printf("device allocation failed\n");
		exit(-1);
	}

	// Initialize host matrices with uniform [0,1] values and upload them.
	for (int r = 0; r < M; ++r)
		for (int c = 0; c < K; ++c)
			m1_h[r * K + c] = rand() / (float)RAND_MAX;

	for (int r = 0; r < K; ++r)
		for (int c = 0; c < N; ++c)
			m2_h[r * N + c] = rand() / (float)RAND_MAX;

	cudaMemcpy(m1_d, m1_h, M * K * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(m2_d, m2_h, K * N * sizeof(float), cudaMemcpyHostToDevice);
	// The original uploaded the *uninitialized* o_h buffer here; zero the
	// device output instead (beta == 0 overwrites it anyway).
	cudaMemset(o_d, 0, M * N * sizeof(float));

	cublasHandle_t handle;
	if (cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) {
		printf("cublasCteate fail\n");
		exit(-1);
	}

	// Reference product from cuBLAS (column-major output; kernels 1-5 are
	// compared against it transposed).
	cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_T, M, K, N, &alpha, m1_d, K, m2_d, N, &beta, o_d, N);
	cudaDeviceSynchronize();
	cudaMemcpy(o_h2, o_d, M * N * sizeof(float), cudaMemcpyDeviceToHost);

	// Launch geometry for kernels 1-3: one thread per element of C.
	int tile = 16;
	dim3 grid((M + tile - 1) / tile, (N + tile - 1) / tile, 1);
	dim3 threads(tile, tile, 1);

	gettimeofday(&time_start, NULL);
	kernel1<<< grid, threads >>>(o_d, m1_d, m2_d, M, N, K);
	cudaDeviceSynchronize();
	gettimeofday(&time_end, NULL);
	report_time("kernel1", &time_start, &time_end);

	cudaMemcpy(o_h, o_d, M * N * sizeof(float), cudaMemcpyDeviceToHost);
	count = count_diffs(o_h, o_h2, 1);
	if (count != 0)
		printf("number of differences : %d\n", count);

	gettimeofday(&time_start, NULL);
	kernel2<<< grid, threads >>>(o_d, m1_d, m2_d, M, N, K);
	cudaDeviceSynchronize();
	gettimeofday(&time_end, NULL);
	report_time("kernel2", &time_start, &time_end);

	cudaMemcpy(o_h, o_d, M * N * sizeof(float), cudaMemcpyDeviceToHost);
	count = count_diffs(o_h, o_h2, 1);
	if (count != 0)
		printf("number of differences : %d\n", count);

	gettimeofday(&time_start, NULL);
	kernel3<<< grid, threads >>>(o_d, m1_d, m2_d, M, N, K);
	cudaDeviceSynchronize();
	gettimeofday(&time_end, NULL);
	report_time("kernel3", &time_start, &time_end);

	cudaMemcpy(o_h, o_d, M * N * sizeof(float), cudaMemcpyDeviceToHost);
	count = count_diffs(o_h, o_h2, 1);
	if (count != 0)
		printf("number of differences : %d\n", count);

	// Launch geometry for kernels 4-5: one 96x96 tile of C per block.
	dim3 grid2((M + 95) / 96, (N + 95) / 96, 1);
	dim3 threads2(TILE_SIZE, TILE_SIZE, 1);

	gettimeofday(&time_start, NULL);
	kernel4<<< grid2, threads2 >>>(o_d, m1_d, m2_d, M, N, K);
	cudaDeviceSynchronize();
	gettimeofday(&time_end, NULL);
	report_time("kernel4", &time_start, &time_end);

	cudaMemcpy(o_h, o_d, M * N * sizeof(float), cudaMemcpyDeviceToHost);
	count = count_diffs(o_h, o_h2, 1);
	if (count != 0)
		printf("number of differences : %d\n", count);

	gettimeofday(&time_start, NULL);
	kernel5<<< grid2, threads2 >>>(o_d, m1_d, m2_d, M, N, K);
	cudaDeviceSynchronize();
	gettimeofday(&time_end, NULL);
	report_time("kernel5", &time_start, &time_end);

	cudaMemcpy(o_h, o_d, M * N * sizeof(float), cudaMemcpyDeviceToHost);
	count = count_diffs(o_h, o_h2, 1);
	if (count != 0)
		printf("number of differences : %d\n", count);

	// kernel6 reads A and B through 1-D textures bound to the linear buffers.
	size_t sizeA = (size_t)K * M;
	size_t sizeB = (size_t)N * K;
	size_t offsetA = 0;
	size_t offsetB = 0;

	if (cudaBindTexture(&offsetA, tex_x_float_A, m1_d, sizeA * sizeof(float)) != cudaSuccess) {
		printf("can not bind to texture\n");
		exit(-1);
	}
	if (cudaBindTexture(&offsetB, tex_x_float_B, m2_d, sizeB * sizeof(float)) != cudaSuccess) {
		printf("can not bind to texture\n");
		exit(-1);
	}

	// kernel6/7 expect a 64x4 thread block per 96x96 output tile.
	dim3 threads3(64, 4);
	dim3 grid3((M + 95) / 96, (N + 95) / 96);

	cudaDeviceSynchronize();
	gettimeofday(&time_start, NULL);
	kernel6<<< grid3, threads3 >>>(o_d, m1_d, m2_d, M, N, K, K, N, M, offsetA, offsetB);
	cudaDeviceSynchronize();
	gettimeofday(&time_end, NULL);
	report_time("kernel6", &time_start, &time_end);

	cudaUnbindTexture(tex_x_float_A);
	cudaUnbindTexture(tex_x_float_B);

	cudaMemcpy(o_h, o_d, M * N * sizeof(float), cudaMemcpyDeviceToHost);
	// kernel6's output layout matches the reference directly (no transpose).
	count = count_diffs(o_h, o_h2, 0);
	if (count != 0)
		printf("number of differences : %d\n", count);

	// kernel7 reads A and B through 2-D textures backed by CUDA arrays.
	cudaArray *carrayA;
	cudaArray *carrayB;
	cudaChannelFormatDesc channel1 = cudaCreateChannelDesc<float>();
	cudaChannelFormatDesc channel2 = cudaCreateChannelDesc<float>();

	cudaMallocArray(&carrayA, &channel1, M, K);
	cudaMallocArray(&carrayB, &channel2, K, N);

	cudaMemcpyToArray(carrayA, 0, 0, m1_h, sizeof(float) * sizeA, cudaMemcpyHostToDevice);
	cudaMemcpyToArray(carrayB, 0, 0, m2_h, sizeof(float) * sizeB, cudaMemcpyHostToDevice);

	tex_xy_float_A.filterMode = cudaFilterModePoint;
	tex_xy_float_A.addressMode[0] = cudaAddressModeClamp;
	tex_xy_float_A.addressMode[1] = cudaAddressModeClamp;

	tex_xy_float_B.filterMode = cudaFilterModePoint;
	tex_xy_float_B.addressMode[0] = cudaAddressModeClamp;
	tex_xy_float_B.addressMode[1] = cudaAddressModeClamp;

	cudaBindTextureToArray(tex_xy_float_A, carrayA);
	cudaBindTextureToArray(tex_xy_float_B, carrayB);

	cudaDeviceSynchronize();
	gettimeofday(&time_start, NULL);
	kernel7<<< grid3, threads3 >>>(o_d, m1_d, m2_d, M, N, K, K, N, M, offsetA, offsetB);
	cudaDeviceSynchronize();
	gettimeofday(&time_end, NULL);
	report_time("kernel7", &time_start, &time_end);

	cudaUnbindTexture(tex_xy_float_A);
	cudaUnbindTexture(tex_xy_float_B);

	// Compare kernel7 against the kernel6 result still held in o_h.
	cudaMemcpy(o_h2, o_d, M * N * sizeof(float), cudaMemcpyDeviceToHost);
	count = count_diffs(o_h, o_h2, 0);
	if (count != 0)
		printf("number of differences : %d\n", count);

	// Release everything (the original leaked o_h2, both CUDA arrays and
	// the cuBLAS handle).
	cublasDestroy(handle);
	cudaFreeArray(carrayA);
	cudaFreeArray(carrayB);

	free(m1_h);
	free(m2_h);
	free(o_h);
	free(o_h2);

	cudaFree(m1_d);
	cudaFree(m2_d);
	cudaFree(o_d);

	return 0;
}

