/**********************************************************************
Copyright ©2013 Advanced Micro Devices, Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

•   Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
•   Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or
 other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************/


#include "FFT.hpp"
// Name of the OpenCL kernel source file; used both when building the program
// at runtime (setupCL) and when dumping/loading a precompiled binary image.
char cl_kernel_name[] = "FFT_Kernels.cl";

/**
 * Print a width x height matrix to stdout, one row per line, each element
 * right-aligned in a 4-character field followed by a space.
 *
 * @param header  label printed on the first line as "header=<header>"
 *                (now passed by const reference; the original copied the string)
 * @param data    row-major array of at least width*height elements
 * @param width   number of columns
 * @param height  number of rows
 */
template<typename T>
void printArray1(
    const std::string &header,
    const T * data,
    const int width,
    const int height)
{
    std::cout << "header=" << header << "\n";
    for(int row = 0; row < height; row++)
    {
        for(int col = 0; col < width; col++)
        {
            std::cout << std::setw(4) << data[row * width + col] << " ";
        }
        // End of row. The original tested ((col+1) % width == 0) inside the
        // inner loop, which is only ever true here, after the last column.
        std::cout << "\n";
    }
    std::cout << "\n";
}
#define PI 3.1415926535   // pi, used when synthesizing the test input signal

/**
 * Write sample_count floats to a file as comma-separated "%f" text.
 *
 * @param file_name    path of the output file
 * @param write_mode   fopen() mode string (e.g. "w", "a", "wb")
 * @param sample_count number of elements to write from dataR
 * @param dataR        source array (read-only)
 *
 * Exits the process with a failure status if the file cannot be opened.
 */
static void func_write_txt(const char *file_name, const char *write_mode,
                           int sample_count, const float *dataR)
{
    FILE *fp = fopen(file_name, write_mode);
    if(fp == NULL)
    {
        // Fixed: the original streamed the literal printf format "fopen %s failed!"
        // instead of interpolating the file name, and exited with 0 (success).
        std::cout << " func:" << __func__ << " line:" << __LINE__
                  << " fopen " << file_name << " failed!" << std::endl;
        exit(EXIT_FAILURE);
    }
    for(int i = 0; i < sample_count; i++)
    {
        fprintf(fp, "%f,", dataR[i]);
    }
    fclose(fp);
}
/**
 * Allocate host buffers and synthesize the test input signal:
 * a DC offset plus three sinusoids whose characteristic frequencies
 * should appear as peaks in the FFT output.
 *
 * Fills `input` (real part), zeroes `input_m` (imaginary part) and `output`,
 * and dumps the generated signal to "cl_cpu_input.txt" for offline inspection.
 *
 * @return SDK_SUCCESS on success, SDK_FAILURE on allocation failure
 *         (via CHECK_ALLOCATION).
 */
int TwoDimConvolution::setupTwoDimConvolution()
{
    cl_uint inputSizeBytes;
    int f1 = 100;    // characteristic frequency 1 of the signal
    int f2 = 300;    // characteristic frequency 2 of the signal
    int f3 = 500;    // characteristic frequency 3 of the signal
    int sample_count = width * height;

    std::cout << " func:" << __func__ << " line:" << __LINE__ << " width:" << width << " height:" << height << std::endl;

    // Allocate and init host memory: real input, output, imaginary input.
    inputSizeBytes = width * height * sizeof(cl_float);
    input  = (cl_float *) malloc(inputSizeBytes);
    CHECK_ALLOCATION(input, "Failed to allocate host memory. (input)");

    output = (cl_float *) malloc(inputSizeBytes);
    CHECK_ALLOCATION(output, "Failed to allocate host memory. (output)");

    input_m = (cl_float *) malloc(inputSizeBytes);
    CHECK_ALLOCATION(input_m, "Failed to allocate host memory (input_m)");

    for(cl_uint i = 0; i < width * height; i++)
    {
        float j = i;
        // x[n] = 10 + 7*sin(2*pi*f1*n/N) + 5*sin(2*pi*f2*n/N) + 3*sin(2*pi*f3*n/N)
        input[i] = 10 + 7 * sin(2 * PI * f1 * (j / sample_count))
                      + 5 * sin(2 * PI * f2 * (j / sample_count))
                      + 3 * sin(2 * PI * f3 * (j / sample_count));
        input_m[i] = 0;   // real-valued signal: imaginary part starts at zero
        output[i] = 0;
    }
    // NOTE(review): mode "wb" but the content is text; harmless on POSIX.
    func_write_txt("cl_cpu_input.txt", "wb", sample_count, input);

    // Unless quiet mode has been enabled, print the INPUT arrays.
    if(!sampleArgs->quiet)
    {
        printArray<cl_float >(
            "Original Input",
            input,
            width,
            height);
        printArray<cl_float >(
            "input_m",
            input_m,
            width,
            height);
    }

    return SDK_SUCCESS;
}


int
TwoDimConvolution::genBinaryImage()
{
    bifData binaryData;
	std::cout << " func:" << __func__ << " line:" << __LINE__ << std::endl;
    binaryData.kernelName = std::string(cl_kernel_name);
    binaryData.flagsStr = std::string("");
    if(sampleArgs->isComplierFlagsSpecified())
    {
        binaryData.flagsFileName = std::string(sampleArgs->flags.c_str());
    }

    binaryData.binaryName = std::string(sampleArgs->dumpBinary.c_str());
    int status = generateBinaryImage(binaryData);
    return status;
}


int
TwoDimConvolution::setupCL(void)
{
    cl_int status = 0;
    cl_device_type dType;
	std::cout << " func:" << __func__ << " line:" << __LINE__ << std::endl;

    if(sampleArgs->deviceType.compare("cpu") == 0)
    {
        dType = CL_DEVICE_TYPE_CPU;
    }
    else //deviceType = "gpu"
    {
        dType = CL_DEVICE_TYPE_GPU;
        if(sampleArgs->isThereGPU() == false)
        {
            std::cout << "GPU not found. Falling back to CPU device" << std::endl;
            dType = CL_DEVICE_TYPE_CPU;
        }
    }

    /*
     * Have a look at the available platforms and pick either
     * the AMD one if available or a reasonable default.
     */
    cl_platform_id platform = NULL;
    int retValue = getPlatform(platform, sampleArgs->platformId,
                               sampleArgs->isPlatformEnabled());
    CHECK_ERROR(retValue, SDK_SUCCESS, "getPlatform() failed");

    // Display available devices.
    retValue = displayDevices(platform, dType);
    CHECK_ERROR(retValue, SDK_SUCCESS, "displayDevices() failed");


    // If we could find our platform, use it. Otherwise use just available platform.
    cl_context_properties cps[3] =
    {
        CL_CONTEXT_PLATFORM,
        (cl_context_properties)platform,
        0
    };

    context = clCreateContextFromType(
                  cps,
                  dType,
                  NULL,
                  NULL,
                  &status);

    CHECK_OPENCL_ERROR( status, "clCreateContextFromType failed.");

    // getting device on which to run the sample
    status = getDevices(context, &devices, sampleArgs->deviceId,
                        sampleArgs->isDeviceIdEnabled());
    CHECK_ERROR(status, SDK_SUCCESS, "getDevices() failed");

    {
        // The block is to move the declaration of prop closer to its use
        cl_command_queue_properties prop = 0;
        commandQueue = clCreateCommandQueueWithProperties  (
                           context,
                           devices[sampleArgs->deviceId],
                           &prop,
                           &status);
        CHECK_OPENCL_ERROR( status, "clCreateCommandQueueWithProperties  failed.");
    }

    inputBuffer = clCreateBuffer(
                      context,
                      CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR,
                      sizeof(cl_uint ) * width * height,
                      input,
                      &status);
    CHECK_OPENCL_ERROR(status, "clCreateBuffer failed. (inputBuffer)");

    outputBuffer = clCreateBuffer(
                       context,
                       CL_MEM_WRITE_ONLY | CL_MEM_USE_HOST_PTR,
                       sizeof(cl_uint ) * width * height,
                       output,
                       &status);
    CHECK_OPENCL_ERROR( status,  "clCreateBuffer failed. (outputBuffer)");

    mInput = clCreateBuffer(
                     context,
                     CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR,
                     sizeof(cl_float ) * width * height,
                     input_m,
                     &status);
    CHECK_OPENCL_ERROR( status, "clCreateBuffer failed. (mInput)");

    // create a CL program using the kernel source
    buildProgramData buildData;
    buildData.kernelName = std::string(cl_kernel_name);
    buildData.devices = devices;
    buildData.deviceId = sampleArgs->deviceId;
    buildData.flagsStr = std::string("");
    if(sampleArgs->isLoadBinaryEnabled())
    {
        buildData.binaryName = std::string(sampleArgs->loadBinary.c_str());
    }

    if(sampleArgs->isComplierFlagsSpecified())
    {
        buildData.flagsFileName = std::string(sampleArgs->flags.c_str());
    }

    retValue = buildOpenCLProgram(program, context, buildData);
    CHECK_ERROR(retValue, SDK_SUCCESS, "buildOpenCLProgram() failed");

    // get a kernel object handle for a kernel with the given name
    kernel = clCreateKernel(program, "cl_fft", &status);
    CHECK_OPENCL_ERROR(status, "clCreateKernel cl_fft failed.");

    return SDK_SUCCESS;
}

int TwoDimConvolution::setWorkGroupSize()
{
    cl_int status = 0;
    status = kernelInfo.setKernelWorkGroupInfo(kernel,
             devices[sampleArgs->deviceId]);
    CHECK_ERROR(status, SDK_SUCCESS, " setKernelWorkGroupInfo() failed");

    globalThreads[0] = 1;//width*height;
    globalThreads[1]  = width*height;//256;
    localThreads[0] = 1;
    localThreads[1]  = width*height;//256;
	std::cout << " func:" << __func__ << " line:" << __LINE__ << " globalThreads[0]:" << globalThreads[0] << " globalThreads[1]:" << globalThreads[1] << std::endl;
	std::cout << " func:" << __func__ << " line:" << __LINE__ << " localThreads[0]:" << localThreads[0] << " localThreads[1]:" << localThreads[1] << std::endl;

    if((cl_uint)(localThreads[0]) > kernelInfo.kernelWorkGroupSize)
    {
        if(!sampleArgs->quiet)
        {
            std::cout << "Out of Resources!" << std::endl;
            std::cout << "Group Size specified : "<< localThreads[0]
                      << std::endl;
            std::cout << "Max Group Size supported on the kernel : "
                      << kernelInfo.kernelWorkGroupSize <<std::endl;
            std::cout <<"Changing the group size to " << kernelInfo.kernelWorkGroupSize
                      << std::endl;
        }

        localThreads[0] = kernelInfo.kernelWorkGroupSize;
    }
    return SDK_SUCCESS;
}

int
TwoDimConvolution::runCLKernels(void)
{
    cl_int   status;
    cl_event events[2];

    status = this->setWorkGroupSize();
    CHECK_ERROR(status, SDK_SUCCESS, "setWorkGroupSize() failed");

    // Set appropriate arguments to the kernel
    status = clSetKernelArg(
                 kernel,
                 0,
                 sizeof(cl_mem),
                 (void *)&outputBuffer);
    CHECK_OPENCL_ERROR( status, "clSetKernelArg failed. (outputBuffer)");

    status = clSetKernelArg(
                 kernel,
                 1,
                 sizeof(cl_mem),
                 (void *)&inputBuffer);
    CHECK_OPENCL_ERROR( status, "clSetKernelArg failed. (inputBuffer)");

    status = clSetKernelArg(
                 kernel,
                 2,
                 sizeof(cl_mem),
                 (void *)&mInput);
    CHECK_OPENCL_ERROR( status, "clSetKernelArg failed. (mInput)");
    twoPower = globalThreads[0];
    twoIndex = globalThreads[1];
    
    std::cout << " func:" << __func__ << " line:" << __LINE__ << " twoIndex:" << twoIndex << " twoPower:" << twoPower << std::endl;

    status = clSetKernelArg(
                 kernel,
                 3,
                 sizeof(cl_uint),
                 (void *)&twoPower);
    CHECK_OPENCL_ERROR( status, "clSetKernelArg failed. (twoPower)");

    status = clSetKernelArg(
                 kernel,
                 4,
                 sizeof(cl_uint),
                 (void *)&twoIndex);
    CHECK_OPENCL_ERROR( status, "clSetKernelArg failed. (twoIndex)");


	std::cout << " func:" << __func__ << " line:" << __LINE__ << " globalThreads[0]:" << globalThreads[0] << " globalThreads[1]:" << globalThreads[1] << std::endl;
	std::cout << " func:" << __func__ << " line:" << __LINE__ << " localThreads[0]:" << localThreads[0] << " localThreads[1]:" << localThreads[1] << std::endl;

    // Enqueue a kernel run call.
    status = clEnqueueNDRangeKernel(
                 commandQueue,
                 kernel,
                 2,
                 NULL,
                 globalThreads,
                 localThreads,
                 0,
                 NULL,
                 &events[0]);
    CHECK_OPENCL_ERROR( status, "clEnqueueNDRangeKernel failed.");

    status = clFlush(commandQueue);
    CHECK_OPENCL_ERROR(status,"clFlush() failed");

    status = waitForEventAndRelease(&events[0]);
    CHECK_ERROR(status, SDK_SUCCESS, "WaitForEventAndRelease(events[0]) Failed");

    // Enqueue readBuffer
    status = clEnqueueReadBuffer(
                 commandQueue,
                 outputBuffer,
                 CL_TRUE,
                 0,
                 width * height * sizeof(cl_uint),
                 output,
                 0,
                 NULL,
                 &events[1]);
    CHECK_OPENCL_ERROR( status, "clEnqueueReadBuffer failed.");

    status = clFlush(commandQueue);
    CHECK_OPENCL_ERROR(status,"clFlush() failed");

    status = waitForEventAndRelease(&events[1]);
    CHECK_ERROR(status, SDK_SUCCESS, "WaitForEventAndRelease(events[1]) Failed");
    return SDK_SUCCESS;
}

/**
 * Reference CPU implementation of Simple Convolution
 * for performance comparison
 *
 * NOTE(review): this is an empty stub — no CPU reference result is ever
 * computed, which is why verifyResults() has nothing to compare against.
 * All parameters are currently unused.
 */
void
TwoDimConvolution::TwoDimConvolutionCPUReference(cl_uint  *output,
        const cl_uint  *input,
        const cl_float *input_m,
        const cl_uint  width,
        const cl_uint height)

{
    
}

int TwoDimConvolution::initialize()
{
    // Call base class Initialize to get default configuration
    if  (sampleArgs->initialize() != SDK_SUCCESS)
    {
        return SDK_FAILURE;
    }

    // Now add customized options
    Option* width_option = new Option;
    CHECK_ALLOCATION(width_option, "Memory allocation error.\n");
	std::cout << " func:" << __func__ << " line:" << __LINE__ << " width:" << width << " height:" << height << std::endl;

    width_option->_sVersion = "x";
    width_option->_lVersion = "width";
    width_option->_description = "Width of the input matrix";
    width_option->_type = CA_ARG_INT;
    width_option->_value = &width;
	std::cout << " func:" << __func__ << " line:" << __LINE__ << " width:" << width << " height:" << height << std::endl;

    sampleArgs->AddOption(width_option);
    delete width_option;

    Option* height_option = new Option;
    CHECK_ALLOCATION(height_option, "Memory allocation error.\n");

    height_option->_sVersion = "y";
    height_option->_lVersion = "height";
    height_option->_description = "Height of the input matrix";
    height_option->_type = CA_ARG_INT;
    height_option->_value = &height;
	std::cout << " func:" << __func__ << " line:" << __LINE__ << " width:" << width << " height:" << height << std::endl;

    sampleArgs->AddOption(height_option);
    delete height_option;

    Option* num_iterations = new Option;
    CHECK_ALLOCATION(num_iterations, "Memory allocation error.\n");

    num_iterations->_sVersion = "i";
    num_iterations->_lVersion = "iterations";
    num_iterations->_description = "Number of iterations for kernel execution";
    num_iterations->_type = CA_ARG_INT;
    num_iterations->_value = &iterations;

    sampleArgs->AddOption(num_iterations);
    delete num_iterations;

    return SDK_SUCCESS;
}

int TwoDimConvolution::setup()
{
	std::cout << " func:" << __func__ << " line:" << __LINE__ << " width:" << width << " height:" << height << std::endl;

    if(!isPowerOf2(width))
    {
        width = roundToPowerOf2(width);
    }
    if(!isPowerOf2(height))
    {
        height = roundToPowerOf2(height);
    }
	std::cout << " func:" << __func__ << " line:" << __LINE__ << " width:" << width << " height:" << height << std::endl;

    if (setupTwoDimConvolution() != SDK_SUCCESS)
    {
        return SDK_FAILURE;
    }

    int timer = sampleTimer->createTimer();
    sampleTimer->resetTimer(timer);
    sampleTimer->startTimer(timer);

    if (setupCL() != SDK_SUCCESS)
    {
        return SDK_FAILURE;
    }

    sampleTimer->stopTimer(timer);

    setupTime = (cl_double)sampleTimer->readTimer(timer);

    return SDK_SUCCESS;
}


/**
 * Execute the kernel `iterations` times, record the average per-iteration
 * time in `totalKernelTime`, and print the result matrix unless quiet.
 */
int TwoDimConvolution::run()
{
    int runTimer = sampleTimer->createTimer();
    sampleTimer->resetTimer(runTimer);
    sampleTimer->startTimer(runTimer);

    std::cout << "Executing kernel for " << iterations <<
              " iterations" << std::endl;
    std::cout << "-------------------------------------------" << std::endl;

    for(int iter = 0; iter < iterations; ++iter)
    {
        // Arguments are set and execution call is enqueued on command buffer.
        if(runCLKernels() != SDK_SUCCESS)
        {
            return SDK_FAILURE;
        }
    }

    sampleTimer->stopTimer(runTimer);
    // Average over all iterations.
    totalKernelTime = (double)(sampleTimer->readTimer(runTimer)) / iterations;

    if(!sampleArgs->quiet)
    {
        printArray<cl_float >("Output", output, width, height);
    }

    return SDK_SUCCESS;
}

/**
 * Result verification is not implemented (the CPU reference is an empty
 * stub); when --verify is requested we only announce that fact.
 * The dead "#if 1 / #else" scaffolding from the original was removed.
 */
int TwoDimConvolution::verifyResults()
{
    if(sampleArgs->verify)
    {
        std::cout<<"First do not verify \n" << std::endl;
    }

    return SDK_SUCCESS;
}

/**
 * Print the timing table (Width, Height, total time, kernel time) when
 * --timing was requested.
 *
 * Bug fix: the original declared 5-element arrays with only 4 entries and
 * passed a count of 5 to printStatistics(), emitting a spurious empty
 * fifth column; both arrays and the count are now 4.
 */
void TwoDimConvolution::printStats()
{
    if(sampleArgs->timing)
    {
        std::string strArray[4] = {"Width", "Height", "Time(sec)", "KernelTime(sec)"};
        std::string stats[4];

        sampleTimer->totalTime = setupTime + totalKernelTime;

        stats[0]  = toString(width    , std::dec);
        stats[1]  = toString(height   , std::dec);
        stats[2]  = toString(sampleTimer->totalTime, std::dec);
        stats[3]  = toString(totalKernelTime, std::dec);

        printStatistics(strArray, stats, 4);
    }
}

// Releases all OpenCL objects (kernel, program, buffers, queue, context) in
// reverse order of creation, then frees the host-side arrays.
// Returns SDK_SUCCESS, or fails via CHECK_OPENCL_ERROR on a release error.
int TwoDimConvolution::cleanup()
{
    // Releases OpenCL resources (Context, Memory etc.)
    cl_int status;

    status = clReleaseKernel(kernel);
    CHECK_OPENCL_ERROR(status, "clReleaseKernel failed.(kernel)");

    status = clReleaseProgram(program);
    CHECK_OPENCL_ERROR(status, "clReleaseProgram failed.(program)");

    status = clReleaseMemObject(inputBuffer);
    CHECK_OPENCL_ERROR(status, "clReleaseMemObject failed.(inputBuffer)");

    status = clReleaseMemObject(outputBuffer);
    CHECK_OPENCL_ERROR(status, "clReleaseMemObject failed.(outputBuffer)");

    status = clReleaseMemObject(mInput);
    CHECK_OPENCL_ERROR(status, "clReleaseMemObject failed.(mInput)");

    status = clReleaseCommandQueue(commandQueue);
    CHECK_OPENCL_ERROR(status, "clReleaseCommandQueue failed.(commandQueue)");

    // Context is released last, after everything created from it.
    status = clReleaseContext(context);
    CHECK_OPENCL_ERROR(status, "clReleaseContext failed.(context)");

    // release program resources (input memory etc.)
    // The buffers used CL_MEM_USE_HOST_PTR, so host memory is freed only
    // after the cl_mem objects above have been released.
    FREE(input);
    FREE(output);
    FREE(input_m);
    FREE(verificationOutput);
    FREE(devices);

    return SDK_SUCCESS;
}

int
main(int argc, char * argv[])
{
    TwoDimConvolution clTwoDimConvolution;

    if (clTwoDimConvolution.initialize() != SDK_SUCCESS)
    {
        return SDK_FAILURE;
    }

    if (clTwoDimConvolution.sampleArgs->parseCommandLine(argc, argv) != SDK_SUCCESS)
    {
        return SDK_FAILURE;
    }

    if(clTwoDimConvolution.sampleArgs->isDumpBinaryEnabled())
    {
        return clTwoDimConvolution.genBinaryImage();
    }

    if (clTwoDimConvolution.setup() != SDK_SUCCESS)
    {
        return SDK_FAILURE;
    }

    if (clTwoDimConvolution.run() != SDK_SUCCESS)
    {
        return SDK_FAILURE;
    }

    if (clTwoDimConvolution.verifyResults() != SDK_SUCCESS)
    {
        return SDK_FAILURE;
    }

    if (clTwoDimConvolution.cleanup() != SDK_SUCCESS)
    {
        return SDK_FAILURE;
    }

    clTwoDimConvolution.printStats();
    return SDK_SUCCESS;
}
