﻿//
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//
//cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
//
//__global__ void addKernel(int *c, const int *a, const int *b)
//{
//    int i = threadIdx.x;
//    c[i] = a[i] + b[i];
//}
//
//int main()
//{
//    const int arraySize = 5;
//    const int a[arraySize] = { 1, 2, 3, 4, 5 };
//    const int b[arraySize] = { 10, 20, 30, 40, 50 };
//    int c[arraySize] = { 0 };
//
//    // Add vectors in parallel.
//    cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
//    if (cudaStatus != cudaSuccess) {
//        fprintf(stderr, "addWithCuda failed!");
//        return 1;
//    }
//
//    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
//        c[0], c[1], c[2], c[3], c[4]);
//
//    // cudaDeviceReset must be called before exiting in order for profiling and
//    // tracing tools such as Nsight and Visual Profiler to show complete traces.
//    cudaStatus = cudaDeviceReset();
//    if (cudaStatus != cudaSuccess) {
//        fprintf(stderr, "cudaDeviceReset failed!");
//        return 1;
//    }
//
//    return 0;
//}
//
//// Helper function for using CUDA to add vectors in parallel.
//cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
//{
//    int *dev_a = 0;
//    int *dev_b = 0;
//    int *dev_c = 0;
//    cudaError_t cudaStatus;
//
//    // Choose which GPU to run on, change this on a multi-GPU system.
//    cudaStatus = cudaSetDevice(0);
//    if (cudaStatus != cudaSuccess) {
//        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
//        goto Error;
//    }
//
//    // Allocate GPU buffers for three vectors (two input, one output)    .
//    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
//    if (cudaStatus != cudaSuccess) {
//        fprintf(stderr, "cudaMalloc failed!");
//        goto Error;
//    }
//
//    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
//    if (cudaStatus != cudaSuccess) {
//        fprintf(stderr, "cudaMalloc failed!");
//        goto Error;
//    }
//
//    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
//    if (cudaStatus != cudaSuccess) {
//        fprintf(stderr, "cudaMalloc failed!");
//        goto Error;
//    }
//
//    // Copy input vectors from host memory to GPU buffers.
//    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
//    if (cudaStatus != cudaSuccess) {
//        fprintf(stderr, "cudaMemcpy failed!");
//        goto Error;
//    }
//
//    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
//    if (cudaStatus != cudaSuccess) {
//        fprintf(stderr, "cudaMemcpy failed!");
//        goto Error;
//    }
//
//    // Launch a kernel on the GPU with one thread for each element.
//    addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
//
//    // Check for any errors launching the kernel
//    cudaStatus = cudaGetLastError();
//    if (cudaStatus != cudaSuccess) {
//        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
//        goto Error;
//    }
//    
//    // cudaDeviceSynchronize waits for the kernel to finish, and returns
//    // any errors encountered during the launch.
//    cudaStatus = cudaDeviceSynchronize();
//    if (cudaStatus != cudaSuccess) {
//        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
//        goto Error;
//    }
//
//    // Copy output vector from GPU buffer to host memory.
//    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
//    if (cudaStatus != cudaSuccess) {
//        fprintf(stderr, "cudaMemcpy failed!");
//        goto Error;
//    }
//
//Error:
//    cudaFree(dev_c);
//    cudaFree(dev_a);
//    cudaFree(dev_b);
//    
//    return cudaStatus;
//}
#include<iostream>
#include<fstream>
#include<math.h>
#include<stdio.h>
#include<stdlib.h>
#include <iomanip>
#include"cuda_runtime.h"
#include"device_launch_parameters.h"


using namespace std;

#define size 10000  // number of data points (problem scale)
#define k 30       // number of clusters
#define c 1
#define P 3

float base[size][2];// point data; only 2-D is considered for now
float center[k][2];// centroid coordinates
float distancep[size][k];// distance from each point to each cluster centroid
int result[size];// cluster assignment of each point

void getcenter();
float getd();
void cuda();
void cudadivide();

float resulttime;

int main() {
    // Load the 2-D point set from "source.txt" (`size` points as "x y" pairs),
    // then run the timed CUDA distance pass.
    //
    // One-time generator for the input file (kept for reference):
    /*ofstream out;
    out.open("source.txt");
    for (int i = 0; i < 2 * size; i++)
        out << rand() % 1000 << " ";
    out.close();*/

    ifstream in;
    in.open("source.txt");
    if (!in.is_open()) {
        // Without input data the clustering would run on uninitialized memory.
        cerr << "failed to open source.txt" << endl;
        return 1;
    }
    for (int i = 0; i < size; i++) {
        float x, y;
        if (!(in >> x >> y)) {
            cerr << "source.txt ended early at point " << i << endl;
            return 1;
        }
        base[i][0] = x;
        base[i][1] = y;
    }
    in.close();
    cuda();
    return 0;
}

// Compute the squared Euclidean distance from every point to every centroid.
//   gpudata     : `size` points packed as [x0,y0,x1,y1,...]
//   gpucenter   : `k` centroids packed the same way
//   gpudistance : size*k output; gpudistance[i*k + j] = dist2(point i, centroid j)
// Grid-stride loop: correct for any launch configuration, including 1 thread.
// (The original body was commented out, leaving the kernel a no-op and
// gpudistance uninitialized.)
__global__ void dividepart1(float* gpudata, float* gpucenter, float* gpudistance) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;
    for (int i = idx; i < size; i += stride)
    {
        for (int j = 0; j < k; j++)
        {
            float dx = gpudata[2 * i] - gpucenter[2 * j];
            float dy = gpudata[2 * i + 1] - gpucenter[2 * j + 1];
            gpudistance[i * k + j] = dx * dx + dy * dy;
        }
    }
}

// Runs `c` timed GPU passes of the point-to-centroid distance kernel and
// accumulates the elapsed time in the global `resulttime`.
// NOTE(review): the full iterative k-means refinement (assign points,
// recompute centroids, repeat until the objective stops improving) was
// commented out in the original; only the first distance computation is
// performed and timed here.
void cuda() {
    cout << "cuda:size=" << size << endl;
    int num = 0;
    resulttime = 0.0;
    while (num != c) {
        num++;

        // Seed centroids with evenly spaced sample points.
        for (int i = 0; i < k; i++) {
            int a = size / k;
            center[i][0] = base[i * a][0];
            center[i][1] = base[i * a][1];
        }

        // Flatten the host 2-D arrays into interleaved [x0,y0,x1,y1,...]
        // buffers so each can be copied to the GPU in a single transfer.
        float* tempforbase = new float[2 * size];
        for (int i = 0; i < size; i++) {
            tempforbase[2 * i] = base[i][0];
            tempforbase[2 * i + 1] = base[i][1];
        }
        float* tempforcenter = new float[2 * k];
        for (int i = 0; i < k; i++) {
            tempforcenter[2 * i] = center[i][0];
            tempforcenter[2 * i + 1] = center[i][1];
        }

        // Device mirrors of base / center / distancep.
        cudaError_t ret;  // CUDA calls return cudaSuccess on success
        float* gpudata = 0;
        int sizeofgpudata = 2 * size * sizeof(float);
        ret = cudaMalloc(&gpudata, sizeofgpudata);
        if (ret != cudaSuccess) {
            printf("cudaMalloc gpudata failed!\n");
        }
        float* gpucenter = 0;
        int sizeofgpucenter = 2 * k * sizeof(float);
        ret = cudaMalloc(&gpucenter, sizeofgpucenter);
        if (ret != cudaSuccess) {
            printf("cudaMalloc gpucenter failed!\n");
        }
        float* gpudistance = 0;
        int sizeofgpudistance = size * k * sizeof(float);
        ret = cudaMalloc(&gpudistance, sizeofgpudistance);
        if (ret != cudaSuccess) {
            printf("cudaMalloc gpudistance failed!\n");
        }

        // Time host->device transfer + kernel + device->host transfer.
        cudaEvent_t start, stop;
        float elapsedTime = 0.0f;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start, 0);

        cudaMemcpy(gpudata, tempforbase, sizeofgpudata, cudaMemcpyHostToDevice);
        ret = cudaMemcpy(gpucenter, tempforcenter, sizeofgpucenter, cudaMemcpyHostToDevice);
        if (ret != cudaSuccess) {
            printf("cudaMemcpyHostToDevice failed!\n");
        }

        // Grid-stride kernel, 256 threads per block (the original launched
        // 32768 blocks of a single thread each, wasting 31/32 of every warp).
        int threads = 256;
        int blocks = (size + threads - 1) / threads;
        dividepart1 << <blocks, threads >> > (gpudata, gpucenter, gpudistance);
        ret = cudaGetLastError();  // launch-configuration errors surface here
        if (ret != cudaSuccess) {
            printf("dividepart1 launch failed: %s\n", cudaGetErrorString(ret));
        }

        ret = cudaMemcpy(tempforbase, gpudata, sizeofgpudata, cudaMemcpyDeviceToHost);
        ret = cudaMemcpy(tempforcenter, gpucenter, sizeofgpucenter, cudaMemcpyDeviceToHost);
        if (ret != cudaSuccess) {
            printf("cudaMemcpyDeviceToHost failed!\n");
        }
        cudaDeviceSynchronize();

        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsedTime, start, stop);
        resulttime += elapsedTime;

        // Release all per-iteration resources (the original leaked the host
        // buffers, all three device allocations, and both events each pass).
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        cudaFree(gpudata);
        cudaFree(gpucenter);
        cudaFree(gpudistance);
        delete[] tempforbase;
        delete[] tempforcenter;

        cout << endl;
        cout << resulttime;
    }
    return;
}

// Assign every point to its nearest centroid, using the precomputed
// distances in distancep; writes the chosen cluster index into result[].
// Ties keep the lowest-index centroid.
void cudadivide() {
    for (int i = 0; i < size; i++) {
        int best = 0;
        float bestDist = distancep[i][0];
        for (int j = 1; j < k; j++) {
            if (distancep[i][j] < bestDist) {
                bestDist = distancep[i][j];
                best = j;
            }
        }
        result[i] = best;
    }
}
// Returns the k-means objective: the total squared distance from every
// point to the centroid of its assigned cluster.
float getd() {
    float perCluster[k] = { 0 };  // k is a compile-time constant
    for (int i = 0; i < size; i++) {
        int cl = result[i];
        float dx = base[i][0] - center[cl][0];
        float dy = base[i][1] - center[cl][1];
        perCluster[cl] += dx * dx + dy * dy;
    }
    float total = 0;
    for (int i = 0; i < k; i++)
        total += perCluster[i];
    return total;
}
// Recompute each centroid as the mean of the points currently assigned to
// it (reads result[] and base[], writes center[]).
// Fix: an empty cluster now keeps its previous centroid instead of dividing
// by zero and producing NaN coordinates.
void getcenter() {
    int count[k];      // points assigned to each cluster
    float sumx[k];     // per-cluster coordinate sums
    float sumy[k];
    for (int i = 0; i < k; i++) {
        count[i] = 0;
        sumx[i] = 0.0f;
        sumy[i] = 0.0f;
    }
    for (int i = 0; i < size; i++) {
        int cl = result[i];
        count[cl]++;
        sumx[cl] += base[i][0];
        sumy[cl] += base[i][1];
    }
    for (int i = 0; i < k; i++) {
        if (count[i] > 0) {  // guard: empty cluster keeps its old centroid
            center[i][0] = sumx[i] / count[i];
            center[i][1] = sumy[i] / count[i];
        }
    }
    return;
}
