{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "a29b8125-39c3-47ad-9fbb-d2f7e000e93e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Writing lab/matrix_mul.cpp\n"
     ]
    }
   ],
   "source": [
    "%%writefile lab/matrix_mul.cpp\n",
     "#include <chrono>\n",
     "#include <cmath>\n",
     "#include <cstdio>\n",
     "#include <cstdlib>\n",
     "#include <iostream>\n",
     "#include <CL/sycl.hpp>\n",
     "#define random_float() (rand() / double(RAND_MAX))\n",
    "using namespace std;\n",
    "using namespace sycl;\n",
    "\n",
    "// 使用gpu计算矩阵相乘的结果和耗时\n",
    "double gpu_kernel(float *A, float *B, float *C, int M, int N, int K, int block, sycl::queue &q) {\n",
    "    // 对矩阵取整，保证矩阵可以按照block整分\n",
    "    auto rows = (M + block - 1) / block * block;\n",
    "    auto cols = (N + block - 1) / block * block;\n",
    "\n",
    "    // 定义块的范围和整个矩阵的范围\n",
    "    auto local_ndrange  = range<2>(block, block);\n",
    "    auto global_ndrange = range<2>(rows, cols);\n",
    "    double time_cost = 0;\n",
    "\n",
    "    // 矩阵相乘，并行计算每一块的结果\n",
    "    auto e = q.submit([&](sycl::handler &h) {\n",
    "        h.parallel_for<class k_name_t>(sycl::nd_range<2>(global_ndrange, local_ndrange), [=](sycl::nd_item<2> index) {\n",
    "            // 获取当前计算的行和列\n",
    "            int row = index.get_local_id(0) + index.get_group(0) * block;\n",
    "            int col = index.get_local_id(1) + index.get_group(1) * block;\n",
    "            float sum = 0;\n",
    "            // 计算行*列的值\n",
    "            for (int i = 0; i < K; i++) {\n",
    "                sum += A[row * K + i] * B[i * N  + col];\n",
    "            }\n",
    "            C[row * N + col] = sum;\n",
    "        });\n",
    "    });\n",
    "    e.wait();\n",
    "    // 计算运行时间并返回\n",
    "    time_cost += (e.get_profiling_info<info::event_profiling::command_end>() - e.get_profiling_info<info::event_profiling::command_start>()) /1000/1000;\n",
    "    return(time_cost);\n",
    "}\n",
    "\n",
    "// 使用cpu串行计算矩阵相乘的结果和耗时\n",
    "double cpu_kernel(float *A, float *B, float *C, int M, int N, int K) {\n",
    "\n",
    "    double time_cost = 0.0;\n",
    "    std::chrono::high_resolution_clock::time_point s, e;\n",
    "    // 串行计算矩阵相乘\n",
    "    s = std::chrono::high_resolution_clock::now();\n",
    "    for(int i = 0; i < M; i++) {\n",
    "        for(int j = 0; j < N; j++) {\n",
    "            float sum = 0;\n",
    "            for(int k = 0; k < K; k++) {\n",
    "                sum +=  A[i * K + k] * B[k * N  + j];\n",
    "            }\n",
    "            C[i * N + j] = sum;\n",
    "        }\n",
    "    }\n",
    "    e = std::chrono::high_resolution_clock::now();\n",
    "    // 计算运行时间并返回\n",
    "    time_cost = std::chrono::duration<float, std::milli>(e - s).count();\n",
    "    return(time_cost);\n",
    "}\n",
    "\n",
     "// Compare the CPU and GPU results element-wise and return the number of\n",
     "// elements that differ by more than the tolerance; mismatches are printed.\n",
     "int judge(float *C_cpu, float *C_gpu, int l){\n",
     "    int x = 0;\n",
     "    for(int i = 0; i < l; i++) {\n",
     "        // 1e-3 tolerance absorbs float rounding differences between host and device\n",
     "        if( fabs(C_cpu[i] - C_gpu[i]) > 1e-3) {\n",
     "            x++;\n",
     "            printf(\"\\n%lf, %lf\", C_cpu[i], C_gpu[i]);\n",
     "        }\n",
     "    }\n",
     "    return(x);\n",
     "}\n",
    "\n",
    "void gemm(const int M,const int N,const int K,const int block,const int iterations,sycl::queue &q) {\n",
    "    // 输出基本信息\n",
    "    cout << \"\\n矩阵相乘: A(\" << M << \"*\" << K << \")* B(\" << K << \"*\" << N << \") = C(\" << M << \"*\" << N << \")\\n\";\n",
    "    cout << \"分块间距: \" << block << std::endl;\n",
    "    // 分配内存\n",
    "    auto A = malloc_shared<float>(M * K, q);\n",
    "    auto B = malloc_shared<float>(K * N, q);\n",
    "    auto C_gpu = malloc_shared<float>(M * N, q);\n",
    "    auto C_cpu = malloc_host<float>(M * N, q);\n",
    "\n",
    "    // 初始化输入矩阵\n",
    "    for(int i=0; i < M * K; i++)\n",
    "        A[i] = random_float();\n",
    "    for (int i = 0; i < K * N; i++)\n",
    "        B[i] = random_float();\n",
    "    for(int i=0; i < M * N; i++) {\n",
    "        C_gpu[i] = 0;\n",
    "        C_cpu[i] = 0;\n",
    "    }\n",
    "\n",
    "\n",
    "    double time_cost_gpu = 0;\n",
    "    double time_cost_cpu = 0;\n",
    "\n",
    "    // 使用gpu计算矩阵相乘的结果和耗时\n",
    "    int warmup = 10;\n",
    "    for (int a = 0; a < iterations + warmup; a++) {\n",
    "        float duration = gpu_kernel(A, B, C_gpu, M, N, K, block, q);\n",
    "        if(a >= warmup) time_cost_gpu += duration;\n",
    "    }\n",
    "    time_cost_gpu = time_cost_gpu / iterations;\n",
    "\n",
    "    // 使用cpu计算矩阵相乘的结果和耗时\n",
    "    warmup = 2;\n",
    "    for(int a = 0; a < iterations/2 + warmup; a++) {\n",
    "        float duration = cpu_kernel(A, B, C_cpu, M, N, K);\n",
    "        if(a >= warmup) time_cost_cpu += duration;\n",
    "    }\n",
    "    time_cost_cpu = time_cost_cpu / iterations/2;\n",
    "\n",
    "    // 比较cpu和gpu运算结果是否不同\n",
    "    int diff = 0;\n",
    "    diff = judge(C_cpu, C_gpu, M*N);\n",
    "    if(diff > 0) cout<<\"\\n矩阵相乘的串行计算结果与并行计算结果有\\n\"<<diff<<\"处差异\";\n",
    "    cout << \"\\n并行计算(GPU)耗时: \" << time_cost_gpu << \" ms\\n\";\n",
    "    cout << \"串行计算(CPU)耗时: \" << time_cost_cpu << \" ms\\n\";\n",
    "\n",
    "    free(A, q);\n",
    "    free(B, q);\n",
    "    free(C_gpu, q);\n",
    "    free(C_cpu, q);\n",
    "\n",
    "}\n",
    "\n",
    "int main() {\n",
    "\n",
    "    auto plist = cl::sycl::property_list {cl::sycl::property::queue::enable_profiling()};\n",
    "    queue q( cl::sycl::gpu_selector{} , plist);\n",
    "\n",
    "    int M = 1024;\n",
    "    int N = 1024;\n",
    "    int K = 1024;\n",
    "    int block = 8;\n",
    "    gemm(M, N, K, block, 10, q);\n",
    "\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "24593547-3cca-4f43-b553-17692b58224d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Job has been submitted to Intel(R) DevCloud and will execute soon.\n",
      "\n",
      "Job ID                    Name             User            Time Use S Queue\n",
      "------------------------- ---------------- --------------- -------- - -----\n",
      "2177579.v-qsvr-1           ...ub-singleuser u175032         00:00:16 R jupyterhub     \n",
      "2177719.v-qsvr-1           matrix_mul.sh    u175032                0 Q batch          \n",
      "\n",
      "Waiting for Output ████████████████████████████████ Done⬇\n",
      "\n",
      "########################################################################\n",
      "#      Date:           Wed 08 Feb 2023 01:21:03 AM PST\n",
      "#    Job ID:           2177719.v-qsvr-1.aidevcloud\n",
      "#      User:           u175032\n",
      "# Resources:           cput=75:00:00,neednodes=1:gpu:ppn=2,nodes=1:gpu:ppn=2,walltime=06:00:00\n",
      "########################################################################\n",
      "\n",
      "## u175032 is compiling DPCPP_Essentials Module1 -- oneAPI Intro sample - 1 of 1 matrix_mul.cpp\n",
      "\n",
      "矩阵相乘: A(1024*1024)* B(1024*1024) = C(1024*1024)\n",
      "分块间距: 8\n",
      "\n",
      "并行计算(GPU)耗时: 71 ms\n",
      "串行计算(CPU)耗时: 442.627 ms\n",
      "\n",
      "########################################################################\n",
      "# End of output for job 2177719.v-qsvr-1.aidevcloud\n",
      "# Date: Wed 08 Feb 2023 01:21:25 AM PST\n",
      "########################################################################\n",
      "\n",
      "lab/matrix_mul.cpp:130:24: warning: 'gpu_selector' is deprecated: Use the callable sycl::gpu_selector_v instead. [-Wdeprecated-declarations]\n",
      "    queue q( cl::sycl::gpu_selector{} , plist);\n",
      "                       ^\n",
      "/glob/development-tools/versions/oneapi/2023.0.1/oneapi/compiler/2023.0.0/linux/bin-llvm/../include/sycl/device_selector.hpp:62:21: note: 'gpu_selector' has been explicitly marked deprecated here\n",
      "class __SYCL_EXPORT __SYCL2020_DEPRECATED(\n",
      "                    ^\n",
      "/glob/development-tools/versions/oneapi/2023.0.1/oneapi/compiler/2023.0.0/linux/bin-llvm/../include/sycl/detail/defines_elementary.hpp:52:40: note: expanded from macro '__SYCL2020_DEPRECATED'\n",
      "#define __SYCL2020_DEPRECATED(message) __SYCL_DEPRECATED(message)\n",
      "                                       ^\n",
      "/glob/development-tools/versions/oneapi/2023.0.1/oneapi/compiler/2023.0.0/linux/bin-llvm/../include/sycl/detail/defines_elementary.hpp:43:38: note: expanded from macro '__SYCL_DEPRECATED'\n",
      "#define __SYCL_DEPRECATED(message) [[deprecated(message)]]\n",
      "                                     ^\n",
      "1 warning generated.\n",
      "lab/matrix_mul.cpp:130:24: warning: 'gpu_selector' is deprecated: Use the callable sycl::gpu_selector_v instead. [-Wdeprecated-declarations]\n",
      "    queue q( cl::sycl::gpu_selector{} , plist);\n",
      "                       ^\n",
      "/glob/development-tools/versions/oneapi/2023.0.1/oneapi/compiler/2023.0.0/linux/bin-llvm/../include/sycl/device_selector.hpp:62:21: note: 'gpu_selector' has been explicitly marked deprecated here\n",
      "class __SYCL_EXPORT __SYCL2020_DEPRECATED(\n",
      "                    ^\n",
      "/glob/development-tools/versions/oneapi/2023.0.1/oneapi/compiler/2023.0.0/linux/bin-llvm/../include/sycl/detail/defines_elementary.hpp:52:40: note: expanded from macro '__SYCL2020_DEPRECATED'\n",
      "#define __SYCL2020_DEPRECATED(message) __SYCL_DEPRECATED(message)\n",
      "                                       ^\n",
      "/glob/development-tools/versions/oneapi/2023.0.1/oneapi/compiler/2023.0.0/linux/bin-llvm/../include/sycl/detail/defines_elementary.hpp:43:38: note: expanded from macro '__SYCL_DEPRECATED'\n",
      "#define __SYCL_DEPRECATED(message) [[deprecated(message)]]\n",
      "                                     ^\n",
      "1 warning generated.\n",
      "Job Completed in 32 seconds.\n"
     ]
    }
   ],
   "source": [
     "# Submit the compile/run script: use the ./q qsub wrapper on Intel DevCloud when qsub exists, otherwise run locally\n",
     "! chmod 755 q; chmod 755 matrix_mul.sh;if [ -x \"$(command -v qsub)\" ]; then ./q matrix_mul.sh; else ./matrix_mul.sh; fi"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1b35fda8-a55e-45ab-bc5f-5a951dcfca92",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (Intel® oneAPI 2023.0)",
   "language": "python",
   "name": "c009-intel_distribution_of_python_3_oneapi-beta05-python"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
