{
 "metadata": {
  "name": "",
  "signature": "sha256:33ba8540282fd18906203d183f494844ec06afcc6787ed5d9b621bb0e28dcf08"
 },
 "nbformat": 3,
 "nbformat_minor": 0,
 "worksheets": [
  {
   "cells": [
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "The cell below just loads the style of the notebook. Please ignore it."
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "from IPython.core.display import HTML\n",
      "def css_styling():\n",
      "    styles = open('./style/mesnardo_style.css', 'r').read()\n",
      "    return HTML(styles)\n",
      "css_styling()"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "html": [
        "<link href='http://fonts.googleapis.com/css?family=Alegreya+Sans:100,300,400,500,700,800,900,100italic,300italic,400italic,500italic,700italic,800italic,900italic' rel='stylesheet' type='text/css'>\n",
        "<link href='http://fonts.googleapis.com/css?family=Arvo:400,700,400italic' rel='stylesheet' type='text/css'>\n",
        "<link href='http://fonts.googleapis.com/css?family=PT+Mono' rel='stylesheet' type='text/css'>\n",
        "<link href='http://fonts.googleapis.com/css?family=Shadows+Into+Light' rel='stylesheet' type='text/css'>\n",
        "<link href='http://fonts.googleapis.com/css?family=Nixie+One' rel='stylesheet' type='text/css'>\n",
        "<link href='http://fonts.googleapis.com/css?family=Raleway:400,200,800,500' rel='stylesheet' type='text/css'>\n",
        "<link href='http://fonts.googleapis.com/css?family=Muli:300,400,300italic,400italic' rel='stylesheet' type='text/css'>\n",
        "<link href='http://fonts.googleapis.com/css?family=Roboto:400,400italic,700,900' rel='stylesheet' type='text/css'>\n",
        "<link href='http://fonts.googleapis.com/css?family=Open+Sans:400italic,700italic,400,700,800' rel='stylesheet' type='text/css'>\n",
        "<link href='http://fonts.googleapis.com/css?family=Istok+Web:400,700,400italic' rel='stylesheet' type='text/css'>\n",
        "<link href='http://fonts.googleapis.com/css?family=PT+Sans:400,700,400italic,700italic' rel='stylesheet' type='text/css'>\n",
        "<style>\n",
        "\n",
        "@font-face {\n",
        "    font-family: \"Computer Modern\";\n",
        "    src: url('http://mirrors.ctan.org/fonts/cm-unicode/fonts/otf/cmunss.otf');\n",
        "}\n",
        "\n",
        "#notebook_panel { /* main background */\n",
        "\tbackground: #FFFFFF;\n",
        "}\n",
        "\n",
        "div.cell { /* set cell width to about 80 chars */\n",
        "    width: 800px;\n",
        "\tmargin-left: 0% !important;\n",
        "\tmargin-right: 0% !important;\n",
        "}\n",
        "\n",
        "div #notebook { /* centre the content */\n",
        "    background: #FFFFFF; /* white background for content */\n",
        "    width: 1000px;\n",
        "    margin: auto;\n",
        "    padding-left: 0.5em;\n",
        "}\n",
        "\n",
        "#notebook li { /* More space between bullet points */\n",
        "margin-top:0.8em;\n",
        "}\n",
        "\n",
        "/* draw border around running cells */\n",
        "div.cell.border-box-sizing.code_cell.running { \n",
        "    border: 1px solid #003366;\n",
        "}\n",
        "\n",
        "/* Put a solid color box around each cell and its output, visually linking them*/\n",
        "div.cell.code_cell {\n",
        "    background-color: #FFFFFF; \n",
        "    border-radius: 0px; \n",
        "    padding: 0.5em;\n",
        "    margin-top: 1em;\n",
        "}\n",
        "\n",
        "div.text_cell_render{\n",
        "    font-family: 'Alegreya Sans', sans-serif;\n",
        "    line-height: 160%;\n",
        "    font-size: 120%;\n",
        "    font-weight: 400;\n",
        "    width: 800px;\n",
        "    margin-left: auto;\n",
        "    margin-right:auto;\n",
        "}\n",
        "\n",
        "\n",
        "/* Formatting for header cells */\n",
        ".text_cell_render h1 {\n",
        "    font-family: 'Nixie One', serif;\n",
        "    font-style: regular;\n",
        "    font-weight: 800;    \n",
        "    font-size: 40pt;\n",
        "    line-height: 100%;\n",
        "    color: #003366;\n",
        "    margin-bottom: 0.5em;\n",
        "    margin-top: 0.5em;\n",
        "    display: block;\n",
        "}\t\n",
        ".text_cell_render h2 {\n",
        "    font-family: 'Nixie One', serif;\n",
        "\tfont-style: regular;\n",
        "    font-weight: 800;\n",
        "    font-size: 25pt;\n",
        "    line-height: 100%;\n",
        "    color: #003366;\n",
        "    margin-bottom: 0.5em;\n",
        "    margin-top: 1.0em;\n",
        "    display: block;\n",
        "}\n",
        "\n",
        ".text_cell_render h3 {\n",
        "    font-family: 'Nixie One', serif;\n",
        "\tfont-style: regular;\n",
        "\tfont-size: 18pt;\n",
        "    font-weight: 800;\n",
        "\tline-height: 100%;\n",
        "\tcolor: #003366;\n",
        "    margin-bottom: 0.5em;\n",
        "\tmargin-top: 1.0em;\n",
        "    display: block;\n",
        "}\n",
        "\n",
        ".text_cell_render h4 {\n",
        "    font-family: 'Nixie One', serif;\n",
        "\tfont-style: italic;\n",
        "\tfont-size: 14pt;\n",
        "\tfont-weight: 800;\n",
        "\tline-height: 100%;\n",
        "\tcolor: #003366;\n",
        "\tmargin-bottom: 0.5em;\n",
        "\tmargin-top: 1.0em;\n",
        "\tdisplay: block;\n",
        "}\n",
        "\n",
        ".text_cell_render h5 {\n",
        "    font-family: 'Arvo', sans-serif;\n",
        "    font-weight: 400;\n",
        "    font-size: 12pt;\n",
        "    color: grey;\n",
        "    font-style: italic;\n",
        "    margin-bottom: 0.2em;\n",
        "    margin-top: 1.0em;\n",
        "    display: block;\n",
        "}\n",
        "\n",
        ".text_cell_render h6 {\n",
        "    font-family: 'PT Mono', sans-serif;\n",
        "    font-weight: 300;\n",
        "    font-size: 10pt;\n",
        "    line-height: 100%;\n",
        "    color: grey;\n",
        "    margin-bottom: 1px;\n",
        "    margin-top: 1px;\n",
        "}\n",
        "\n",
        ".CodeMirror{\n",
        "        font-family: \"PT Mono\";\n",
        "        font-size: 90%;\n",
        "}\n",
        "\n",
        "</style>\n"
       ],
       "metadata": {},
       "output_type": "pyout",
       "prompt_number": 1,
       "text": [
        "<IPython.core.display.HTML at 0x7fec44313bd0>"
       ]
      }
     ],
     "prompt_number": 1
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "---"
     ]
    },
    {
     "cell_type": "heading",
     "level": 1,
     "metadata": {},
     "source": [
      "Single Core Optimization of the fast Fourier transform"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "---"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "---\n",
      "### $\\star$ Single-core optimization ([Naty](https://github.com/barbagroup/teaching-materials/blob/master/Single_Core_Optimization/Single_Core_Optimization.md))\n",
      "---"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "* Increasing CPU frequency results in **\"free\" speed-up** for numerical softwares, i.e., a numerical code runs faster on a CPU with higher clock spedd without any extra effort.\n",
      "\n",
      "* The increase in processor speed has not been accompanied by a similar increase in memory speed. Moving data from and to memory has become the bottleneck.\n",
      "\n",
      "The memory hierarchy aims to address this problem:\n",
      "\n",
      "<center><img src=\"./images/memory_hierarchy.png\" style=\"width: 600px;\"/></center>\n",
      "\n",
      "* Profiling code with **gprof** to show the bottlenecks.\n",
      "\n",
      "* Description of the different compiler flags.\n",
      "\n",
      "* Introduction of **cache blocking** with the matrix transpose."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "---\n",
      "### $\\star$ Cache optimization ([Anush](https://github.com/barbagroup/teaching-materials/blob/master/Single_Core_Optimization/Cache_Optimization.md))\n",
      "---"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "* Definitions: memory bandwidth, FLOPS, arithmetic intensity, CPU-bound and memory-bound problems.\n",
      "\n",
      "* Notion of **temporal locality** and **spatial locality**.\n",
      "\n",
      "* How cache works (cache-line size, cache miss and cache hit, associativity).\n",
      "\n",
      "* **Cache blocking**, with applications to reading elements of an array several times and computing dense matrix-matrix multiplication."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "---\n",
      "### $\\star$ Addendum\n",
      "---"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "Here are some additional ideas, tricks, skills, methods... taken from Chellappa et al. (2008)[1], on how single-core optimization can be applied to numerical codes. \n",
      "\n",
      "In their paper, various optimization methods are classified into four categories:\n",
      "\n",
      "* **performance-conscious programming**,\n",
      "* **optimizations for cache**,\n",
      "* **optimizations for the registers and CPU**,\n",
      "* **parameter-based performance tuning**."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "#### Performance-conscious programming\n",
      "\n",
      "* Choose **C language** for high performance implementation.\n",
      "* Avoid Object-orineted features for performance-critical parts (operator overloading and late binding lead to performance deterioration).\n",
      "* Use **one-dimensional arrays of scalar variables** instead of multi-dimnesional ones, especially when the sizes are unknowns at the compile time.\n",
      "* To represent vectors of complex numbers, avoid using a `struct`. Choose vectors of real numbers twice the size should be used, with the real and imaginary parts appearing as pairs along the vector."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "#### Cache optimization\n",
      "\n",
      "The objective is to take advantage of the memory hierarchy and reuse loaded data as much as possible. Optmization at the cache level can fall into three groups:\n",
      "\n",
      "* **Blocking**\n",
      "* **Loop merging**\n",
      "* **Buffering**"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "##### Blocking\n",
      "\n",
      "Instead of operating on entire rows and columns of an array, blocked algorithms operate on **subsets** or **\"blocks\"**, so that data loaded into the faster levels of the memory are reused (spatial locality).\n",
      "\n",
      "In other words, blocking consists in working on data in chunks that fit into the cache, to minimize the miss/hit ratio.\n",
      "\n",
      "For example, loops in loop nests, may be split and swapped (a transformation called tiling) so that the working set of the inner loops fits into the targeted memory hierarchy level, whereas the outer loop jumps from block to block.\n",
      "\n",
      "Example of the blocked matrix-matrix multiplication:\n",
      "```cpp\n",
      "void MMMBlocked(float *C, float *A, float *B, int N)\n",
      "{\n",
      "\tfor(int i=0; i<N; i+=BS)\n",
      "\t{\t\n",
      "\t\tfor(int j=0; j<N; j+=BS)\n",
      "\t\t{\n",
      "\t\t\tfor(int k=0; k<N; k+=BS)\n",
      "\t\t\t{\n",
      "\t\t\t\tfor(int ii=i; ii<i+BS; ii++)\n",
      "\t\t\t\t{\n",
      "\t\t\t\t\tfor(int jj=j; jj<j+BS; jj++)\n",
      "\t\t\t\t\t{\n",
      "\t\t\t\t\t\tfor(int kk=k; kk<k+BS; kk++)\n",
      "\t\t\t\t\t\t{\n",
      "\t\t\t\t\t\t\tC[ii*N+jj] += A[ii*N+kk] * B[kk*N+jj];\n",
      "\t\t\t\t\t\t}\n",
      "\t\t\t\t\t}\n",
      "\t\t\t\t}\n",
      "\t\t\t}\n",
      "\t\t}\n",
      "\t}\n",
      "}\n",
      "```\n",
      "\n",
      "* Another way to achieve blocking is to choose a **recursive algorithm** to start with. \n",
      "\n",
      "* Recursive algorithms divide a large problem into smaller problems that typically operate on subsets of the data. \n",
      "\n",
      "* If designed and parametrized well, at some level, all sub-problems fit into the targeted memory level and blocking is achieved implicitly."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "##### Loop-merging\n",
      "\n",
      "* Numerical algorithms often have multiple stages. Each stage accesses the whole data set before the next stage can start, which produces multiple sweeps through the working set. If the working set does not fit into the cache this can dramatically reduce performance.\n",
      "\n",
      "* In some algorithms, the dependencies do not require that all operations of a previous stage are completed before any operation in a later stage can be started. If this is the case, loops can be merged and the number of passes through the working set can be reduced.\n",
      "\n",
      "```cpp\n",
      "for (int i=0; i<8; i++)\n",
      "{\n",
      "    y[2*i] = x[2*i] + x[2*i+1];\n",
      "}\n",
      "\n",
      "for (int i=0; i<8; i++)\n",
      "{\n",
      "    y[2*i+1] = x[2*i] - x[2*i+1];\n",
      "}\n",
      "```\n",
      "\n",
      "The two stages, computing even indices and computing odd indices, are independent; they can be merged into one single loop:\n",
      "\n",
      "```cpp\n",
      "for (int i=0; i<8; i++)\n",
      "{\n",
      "    y[2*i] = x[2*i] + x[2*i+1];\n",
      "    y[2*i+1] = x[2*i] - x[2*i+1];\n",
      "}\n",
      "```"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "##### Buffering\n",
      "\n",
      "* When working on multi-dimensional data like matrices, logically close elements can be far from each other in linearized memory. \n",
      "\n",
      "* For instance, matrix elements in one column are stored at a distance equal to the number of columns of that matrix. \n",
      "\n",
      "* Cache associativity and cache line size get into conflict if one wants to hold, for instance, a small rectangular section of such a matrix in cache, leading to cache trashing. \n",
      "\n",
      "* This means the elements accessed by the kernel are mapped to the same cache locations and hence are moved in and out during computation. \n",
      "\n",
      "* One simple solution is to copy the desired block into a contiguous temporary buffer. That incurs a one-time cost but allievates cache trashing."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "#### CPU and register level optimization\n",
      "\n",
      "Optimization for the highest level in the memory hierarchy, the registers, is to some extent similar to optimizations for the cache."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "##### Blocking\n",
      "\n",
      "* Cache blocking reduces traffic between the memory and the cache.\n",
      "\n",
      "* Register blocking reduces traffic between the cache and the CPU.\n",
      "\n",
      "* Register-level blocking partitions the data into chunks on which the computation can be performed within the register set.\n",
      "\n",
      "```cpp\n",
      "for (int i=0; i<N; i++)\n",
      "{\n",
      "    for (int k=0; k<N; k++)\n",
      "    {\n",
      "        r = A[i*N+k]; // register allocated\n",
      "        for (int j=0; j<N; j++)\n",
      "        {\n",
      "            C[i*N+j] += r * B[k*N+j]\n",
      "        }\n",
      "    }\n",
      "}\n",
      "```"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "##### Unrolling and Scheduling\n",
      "\n",
      "* The objective is to keep the pipeline full.\n",
      "\n",
      "* Unrolling produces larger basic blocks, but allows the compiler to apply strength reduction to simplify expressions and to better utilize register file.\n",
      "\n",
      "* Too much unrolling may increase the code size and overflow the instruction cache.\n",
      "\n",
      "* Unrolling exposes an opportunity to perform instruction scheduling. With unrolled code, it becomes easy to determine data dependencies between instructions.\n",
      "\n",
      "* Parallelism among instructions must be exploited by finding sequences of unrelated instructions that can be overlapped in the pipeline.\n",
      "\n",
      "* Instruction scheduling is the process of rearranging code to include independent instructions in between two dependent instructions to minimize pipeline stalls.\n",
      "\n",
      "* Avoid stalls (CPU pipeline being stalled), a dependent instruction must be separated from the source instruction by a distance in clock equal to the pipeline latency of that source instruction."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "##### Scalar replacement\n",
      "\n",
      "* In C compilers, pointer analysis is complicated. We should help the compiler with scalar replacements.\n",
      "\n",
      "* It is important to replace arrays that are fully inside the scope of the innermost loop by one automatic, scalar variable per array element.\n",
      "\n",
      "```cpp\n",
      "double t[2];\n",
      "for (int i=0; i<8; i++)\n",
      "{\n",
      "    t[0] = x[2*i] + x[2*i+1];\n",
      "    t[1] = x[2*i] - x[2*i+1];\n",
      "    y[2*i] = t[0] * D[2*i];\n",
      "    y[2*i+1] = t[1] * D[2*i];\n",
      "}\n",
      "```\n",
      "\n",
      "Scalarizing `t` will result in code that the compiler can better optimize:\n",
      "\n",
      "```cpp\n",
      "double t0, t1;\n",
      "for (int i=0; i<8; i++)\n",
      "{\n",
      "    t0 = x[2*i] + x[2*i+1];\n",
      "    t1 = x[2*i] - x[2*i+1];\n",
      "    y[2*i] = t0 * D[2*i];\n",
      "    y[2*i+1] = t1 * D[2*i];\n",
      "}\n",
      "```\n",
      "\n",
      "* The difference is that `t0` and `t1` are automatic variables and can be held in registers whereas the array t will be allocated in memory and loaded and stored from memory for each operation.\n",
      "\n",
      "* Similarly, scalar replacement can be applied to the input `x` and the data `D`:\n",
      "\n",
      "```cpp\n",
      "double t0, t1, x0, x1, D0;\n",
      "for (int i=0; i<8; i++)\n",
      "{\n",
      "    x0 = x[2*i];\n",
      "    x1 = x[2*i+1];\n",
      "    D0 = D[2*i];\n",
      "    t0 = x0 + x1;\n",
      "    t1 = x0 - x1;\n",
      "    y[2*i] = t0 * D0;\n",
      "    y[2*i+1] = t1 * D0;\n",
      "}\n",
      "```\n",
      "\n",
      "* If the value of `y[i]` is used as source in operations like `y[i] += t0`, then `y[i]` should also replaced by a scalar."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "##### Precomputation of constants\n",
      "\n",
      "* In CPU-bound kernel, all constants that are known ahead of time should be precomputed at compile time or initialization time and stored in a data array. \n",
      "\n",
      "* At the execution time, the kernel simlpy loads the precomputed data instead of needing to invoke math libray functions.\n",
      "\n",
      "````cpp\n",
      "for (i=0; i<8; i++)\n",
      "    y[i] = x[i] * sin(M_PI * i / 8);\n",
      "````\n",
      "\n",
      "* The program contains a function call to the math library in the inner loop. \n",
      "\n",
      "* Calling `sin()` can cost multiple thousands of cycles on modern CPUs. \n",
      "\n",
      "* All the constants are known before entering the kernel and thus can be precomputed:\n",
      "\n",
      "````cpp\n",
      "static double D[8];\n",
      "void init()\n",
      "{\n",
      "    for (int i=0; i<8; i++)\n",
      "        D[i] = sin(M_PI * i / 8);\n",
      "}\n",
      "...\n",
      "// in the kernel\n",
      "for (i=0; i<8; i++)\n",
      "    y[i] = x[i] * D[i]\n",
      "````\n",
      "\n",
      "* The initialization needs to be called only once. If the kernel is used over and over again, precomputation results in enourmous savings."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "---\n",
      "### $\\star$ The Fourier transform\n",
      "---"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "A physical process can be described either:\n",
      "\n",
      "* in the time-domain: $h(t)$\n",
      "* in the frequency domain: $H(f)$\n",
      "\n",
      "$h$ and $H$ are two representations of the same function. The Fourier transform equations allow to go from one to another representation:\n",
      "\n",
      "$$ H(f) = \\int_{-\\infty}^{\\infty} h(t) \\text{e}^{-2\\pi i ft} dt $$\n",
      "\n",
      "$$ h(t) = \\int_{-\\infty}^{\\infty} H(f) \\text{e}^{2\\pi i ft} df $$"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "---\n",
      "### $\\star$ Discrete Fourier tranform (DFT)\n",
      "---"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "In the most common situations, $h(t)$ is a **sampled data**, and we define the **discrete Fourier transform** as the aproximation of the integral by a discrete sum.\n",
      "\n",
      "If $h(t)$ is a sampled data of length $N$, intervaled with $\\Delta$:\n",
      "\n",
      "$$ h_k = h(t_k = k\\Delta) \\quad, \\quad 0\\leq k < N $$\n",
      "\n",
      "then, the discrete Fourier transform at frequency $f_n$ is given by:\n",
      "\n",
      "$$ H(f_n) \\approx \\sum_{k=0}^{N-1} h_k \\text{e}^{-2\\pi i f_n t_k} \\Delta $$\n",
      "\n",
      "where $f_n=\\frac{n}{N\\Delta}$.\n",
      "\n",
      "\n",
      "Let's define\n",
      "\n",
      "$$ H_n = \\sum_{k=0}^{N-1} h_k \\text{e}^{-2\\pi i kn/N} $$\n",
      "\n",
      "so that\n",
      "\n",
      "$$ H(f_n) = \\Delta H_n $$\n",
      "\n",
      "Therefore, the discrete Fourier transform maps $N$ complex numbers (the $h_k$'s) into $N$ complex numbers (the $H_n$'s).\n",
      "\n",
      "If we define the complex constant $w_N$ to be:\n",
      "\n",
      "$$ w_N = \\text{e}^{-2\\pi i/N} $$\n",
      "\n",
      "then\n",
      "\n",
      "$$ H_n = \\sum_{k=0}^{N-1} w_N^{kn} h_k $$\n",
      "\n",
      "The vector of $h_k$'s is multiplied by a matrix whose $(n,k)$-th element is the constant $w_N$ to the power $nk$.\n",
      "\n",
      "The DFT of an input vector $x$ of length $N$ is defined as the matrix-vector product:\n",
      "\n",
      "$$ y = \\text{DFT}_N x $$\n",
      "\n",
      "where\n",
      "\n",
      "$$ \\text{DFT}_N = \\left[w_N^{kl}\\right]_{0\\leq k,l < N} $$\n",
      "\n",
      "with $w_N = \\text{e}^{-2\\pi i/N}$\n",
      "\n",
      "Evaluating this definition requires $O(N^2)$ operations: there are $N$ outputs and each one requires a sum of $N$ terms.\n",
      "\n",
      "The discrete Fourier transform can be in fact computed in $O(N \\log N)$ operations with an algorithm called the **fast Fourier transform** (FFT).\n",
      "\n",
      "As an example, if $N=10^8$, then\n",
      "\n",
      "$$\\frac{N^2}{N \\log N} \\approx \\frac{\\text{2 months}}{\\text{1 second}} $$\n",
      "\n",
      "**A FFT computes the DFT and produces exactly the same result as evaluating the DFT definition directly, but is much faster.**"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "---\n",
      "### $\\star$ Danielson and Lanzcos (1942)\n",
      "---"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "Danielson and Lanzcos showed that a discrete Fourier transform a discrete Fourier transform of length $N$ can be rewritten as the sum of two discrete Fourier transforms, each of length $N/2$:\n",
      "\n",
      "$$\\begin{eqnarray}\n",
      "H_n &=& \\sum_{k=0}^{N-1} \\text{e}^{-2\\pi ikn/N} h_k \\\\\n",
      "&=& \\sum_{k=0}^{N/2-1} \\text{e}^{-2\\pi i(2k)n/N} h_{2k} + \\sum_{k=0}^{N/2-1} \\text{e}^{-2\\pi i(2k+1)n/N} h_{2k+1} \\\\\n",
      "&=& \\sum_{k=0}^{N/2-1} \\text{e}^{-2\\pi ikn/(N/2)} h_{2k} + w_N^n \\sum_{k=0}^{N/2-1} \\text{e}^{-2\\pi ikn/(N/2)} h_{2k+1} \\\\\n",
      "&=& H_n^e + w_N^n H_n^o\n",
      "\\end{eqnarray}$$\n",
      "\n",
      "$H_n^e$ denotes the $n$-th component of the Fourier transform of length $N/2$ formed from the even components of the $h_n$'s.\n",
      "\n",
      "$H_n^o$ is the corresponding transform of length $N/2$ formed from the odd components.\n",
      "\n",
      "The Danielson-Lanzcos lemma can be used **recursively**!\n",
      "\n",
      "We have reduced the problem of computing $H_n$ to that of computing $H_n^e$ and $H_n^o$. we can do the same reduction of $H_n^e$ to the problem of computing the transform of its $N/4$ even-numbered input data and $N/4$ odd-numbered data."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "---\n",
      "### $\\star$ Kronecker product formalism\n",
      "---"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "The **Kronecker product** of matrices $A$ and $B$ is defined as\n",
      "\n",
      "$$ A \\otimes B = \\left[ a_{k,l}B \\right]_{k,l} $$\n",
      "\n",
      "If $I_n$ represents the $n\\times n$Identity matrix, we get\n",
      "\n",
      "$$ I_n \\otimes A =\n",
      "\\begin{bmatrix}\n",
      "A &   &        &   \\\\\n",
      "  & A &        &   \\\\\n",
      "  &   & \\ddots &   \\\\\n",
      "  &   &        & A\n",
      "\\end{bmatrix} $$\n",
      "\n",
      "a $n$-block diagonal matrix where each block is the matrix $A$. \n",
      "\n",
      "For $n=4$:\n",
      "\n",
      "<center><img src=\"./images/kronecker_product.png\" style=\"width: 300px;\"/></center>\n",
      "\n",
      "We can also introduce the **iterative direct sum**:\n",
      "\n",
      "$$ \\bigoplus_{i=0}^{n-1} A_i =  \n",
      "\\begin{bmatrix}\n",
      "A_0 &   &        &   \\\\\n",
      "  & A_1 &        &   \\\\\n",
      "  &   & \\ddots &   \\\\\n",
      "  &   &        & A_{n-1}\n",
      "\\end{bmatrix} $$\n",
      "\n",
      "The Kronecker product is **not commutative**. $A \\otimes I_n$ contains also $n$ blocks of $A$, but are spread out and intervaled at stride $n$. \n",
      "\n",
      "<center><img src=\"./images/kronecker_product_2.png\" style=\"width: 300px;\"/></center>\n",
      "\n",
      "In the figure above, all elements with the same shade of gray taken together constitute one $A$. So the final matrix contains also four $A$ matrices.\n",
      "\n",
      "Finally, we introduce the **stride permutation matrix** $L_m^{mn}$ which permutes an input vector x of length $mn$ as\n",
      "\n",
      "$$ in+j \\rightarrow jm+i \\quad, \\quad 0\\leq i < m, \\quad 0\\leq j < n $$\n",
      "\n",
      "<center><img src=\"./images/permutation.png\" style=\"width: 400px;\"/></center>"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "---\n",
      "### $\\star$ Cooley-Tukey algorithm (1965)\n",
      "---"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "* This is the most commonly used FFT! \n",
      "\n",
      "* The algorithm recursively breaks down a DFT of any size $N = mn$ into smaller DFTs of sizes $m$ and $n$, along with $O(N)$ multiplications by complex roots of unity traditionally called twiddle factors. \n",
      "\n",
      "* Typicaly, either $m$ or $n$ is a small factor, called the radix.\n",
      "\n",
      "* Using the Kronecker product formalism, the **recursive Cooley-Tukey algorithm** is given by the following expression:\n",
      "\n",
      "$$ \\text{DFT}_{mn} = \\left( \\text{DFT}_m \\otimes I_n \\right) D_{m,n} \\left( I_m \\otimes \\text{DFT}_n \\right) L_m^{mn} $$\n",
      "\n",
      "where\n",
      "\n",
      "$$ D_{m,n} = \\bigoplus_{j=0}^{m-1} \\text{diag}\\left( \\left[w_{mn}^i\\right]_{0\\leq i < n} \\right)^j $$\n",
      "\n",
      "is called the **twiddle matrix**.\n",
      "\n",
      "The discrete Fourier transform, $y = \\text{DFT}_N x$, can now be computed in **four steps**:\n",
      "\n",
      "1. the input vector $x$ is permuted by $L_m^{mn}$,\n",
      "2. $m$ DFTs of size $n$ are computed recursively on segments of the input vector $x$,\n",
      "3. the vector is scaled element-wise by the twiddle matrix $D_{m,n}$,\n",
      "4. $n$ DFTs of size $m$ are computed recursively at stride $m$."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "---\n",
      "### $\\star$ Computing using loops\n",
      "---"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "The multiplication of a vector by a Kronecker product containing an identity matrix can be computed using loops.\n",
      "\n",
      "The working set for each of the $m$ iterations of $y=\\left(I_m \\otimes A_n \\right) x$ is a contiguous block of size $n$ and the base address is increased by $n$ between iterations.\n",
      "\n",
      "<center><img src=\"./images/ImxAn.png\" style=\"width: 800px;\"/></center>"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "In contrast, the working set of size $n$ of the $n$ iterations of $y = \\left( A_n \\otimes I_m \\right) x$ are intervaled with stride $m$ within one iteration.\n",
      "\n",
      "<center><img src=\"./images/AmxIn.png\" style=\"width: 800px;\"/></center>"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "---\n",
      "### $\\star$ Cache optimization\n",
      "---"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "#### Blocking\n",
      "\n",
      "We choose a **radix-$4$ implementation** of the recursive Cooley-Tukey algorithm with $N=4^n$, which means that $N$ is factorized as follow: $N = 4 . 4^{n-1}$. The **radix** $m=4$ is chosen to be the **block size**.\n",
      "\n",
      "Therefore, the discrete Fourier transform of an input vector $x$ of length $N=4^n$ is computed using the follwing recursion:\n",
      "\n",
      "$$ \\text{DFT}_{4^n} = \\left( \\text{DFT}_4 \\otimes I_{4^{n-1}} \\right) D_{4,4^{n-1}} \\left( I_4 \\otimes \\text{DFT}_{4^{n-1}} \\right) L_4^{4^n} $$\n",
      "\n",
      "The terms $\\text{DFT}_4$ are the recursion leaves.\n",
      "\n",
      "Let's have a look at the recursion for $n=2$ (i.e. the input length is $4^2=16$):\n",
      "\n",
      "<center>![image](files/images/dft16.png)</center>\n",
      "\n",
      "As said before, recursive algorithms naturally divide a large problem into smaller problems that typically operate on subsets of the data. If designed and parametrized well, at some level, all sub-problems fit into the targeted memory level and blocking is achieved implicitly."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "#### Loop merging\n",
      "\n",
      "* Naive implementation: recursive function with **four stages** and thus **four sweeps** through the data.\n",
      "\n",
      "* The stride permutation $L_4^{4^n}$ is just a **data reordering**. Therefore, it can be merged with the next stage.\n",
      "\n",
      "* The twiddle matrix $D_{4,4^{n-1}}$ is a diagonal matrix (we **scale** the input vector with the twiddle factors). Again, it can be merged with the next stage.\n",
      "\n",
      "The recursion can be cast into **two stages** (instead of four):\n",
      "\n",
      "$$ \\text{DFT}_{4^n} = \\left( \\left( \\text{DFT}_4 \\otimes I_{4^{n-1}} \\right) D_{4,4^{n-1}} \\right) \\cdot \\left( \\left( I_4 \\otimes \\text{DFT}_{4^{n-1}} \\right)  L_4^{4^n} \\right) $$\n",
      "\n",
      "For $n=2$, this gives:\n",
      "$$ \\text{DFT}_{16} = \\left( \\left( \\text{DFT}_4 \\otimes I_4 \\right) D_{4,4} \\right) \\cdot \\left( \\left( I_4 \\otimes \\text{DFT}_4 \\right)  L_4^{16} \\right) $$\n",
      "\n",
      "<center>![image](files/images/dft16_merged.png)</center>"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "##### First stage\n",
      "\n",
      "As stated before, the Kronecker product $I_4 \\otimes \\text{DFT}_{4^{n-1}}$ can be computed using a loop with $4$ iterations. The same is true with $\\left( I_4 \\otimes \\text{DFT}_{4^{n-1}} \\right) L_4^{4^n}$: the input is now read at stride $4$ but the output is still written at stride $1$.\n",
      "\n",
      "<center><img src=\"./images/ImxAn_permutation.png\" style=\"width: 800px;\"/></center>\n",
      "\n",
      "* Because the input is read at stride $m=4$, the radix, and the output is written at stride $1$, $x$ and $y$ need to be different memory regions, and the function needs to have a stride as an additional parameter.\n",
      "\n",
      "* The prototype of the recursive function would be:\n",
      "\n",
      "```cpp\n",
      "void DFT_rec(int N, int n, float *Y, float *X, int s);\n",
      "```\n",
      "\n",
      "We pass `n` together with `N` to avoid computing the logarithm.\n",
      "\n",
      "\n",
      "* For $N=4$, at the leaf of the recursion, the kernel `DFT4_base` is called, whose prototype is:\n",
      "\n",
      "```cpp\n",
      "void DFT4_base(float *Y, float *X, int s);\n",
      "```"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "##### Second stage\n",
      "\n",
      "* The second stage of the recursive function computes $\\left( \\text{DFT}_4 \\otimes I_{4^{n-1}} \\right) D_{4,4^{n-1}}$. \n",
      "\n",
      "* First, it scales the input by a diagonal matrix and then sweeps with a $\\text{DFT}_4$ kernel over it, applied at a stride.\n",
      "\n",
      "* The $\\text{DFT}_4$s can be replaced by $\\text{DFT}_4D_j$, where $D_j$ is a $4 \\times 4$ diagonal matrix containing the proper diagonal elements from $D_{4, 4^{n-1}}$:\n",
      "\n",
      "$$D_j = \\text{diag} \\left( w_{4^n}^0, w_{4^n}^j, w_{4^n}^{2j}, w_{4^n}^{3j} \\right) , \\quad 0\\leq j < 4^{n-1}$$\n",
      "\n",
      "* Hence, the function needs a stride as parameter and `j` to compute the elements of $D_j$.\n",
      "\n",
      "* Since the kernel reads from and writes to the same locations of the input and output vectors, we do not need to pass both $y$ and $x$ as arguments of the function.\n",
      "\n",
      "* The prototype of the function would be:\n",
      "\n",
      "```cpp\n",
      "void DFT4_twiddle(float *Y, int s, int n, int j);\n",
      "```"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "Overall, a blocked, loop-merged code would look like:\n",
      "\n",
      "```cpp\n",
      "// recursive radix-4 DFT implementation\n",
      "\n",
      "#include <math.h>\n",
      "\n",
      "// compute the exponent\n",
      "#define log4(N) (int)(log(N)/log(4))\n",
      "\n",
      "// top-level call to DFT function\n",
      "#define DFT(N, Y, X) DFT_rec(N, log4(N), Y, X, 1)\n",
      "\n",
      "// prototypes\n",
      "void DFT4_base(float *Y, float *X, int s);\n",
      "void DFT4_twiddle(float *Y, int s, int n, int j);\n",
      "\n",
      "void DFT_rec(int N, int n, float *Y, float *X, int s)\n",
      "{\n",
      "    if (N==4) // leaf of the recursion\n",
      "    {\n",
      "        DFT4_base(Y, X, s);\n",
      "    }\n",
      "    else // recursive call\n",
      "    {\n",
      "        // first stage\n",
      "        for (int j=0; j<4; j++)\n",
      "        {\n",
      "            DFT_rec(N/4, n-1, Y+(N/4)*j, X+j*s, s*4);\n",
      "        }\n",
      "        // second stage\n",
      "        for (int j=0; j<N/4; j++)\n",
      "        {\n",
      "            DFT4_twiddle(Y+j, N/4, n, j);\n",
      "        }\n",
      "    }\n",
      "}\n",
      "```"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "#### Buffering\n",
      "\n",
      "* The kernel `DFT4_twiddle` accesses both input and output at a stride. For large sizes $N=4^n$, this stride, $4^{n-1}$, is large!\n",
      "\n",
      "* Cache memory is a small, fast memory that resides between the main memory and the processor. When the processor initially requests data from a memory location, the cache fetches and stores the requested data and data spatially close.\n",
      "\n",
      "* Caches are divided into cache lines and sets. Data is moved in and out of cache memory in chunks equal to the line size.\n",
      "\n",
      "* Cache may be **direct mapped** (every main memory location is mapped to a specific cache location) or **$k$-way set associative** (every main memory location can be mapped to precisely $k$ possible cache locations).\n",
      "\n",
      "* For instance: Phantom has $8$ CPUs ($1$ socket, $4$ cores per socket, $2$ threads per core). Each core has its own caches:\n",
      "\n",
      "    * $L_{1d}$: $32K$, $8$-way set associative\n",
      "\n",
      "    * $L_{1i}$: $32K$, $8$-way set associative\n",
      "\n",
      "    * $L_{2}$: $256K$, $8$-way set associative\n",
      "\n",
      "    * $L_{3}$: $8MB$, $16$-way set associative\n",
      "\n",
      "* In addition to misses caused due to data being brought in for the first time (compulsory misses) and those due to cache capacity constraints (capacity misses), caches that are not fully associative can incur conflict misses.\n",
      "\n",
      "* **Cache thrashing** occurs when main memory is accessed in a pattern that leads to multiple main memory locations competing for the same cache lines.\n",
      "\n",
      "* Each iteration of the `DFT4_twiddle` loop has to load $4$ cache lines and all these cache lines get evicted before the next iteration of the `DFT4_twiddle` loop can use the already loaded remaining cache lines.\n",
      "\n",
      "* We assume a cache line size of $LS$ real numbers.\n",
      "\n",
      "* To implement buffering, we first split the `j` loop (containing $N/4$ iterations) into an outer loop ($N/4/LS$ iterations) and an inner loop ($LS$ iterations).\n",
      "\n",
      "* The large, performance-degrading stride $4^{n-1}$ in the original `j` loop gets replaced by a small stride $LS$ in the `j2` loop at the **cost of two copy operations** that copy whole cache lines.\n",
      "\n",
      "* The threshold parameter `th` controls the sizes for which the second loop gets buffered."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "```cpp\n",
      "// cache line size of 64 bytes (16 floats)\n",
      "#define LS 16\n",
      "\n",
      "void DFT_buf_rec(int N, int n, float *Y, float *X, int s, int th)\n",
      "{\n",
      "    // local buffer\n",
      "    float buf[4*LS];\n",
      "    \n",
      "    if (N==4) // recursion leaf\n",
      "    {\n",
      "        DFT4_base(Y, X, s);\n",
      "    }\n",
      "    else // recursive call\n",
      "    {\n",
      "        // first stage\n",
      "        if (N>th)\n",
      "        {\n",
      "            for (int j=0; j<4; j++)\n",
      "            {\n",
      "                DFT_buf_rec(N/4, n-1, Y+(N/4)*j, X+j*s, s*4, th);\n",
      "            }\n",
      "        }\n",
      "        else\n",
      "        {\n",
      "            for (int j=0; j<4; j++)\n",
      "            {\n",
      "                DFT_rec(N/4, n-1, Y+(N/4)*j, X+j*s, s*4);\n",
      "            }\n",
      "        }\n",
      "        // second stage\n",
      "        for (int j1=0; j1<N/(4*LS); j1++)\n",
      "        {\n",
      "            // copy 4 chunks of LS float to local buffer\n",
      "            for (int i=0; i<4; i++)\n",
      "            {\n",
      "                for (int k=0; k<LS; k++)\n",
      "                {\n",
      "                    buf[LS*i+k] = Y[LS*j1+(N/4)*i+k];\n",
      "                }\n",
      "            }\n",
      "            // perform LS DFT4 on contiguous data\n",
      "            for (int j2=0; j2<LS; j2++)\n",
      "            {\n",
      "                DFT4_twiddle(buf+j2, LS, n, j1*LS+j2);\n",
      "            }\n",
      "            // copy 4 chunks of LS float to output\n",
      "            for (int i=0; i<4; i++)\n",
      "            {\n",
      "                for (int k=0; k<LS; k++)\n",
      "                {\n",
      "                    Y[LS*j1+(N/4)*i+k] = buf[LS*i+k];\n",
      "                }\n",
      "            }\n",
      "        }\n",
      "    }\n",
      "}\n",
      "```"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "#### CPU and register level optimization\n",
      "\n",
      "We describe the implementation of the DFT base kernels.\n",
      "\n",
      "The recursive expression applied to $\\text{DFT}_4$ is given by:\n",
      "\n",
      "$$ \\text{DFT}_4 = \\left( \\text{DFT}_2 \\otimes I_2 \\right) D_{4,2} \\left( I_2 \\otimes \\text{DFT}_2 \\right) L_2^4 $$\n",
      "\n",
      "Because it is a recursive formula, the implementation will automatically be implicitly blocked."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "##### Scalar replacement, Unrolling and Scheduling\n",
      "\n",
      "Objective: to keep the CPU as busy as possible!\n",
      "\n",
      "```cpp\n",
      "// DFT4 implementation\n",
      "void DFT4_base(double *Y, double *X, int s)\n",
      "{\n",
      "    double t0, t1, t2, t3, t4, t5, t6, t7;\n",
      "    t0 = (X[0] + X[4*s]);\n",
      "    t1 = (X[2*s] + X[6*s]);\n",
      "    t2 = (X[1] + X[4*s+1]);\n",
      "    t3 = (X[2*s+1] + X[6*s+1]);\n",
      "    t4 = (X[0] - X[4*s]);\n",
      "    t5 = (X[2*s+1] - X[6*s+1]);\n",
      "    t6 = (X[1] - X[4*s+1]);\n",
      "    t7 = (X[2*s] - X[6*s]);\n",
      "    Y[0] = (t0 + t1);\n",
      "    Y[1] = (t2 + t3);\n",
      "    Y[4] = (t0 - t1);\n",
      "    Y[5] = (t2 - t3);\n",
      "    Y[2] = (t4 - t5);\n",
      "    Y[3] = (t6 + t7);\n",
      "    Y[6] = (t4 + t5);\n",
      "    Y[7] = (t6 - t7);\n",
      "}\n",
      "```"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "##### Precomputation of constants\n",
      "\n",
      "* The kernel `DFT4_twiddle` computes $y = \\left( \\text{DFT}_4 D_j \\right) x$, which contains multiplication with the complex diagonal $D_j$.\n",
      "\n",
      "* The entries of $D_j$ are complex roots of unity (twiddle factors) that depend on the recursion level and the loop counter `j`.\n",
      "\n",
      "* Computing the actual entries of $D_j$ requires evaluations of $\\sin \\frac{2k\\pi}{N}$ and $\\cos \\frac{2k\\pi}{N}$, which requires expensive calls to the math library.\n",
      "\n",
      "* These numbers should be precomputed.\n",
      "\n",
      "* We introduce an initialization function `init_DFT` that precomputes all diagonals required for size $N$ and stores pointers to the tables (one table for each recursion level) in the global variable `double **DN`.\n",
      "\n",
      "```cpp\n",
      "#define PI 3.14159265358979323846\n",
      "\n",
      "double **DN;\n",
      "\n",
      "void init_DFT(int N)\n",
      "{\n",
      "    int i, j, k, size_Dj=16, n_max=log4(N);\n",
      "    DN = malloc(sizeof(double*)*(n_max-1));\n",
      "    \n",
      "    for (j=1; j<n_max; j++, size_Dj*=4)\n",
      "    {\n",
      "        double *Dj = DN[j-1] = malloc(2*sizeof(double)*size_Dj);\n",
      "        for (k=0; k<size_Dj/4; k++)\n",
      "        {\n",
      "            for (i=0; i<4; i++)\n",
      "            {\n",
      "                *(Dj++) = cos(2*PI*i*k/size_Dj);\n",
      "                *(Dj++) = sin(2*PI*i*k/size_Dj);\n",
      "            }\n",
      "        }\n",
      "    }\n",
      "}\n",
      "```"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "---\n",
      "## References\n",
      "[[1]](http://users.ece.cmu.edu/~franzf/papers/gttse07.pdf) Chellappa et al. *How to Write Fast Numerical Code: A Small Introduction*. Generative and Transformational Techniques in Software Engineering II, Springer, pp 196--259 (2008).\n",
      "\n",
      "[2] Press et al. *Numerical Recipes: The Art of Scientific Computing, Third edition*. Cambridge University Press (2007).\n",
      "\n",
      "[[3]](http://suif.stanford.edu/papers/lam-asplos91.pdf) Lam M., Rothberg E. and Wolf M. *The Cache Performance and Optimizations of Blocked Algorithms*. ACM SIGOPS Operating Systems Review vol.25, pp 63--74 (1991)."
     ]
    }
   ],
   "metadata": {}
  }
 ]
}