<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <title>IceFormer</title>
  <!-- Bootstrap 3 + jQuery -->
  <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/css/bootstrap.min.css">
  <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
  <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/js/bootstrap.min.js"></script>
  <!-- Slick carousel assets -->
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/jquery.slick/1.6.0/slick.css">
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/jquery.slick/1.6.0/slick-theme.css">
  <script src="https://cdn.jsdelivr.net/jquery.slick/1.6.0/slick.min.js"></script>
  <!-- jQuery Migrate 1.x is for jQuery 1.x and errors under jQuery 3.x; use the 3.x line -->
  <script src="https://code.jquery.com/jquery-migrate-3.3.2.min.js"></script>
  <link rel="stylesheet" href="./website/index.css">
  <!-- polyfill.io was compromised in a 2024 supply-chain attack; load the
       Cloudflare-maintained drop-in mirror instead -->
  <script src="https://cdnjs.cloudflare.com/polyfill/v3/polyfill.min.js?features=es6"></script>
  <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml.js"></script>
  <!-- NOTE(review): gitcdn.github.io is unmaintained; consider self-hosting bootstrap-toggle -->
  <link rel="stylesheet" href="https://gitcdn.github.io/bootstrap-toggle/2.2.2/css/bootstrap-toggle.min.css">
  <script src="https://gitcdn.github.io/bootstrap-toggle/2.2.2/js/bootstrap-toggle.min.js"></script>
</head>
<body>

<a href="https://github.com/yuzhenmao/IceFormer" class="github-corner" aria-label="View source on GitHub"><svg width="80" height="80" viewBox="0 0 250 250" style="fill:#151513; color:#fff; position: fixed; top: 0; border: 0; right: 0;" aria-hidden="true"><path d="M0,0 L115,115 L130,115 L142,142 L250,250 L250,0 Z"></path><path d="M128.3,109.0 C113.8,99.7 119.0,89.6 119.0,89.6 C122.0,82.7 120.5,78.6 120.5,78.6 C119.2,72.0 123.4,76.3 123.4,76.3 C127.3,80.9 125.5,87.3 125.5,87.3 C122.9,97.6 130.6,101.9 134.4,103.2" fill="currentColor" style="transform-origin: 130px 106px;" class="octo-arm"></path><path d="M115.0,115.0 C114.9,115.1 118.7,116.5 119.8,115.4 L133.7,101.6 C136.9,99.2 139.9,98.4 142.2,98.6 C133.8,88.0 127.5,74.4 143.8,58.0 C148.5,53.4 154.0,51.2 159.7,51.0 C160.3,49.4 163.2,43.6 171.4,40.1 C171.4,40.1 176.1,42.5 178.8,56.2 C183.1,58.6 187.2,61.8 190.9,65.4 C194.5,69.0 197.7,73.2 200.1,77.6 C213.8,80.2 216.3,84.9 216.3,84.9 C212.7,93.1 206.9,96.0 205.4,96.6 C205.1,102.4 203.0,107.8 198.3,112.5 C181.9,128.9 168.3,122.5 157.7,114.1 C157.9,116.9 156.7,120.9 152.7,124.9 L141.0,136.5 C139.8,137.7 141.6,141.9 141.8,141.8 Z" fill="currentColor" class="octo-body"></path></svg></a><style>.github-corner:hover .octo-arm{animation:octocat-wave 560ms ease-in-out}@keyframes octocat-wave{0%,100%{transform:rotate(0)}20%,60%{transform:rotate(-25deg)}40%,80%{transform:rotate(10deg)}}@media (max-width:500px){.github-corner:hover .octo-arm{animation:none}.github-corner .octo-arm{animation:octocat-wave 560ms ease-in-out}}</style>

<div class="container-fluid text-center">
    <div class="row content">
        <h1 class="display-1">IceFormer</h1>
      <p class="lead">Accelerated Inference with Long-Sequence Transformers on CPUs</p>
        <div class="col-sm-2"></div>
        <div class="col-sm-1"></div>
        <div class="col-sm-2">
            <a href="https://scholar.google.com/citations?user=9wKn1A0AAAAJ&hl=en"><p class="name">Yuzhen Mao</p></a>
            <p class="affiliation">School of Computing Science<br>Simon Fraser University</p>
        </div>
        <div class="col-sm-2">
            <a href="https://sites.google.com/view/esterlab"><p class="name">Martin Ester</p></a>
            <p class="affiliation">School of Computing Science<br>Simon Fraser University</p>
        </div>
        <div class="col-sm-2">
            <a href="https://www.sfu.ca/~keli/"><p class="name">Ke Li</p></a>
            <p class="affiliation">School of Computing Science<br>Simon Fraser University</p>
        </div>
        <div class="col-sm-3"></div>
    </div>

  <div class="col-sm-4 sidenav text-left">
      <div class="container-fluid">
          <h3 class="shift">Contents</h3>
      <ul class="nav nav-pills nav-stacked">
        <li><a href="#overview">Overview</a></li>
        <li><a href="#knn">Ranking-based vs Bucketing-based k-NNS</a></li>
        <li><a href="#b1">Benchmarking nearest neighbors</a></li>
        <li><a href="#b2">Benchmarking LRA</a></li>
        <li><a href="#b3">Benchmarking LLM</a></li>
        <li><a href="#citation">Citation</a></li>
      </ul><br>
      </div>

    </div>

  <div>
    <a href="https://iclr.cc/Conferences/2024">ICLR 2024</a>
  </div>

  <!-- Right sidebar: links to the paper and the code repository -->
  <div class="col-sm-4 sidenav barright text-left">
      <div class="container-fluid">
          <h3>Links</h3>
          <div class="row">
              <div class="col-sm-6">
                  <!-- width attribute takes an integer pixel count; "48px" is invalid -->
                  <a href="https://arxiv.org/abs/2405.02842"><img src="./website/paper_icon.jpg" alt="IceFormer paper on arXiv" width="48"></a>
                  <p>Paper</p>
              </div>
              <div class="col-sm-6">
                  <a href="https://github.com/yuzhenmao/IceFormer"><img src="./website/github.jpg" alt="IceFormer code on GitHub" width="48"></a>
                  <p>Code</p>
              </div>
          </div>
      </div>
    </div>

  <div class="row content">
    <div class="col-sm-3">
    </div>

    <div class="col-sm-6 text-left">

        <!-- Embedded talk video; iframes require a title for screen readers -->
        <div class="video-container">
            <iframe class="responsive-iframe" src="https://www.youtube.com/embed/6W0DtYRzFng" title="IceFormer presentation video" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
        </div>

        <hr>

      <h2 id="overview">Overview</h2>
        <!-- <p>We use our method to increase the width and height of input images by a factor of 16x.
            Toggle for results from our method (CHIMLE) and those of leading baselines,
            including GAN-based general-purpose methods (<a href="https://arxiv.org/abs/1711.11586">BicycleGAN</a>,
            <a href="https://arxiv.org/abs/1903.05628">MSGAN</a>, <a href="https://arxiv.org/abs/2103.07893">DivCo</a>
            and <a href="https://arxiv.org/abs/2203.09333">MoNCE</a>) , diffusion-based general-purpose methods
            (<a href="https://arxiv.org/abs/2201.11793">DDRM</a> and <a href="https://arxiv.org/abs/2207.09786">NDM</a>),
            a task specific method (<a href="https://arxiv.org/abs/2005.12597">RFB-ESRGAN</a>) and
            <a href="https://arxiv.org/abs/2004.03590">conditional IMLE (cIMLE)</a>.
        </p> -->

        <p>Below is the overview of the <a href="https://arxiv.org/abs/1706.03762">Transformer</a> (top row) and our method, IceFormer (bottom row). 
          We illustrate with one query and k=2 in k-NNS. In the two attention matrices presented, 
          the top-2 largest attention weights in each row are represented by a dark color. 
          The remaining attention weights are shown in a pale color in the vanilla attention matrix, 
          and are set to zero (depicted in white) in the sparse attention matrix.
        </p>

        <img src="./website/pipeline.png" alt="Overview of the vanilla Transformer attention (top row) and IceFormer's k-NNS-based sparse attention (bottom row)" style="width:100%">

        <hr>

     <h2 id="knn">Ranking-based vs. Bucketing-based k-NNS</h2>
      <p>
        We illustrate the difference between ranking-based and bucketing-based k-NNS. As shown in the figure below, 
        bucketing-based places keys into discrete buckets and searches over buckets that contain the query. 
        On the other hand, ranking-based algorithms compare the
        rankings of different keys relative to the query and searches over highly ranked keys.
      </p>
      <p>
        We pick a ranking-based k-NNS algorithm, <a href="https://arxiv.org/abs/1703.00440">Prioritized-DCI</a>, for accelerating attention, 
        because attention weights depend on how different keys compare to one another,
        rather than an absolute evaluation of each key against a fixed threshold. Therefore, ranking-based
        algorithms are better aligned with how attention weights are calculated and so can yield better recall of
        truly important keys.
      </p>
        <img src="./website/LSH.png" alt="Illustration of bucketing-based versus ranking-based k-nearest-neighbor search" style="width:100%">
        <hr>


    <h2 id="b1">Benchmarking nearest neighbors</h2>
        <p>
          We compare the recall of true nearest neighbors and total construction and querying time of twelve k-NNS algorithms including 
          <a href="https://arxiv.org/abs/1703.00440">Prioritized-DCI</a> on fashion-mnist-784 dataset using the <a href="https://github.com/erikbern/ann-benchmarks">ann-benchmarks</a>. 
          As shown in the figure below, Prioritized-DCI can achieve the best recall-latency trade-off across all the tested algorithms, 
          which illustrates its suitability in the setting when the construction and querying time are both important.
        </p>
        <img src="./website/knn-bench.png" alt="Recall versus total construction-and-query time for twelve k-NNS algorithms on fashion-mnist-784" style="width:100%">
        <hr>


        <h2 id="b2">Benchmarking LRA</h2>
        <p>
          We compare IceFormer on the <a href="https://arxiv.org/abs/2011.04006">LRA</a> benchmark with five other baselines, including
          <a href="https://arxiv.org/abs/1706.03762">Transformer</a>,
          <a href="https://arxiv.org/abs/2001.04451">Reformer</a>,
          <a href="https://arxiv.org/abs/2102.03902">Nyströmformer</a>, 
          <a href="https://arxiv.org/abs/2204.04667">LARA</a>, and
          <a href="https://arxiv.org/abs/2107.11906">H-Transformer-1D</a>.
          From the figure below, our proposed IceFormer consistently outperforms all the baselines (up to 2.5× speed-up compared to the second best baselines), offering
          the least inference time across all five tasks.
        </p>
        <img src="./website/LRA_latency.png" alt="Inference latency of IceFormer and five baselines across the LRA benchmark tasks" style="width:100%">

        <hr>

        <p>
          We also compare the maximum memory usage for each method
          during inference. The figure below reveals that IceFormer consistently exhibits the
          lowest peak memory usage across all tasks. In comparison to the vanilla Transformer, IceFormer
          achieves memory savings of up to 0.862 GB.
        </p>

        <img src="./website/LRA_mem.png" alt="Peak memory usage of IceFormer and baselines during inference on the LRA benchmark" style="width:100%">

        <hr>

        <h2 id="b3">Benchmarking LLM</h2>

        We utilize IceFormer to accelerate prompt processing in Large Language Models (LLMs). 
        We pick <a href="https://huggingface.co/lmsys/vicuna-7b-v1.5-16k">Vicuna-7b-v1.5-16k</a>, which
        is fine-tuned from <a href="https://llama.meta.com/llama2/">Llama 2</a> and supports a context length of up to 16,000 tokens.

        <hr>

        <h4>LongEval</h4>

        <p>
          We conduct experiments on the <a href="https://lmsys.org/blog/2023-06-29-longchat/">LongEval</a> benchmark 
          to assess the long-range retrieval capability of LLMs across different context lengths.
          In the figure below, we present the averaged latency of the attention
          module corresponding to different input prompt length as well as the inference accuracy. 
          From the figure, IceFormer can achieve nearly identical inference accuracy compared with the vanilla Vicuna-7b-v1.5-16k. 
          Furthermore, as the length of the prompt increases, the difference in the latency between IceFormer and the vanilla Transformer becomes
          larger.
        </p>

        <img src="./website/LongEval.png" alt="Attention-module latency and retrieval accuracy versus prompt length on the LongEval benchmark" style="width:100%">

        <hr>

        <h4>ZeroScrolls</h4>

        <p>
          To evaluate IceFormer on more complex academic long-range reasoning tasks, we pick the Qasper question-answering dataset from the
           <a href="https://arxiv.org/abs/2305.14196">ZeroScrolls</a> long sequence benchmark and use its validation set selection and prompts. 
           As shown in the table below, IceFormer achieves 2.3× speed-up compared to standard self-attention while
          attaining 99.4% of the vanilla unaccelerated model performance at the same time.
        </p>

        <img src="./website/Zeroscroll.png" alt="Speed-up and model quality of IceFormer on the ZeroScrolls Qasper question-answering task" style="width:100%">

        <hr>

        <div id="citation">
        <h2>Citation</h2>
            <pre><code>@inproceedings{
  mao2024iceformer,
  title={IceFormer: Accelerated Inference with Long-Sequence Transformers on {CPU}s},
  author={Yuzhen Mao and Martin Ester and Ke Li},
  booktitle={The Twelfth International Conference on Learning Representations},
  year={2024},
}</code></pre>
          </div>
        </div>



    <div class="col-sm-3 sidenav">
    </div>
  </div>
</div>

<script>
    // NOTE(review): show_option appears unused in this script; kept in case
    // other page scripts read it — confirm before removing.
    var show_option = 4;

    $(document).ready(function () {
      // Initialize the slick carousel (no-op when no .my-carousel is present).
      $('.my-carousel').slick({
        centerMode: true,
        dots: true,
        centerPadding: '20px',
        slidesToShow: 3,
        infinite: true,
        autoplay: true,
        autoplaySpeed: 4000,
        arrows: true,
        focusOnSelect: true,
        slidesToScroll: 3,
        responsive: [
          {
            breakpoint: 720,
            settings: { slidesToShow: 2, slidesToScroll: 2 }
          },
          {
            breakpoint: 512,
            settings: { slidesToShow: 1, slidesToScroll: 1 }
          }
        ]
      });
    });

    $(document).ready(function () {
      // Smooth scrolling for in-page anchor links.
      $('a').on('click', function (event) {
        var hash = this.hash;

        // Only intercept links whose fragment targets an element in THIS
        // document. Previously $(hash).offset().top was read unconditionally,
        // which throws a TypeError (offset() of an empty set is undefined)
        // for external links that happen to carry a fragment.
        if (hash !== '' && $(hash).length) {
          // Prevent the default jump so we can animate instead.
          event.preventDefault();

          $('html, body').animate({
            scrollTop: $(hash).offset().top
          }, 600, function () {
            // Restore the fragment in the URL once scrolling finishes.
            window.location.hash = hash;
          });
        }
      });
    });
  </script>



</body>
</html>